Posted to commits@hbase.apache.org by el...@apache.org on 2018/12/07 22:28:44 UTC

[01/51] [abbrv] hbase git commit: HBASE-21255 [acl] Refactor TablePermission into three classes (Global, Namespace, Table) [Forced Update!]

Repository: hbase
Updated Branches:
  refs/heads/HBASE-20952 9bfd69e9b -> fb59426b7 (forced update)


http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index fb22ac0..0c1e761 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
-import org.apache.hadoop.hbase.security.access.TablePermission;
+import org.apache.hadoop.hbase.security.access.UserPermission;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -435,10 +435,10 @@ public final class SnapshotDescriptionUtils {
 
   private static SnapshotDescription writeAclToSnapshotDescription(SnapshotDescription snapshot,
       Configuration conf) throws IOException {
-    ListMultimap<String, TablePermission> perms =
-        User.runAsLoginUser(new PrivilegedExceptionAction<ListMultimap<String, TablePermission>>() {
+    ListMultimap<String, UserPermission> perms =
+        User.runAsLoginUser(new PrivilegedExceptionAction<ListMultimap<String, UserPermission>>() {
           @Override
-          public ListMultimap<String, TablePermission> run() throws Exception {
+          public ListMultimap<String, UserPermission> run() throws Exception {
             return AccessControlLists.getTablePermissions(conf,
               TableName.valueOf(snapshot.getTable()));
           }
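
For readers tracking the API change: after this hunk, callers receive a
ListMultimap<String, UserPermission> instead of ListMultimap<String,
TablePermission>. A minimal sketch of consuming the new shape, assuming the
getUser()/getPermission() accessors shown in later hunks of this same commit
("conf", "tableName" and the println are placeholders, not code from the
patch):

    ListMultimap<String, UserPermission> perms =
        AccessControlLists.getTablePermissions(conf, tableName);
    for (Map.Entry<String, UserPermission> entry : perms.entries()) {
      String user = entry.getValue().getUser();         // a String now, not byte[]
      Permission p = entry.getValue().getPermission();  // Global-/Namespace-/TablePermission
      System.out.println(user + " -> " + Arrays.toString(p.getActions()));
    }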

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 78bb5f6..1b70054 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -274,7 +274,7 @@ public class TestAccessController extends SecureTestUtil {
   public static void tearDownAfterClass() throws Exception {
     cleanUp();
     TEST_UTIL.shutdownMiniCluster();
-    int total = TableAuthManager.getTotalRefCount();
+    int total = AuthManager.getTotalRefCount();
     assertTrue("Unexpected reference count: " + total, total == 0);
   }
 
@@ -1634,12 +1634,12 @@ public class TestAccessController extends SecureTestUtil {
       }
 
       UserPermission ownerperm =
-          new UserPermission(Bytes.toBytes(USER_OWNER.getName()), tableName, null, Action.values());
+          new UserPermission(USER_OWNER.getName(), tableName, Action.values());
       assertTrue("Owner should have all permissions on table",
         hasFoundUserPermission(ownerperm, perms));
 
       User user = User.createUserForTesting(TEST_UTIL.getConfiguration(), "user", new String[0]);
-      byte[] userName = Bytes.toBytes(user.getShortName());
+      String userName = user.getShortName();
 
       UserPermission up =
           new UserPermission(userName, tableName, family1, qualifier, Permission.Action.READ);
@@ -1725,7 +1725,7 @@ public class TestAccessController extends SecureTestUtil {
       }
 
       UserPermission newOwnerperm =
-          new UserPermission(Bytes.toBytes(newOwner.getName()), tableName, null, Action.values());
+          new UserPermission(newOwner.getName(), tableName, Action.values());
       assertTrue("New owner should have all permissions on table",
         hasFoundUserPermission(newOwnerperm, perms));
     } finally {
@@ -1749,12 +1749,10 @@ public class TestAccessController extends SecureTestUtil {
 
     Collection<String> superUsers = Superusers.getSuperUsers();
     List<UserPermission> adminPerms = new ArrayList<>(superUsers.size() + 1);
-    adminPerms.add(new UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()),
-      AccessControlLists.ACL_TABLE_NAME, null, null, Bytes.toBytes("ACRW")));
-
+    adminPerms.add(new UserPermission(USER_ADMIN.getShortName(), Bytes.toBytes("ACRW")));
     for(String user: superUsers) {
-      adminPerms.add(new UserPermission(Bytes.toBytes(user), AccessControlLists.ACL_TABLE_NAME,
-          null, null, Action.values()));
+      // Global permission
+      adminPerms.add(new UserPermission(user, Action.values()));
     }
     assertTrue("Only super users, global users and user admin has permission on table hbase:acl " +
         "per setup", perms.size() == 5 + superUsers.size() &&
@@ -2432,7 +2430,7 @@ public class TestAccessController extends SecureTestUtil {
     verifyAllowed(getAction, testGrantRevoke);
     verifyDenied(putAction, testGrantRevoke);
 
-    // Grant global READ permissions to testGrantRevoke.
+    // Grant global WRITE permissions to testGrantRevoke.
     try {
       grantGlobalUsingAccessControlClient(TEST_UTIL, systemUserConnection, userName,
               Permission.Action.WRITE);
@@ -2757,8 +2755,11 @@ public class TestAccessController extends SecureTestUtil {
       assertTrue(namespacePermissions != null);
       assertEquals(expectedAmount, namespacePermissions.size());
       for (UserPermission namespacePermission : namespacePermissions) {
-        assertFalse(namespacePermission.isGlobal());  // Verify it is not a global user permission
-        assertEquals(expectedNamespace, namespacePermission.getNamespace());  // Verify namespace is set
+        // Verify it is not a global user permission
+        assertFalse(namespacePermission.getAccessScope() == Permission.Scope.GLOBAL);
+        // Verify namespace is set
+        NamespacePermission nsPerm = (NamespacePermission) namespacePermission.getPermission();
+        assertEquals(expectedNamespace, nsPerm.getNamespace());
       }
     } catch (Throwable thw) {
       throw new HBaseException(thw);
@@ -3125,8 +3126,8 @@ public class TestAccessController extends SecureTestUtil {
       Permission.Action[] expectedAction = { Action.READ };
       boolean userFound = false;
       for (UserPermission p : userPermissions) {
-        if (testUserPerms.getShortName().equals(Bytes.toString(p.getUser()))) {
-          assertArrayEquals(expectedAction, p.getActions());
+        if (testUserPerms.getShortName().equals(p.getUser())) {
+          assertArrayEquals(expectedAction, p.getPermission().getActions());
           userFound = true;
           break;
         }
@@ -3593,15 +3594,24 @@ public class TestAccessController extends SecureTestUtil {
     assertEquals(resultCount, userPermissions.size());
 
     for (UserPermission perm : userPermissions) {
-      if (cf != null) {
-        assertTrue(Bytes.equals(cf, perm.getFamily()));
-      }
-      if (cq != null) {
-        assertTrue(Bytes.equals(cq, perm.getQualifier()));
-      }
-      if (userName != null
-          && (superUsers == null || !superUsers.contains(Bytes.toString(perm.getUser())))) {
-        assertTrue(userName.equals(Bytes.toString(perm.getUser())));
+      if (perm.getPermission() instanceof TablePermission) {
+        TablePermission tablePerm = (TablePermission) perm.getPermission();
+        if (cf != null) {
+          assertTrue(Bytes.equals(cf, tablePerm.getFamily()));
+        }
+        if (cq != null) {
+          assertTrue(Bytes.equals(cq, tablePerm.getQualifier()));
+        }
+        if (userName != null
+          && (superUsers == null || !superUsers.contains(perm.getUser()))) {
+          assertTrue(userName.equals(perm.getUser()));
+        }
+      } else if (perm.getPermission() instanceof NamespacePermission ||
+          perm.getPermission() instanceof GlobalPermission) {
+        if (userName != null &&
+          (superUsers == null || !superUsers.contains(perm.getUser()))) {
+          assertTrue(userName.equals(perm.getUser()));
+        }
       }
     }
   }
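
A reading aid for the constructor churn above (illustrative only; the
constructor shapes are taken from the +/- lines in this file, but the
variable names are placeholders, and the scope of the global form is
inferred from the getAccessScope() checks shown in these hunks):

    // Global scope: user + actions (formerly byte[] user, table, null, null, actions).
    UserPermission globalPerm = new UserPermission("admin", Permission.Action.values());
    // Table scope: user + table + actions.
    UserPermission tablePerm = new UserPermission("bob", tableName, Permission.Action.READ);
    // Cell scope: user + table + family + qualifier + actions.
    UserPermission cellPerm =
        new UserPermission("bob", tableName, family, qualifier, Permission.Action.READ);
    // The scope travels with the wrapped Permission:
    assert tablePerm.getAccessScope() == Permission.Scope.TABLE;
    assert globalPerm.getAccessScope() == Permission.Scope.GLOBAL;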

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
index 21c1438..eb2a5ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController2.java
@@ -201,17 +201,17 @@ public class TestAccessController2 extends SecureTestUtil {
     TEST_UTIL.waitTableAvailable(TEST_TABLE.getTableName());
     // Verify that owner permissions have been granted to the test user on the
     // table just created
-    List<TablePermission> perms =
+    List<UserPermission> perms =
       AccessControlLists.getTablePermissions(conf, TEST_TABLE.getTableName())
        .get(testUser.getShortName());
     assertNotNull(perms);
     assertFalse(perms.isEmpty());
     // Should be RWXCA
-    assertTrue(perms.get(0).implies(Permission.Action.READ));
-    assertTrue(perms.get(0).implies(Permission.Action.WRITE));
-    assertTrue(perms.get(0).implies(Permission.Action.EXEC));
-    assertTrue(perms.get(0).implies(Permission.Action.CREATE));
-    assertTrue(perms.get(0).implies(Permission.Action.ADMIN));
+    assertTrue(perms.get(0).getPermission().implies(Permission.Action.READ));
+    assertTrue(perms.get(0).getPermission().implies(Permission.Action.WRITE));
+    assertTrue(perms.get(0).getPermission().implies(Permission.Action.EXEC));
+    assertTrue(perms.get(0).getPermission().implies(Permission.Action.CREATE));
+    assertTrue(perms.get(0).getPermission().implies(Permission.Action.ADMIN));
   }
 
   @Test
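
The assertions above illustrate the new indirection (sketch only, reusing the
names from the hunk): checks now go through the wrapped Permission object
rather than the list element itself.

    // Before this commit: perms.get(0).implies(Permission.Action.READ)
    boolean canRead = perms.get(0).getPermission().implies(Permission.Action.READ);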

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java
index 6ca2ef8..7b10e3f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController3.java
@@ -57,7 +57,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Performs checks for reference counting w.r.t. TableAuthManager which is used by
+ * Performs checks for reference counting w.r.t. AuthManager which is used by
  * AccessController.
  *
  * NOTE: Only one test in  here. In AMv2, there is problem deleting because

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java
index 66e37bc..d37794d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java
@@ -206,11 +206,9 @@ public class TestNamespaceCommands extends SecureTestUtil {
     String userTestNamespace = "userTestNsp";
     Table acl = UTIL.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME);
     try {
-      ListMultimap<String, TablePermission> perms =
-          AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
-
-      perms = AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
-      for (Map.Entry<String, TablePermission> entry : perms.entries()) {
+      ListMultimap<String, UserPermission> perms =
+        AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
+      for (Map.Entry<String, UserPermission> entry : perms.entries()) {
         LOG.debug(Objects.toString(entry));
       }
       assertEquals(6, perms.size());
@@ -223,15 +221,13 @@ public class TestNamespaceCommands extends SecureTestUtil {
       assertTrue(result != null);
       perms = AccessControlLists.getNamespacePermissions(conf, TEST_NAMESPACE);
       assertEquals(7, perms.size());
-      List<TablePermission> namespacePerms = perms.get(userTestNamespace);
+      List<UserPermission> namespacePerms = perms.get(userTestNamespace);
       assertTrue(perms.containsKey(userTestNamespace));
       assertEquals(1, namespacePerms.size());
       assertEquals(TEST_NAMESPACE,
-        namespacePerms.get(0).getNamespace());
-      assertEquals(null, namespacePerms.get(0).getFamily());
-      assertEquals(null, namespacePerms.get(0).getQualifier());
-      assertEquals(1, namespacePerms.get(0).getActions().length);
-      assertEquals(Permission.Action.WRITE, namespacePerms.get(0).getActions()[0]);
+        ((NamespacePermission) namespacePerms.get(0).getPermission()).getNamespace());
+      assertEquals(1, namespacePerms.get(0).getPermission().getActions().length);
+      assertEquals(Permission.Action.WRITE, namespacePerms.get(0).getPermission().getActions()[0]);
 
       // Revoke and check state in ACL table
       revokeFromNamespace(UTIL, userTestNamespace, TEST_NAMESPACE,
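
A sketch of the pattern the updated test relies on (only the accessor names
come from the patch; the rest is placeholder): namespace grants come back as
UserPermission values wrapping a NamespacePermission, so namespace-specific
fields require a downcast.

    UserPermission up = namespacePerms.get(0);
    if (up.getPermission() instanceof NamespacePermission) {
      NamespacePermission nsPerm = (NamespacePermission) up.getPermission();
      String ns = nsPerm.getNamespace();                  // e.g. TEST_NAMESPACE
      Permission.Action[] actions = nsPerm.getActions();  // e.g. { WRITE }
    }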

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java
index 55873bb..5aa9ed6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestRpcAccessChecks.java
@@ -147,8 +147,10 @@ public class TestRpcAccessChecks {
         User.createUserForTesting(conf, "user_group_admin", new String[] { GROUP_ADMIN });
 
     // Assign permissions to users and groups
-    SecureTestUtil.grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(), Permission.Action.ADMIN);
-    SecureTestUtil.grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN), Permission.Action.ADMIN);
+    SecureTestUtil.grantGlobal(TEST_UTIL, USER_ADMIN.getShortName(),
+      Permission.Action.ADMIN, Permission.Action.CREATE);
+    SecureTestUtil.grantGlobal(TEST_UTIL, toGroupEntry(GROUP_ADMIN),
+      Permission.Action.ADMIN, Permission.Action.CREATE);
     // No permissions to USER_NON_ADMIN
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
index 7243690..1c478b2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
@@ -143,26 +143,24 @@ public class TestTablePermissions {
     try (Connection connection = ConnectionFactory.createConnection(conf)) {
       // add some permissions
       addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("george"), TEST_TABLE, null, (byte[])null,
-              UserPermission.Action.READ, UserPermission.Action.WRITE),
-              connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+        new UserPermission("george", TEST_TABLE, Permission.Action.READ, Permission.Action.WRITE),
+        connection.getTable(AccessControlLists.ACL_TABLE_NAME));
       addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("hubert"), TEST_TABLE, null, (byte[])null,
-              UserPermission.Action.READ),
-          connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+        new UserPermission("hubert", TEST_TABLE, Permission.Action.READ),
+        connection.getTable(AccessControlLists.ACL_TABLE_NAME));
       addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("humphrey"),
-              TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER,
-              UserPermission.Action.READ),
-          connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+        new UserPermission("humphrey", TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER,
+          Permission.Action.READ),
+        connection.getTable(AccessControlLists.ACL_TABLE_NAME));
     }
     // retrieve the same
-    ListMultimap<String,TablePermission> perms =
+    ListMultimap<String, UserPermission> perms =
         AccessControlLists.getTablePermissions(conf, TEST_TABLE);
-    List<TablePermission> userPerms = perms.get("george");
+    List<UserPermission> userPerms = perms.get("george");
     assertNotNull("Should have permissions for george", userPerms);
     assertEquals("Should have 1 permission for george", 1, userPerms.size());
-    TablePermission permission = userPerms.get(0);
+    assertEquals(Permission.Scope.TABLE, userPerms.get(0).getAccessScope());
+    TablePermission permission = (TablePermission) userPerms.get(0).getPermission();
     assertEquals("Permission should be for " + TEST_TABLE,
         TEST_TABLE, permission.getTableName());
     assertNull("Column family should be empty", permission.getFamily());
@@ -170,14 +168,15 @@ public class TestTablePermissions {
     // check actions
     assertNotNull(permission.getActions());
     assertEquals(2, permission.getActions().length);
-    List<TablePermission.Action> actions = Arrays.asList(permission.getActions());
+    List<Permission.Action> actions = Arrays.asList(permission.getActions());
     assertTrue(actions.contains(TablePermission.Action.READ));
     assertTrue(actions.contains(TablePermission.Action.WRITE));
 
     userPerms = perms.get("hubert");
     assertNotNull("Should have permissions for hubert", userPerms);
     assertEquals("Should have 1 permission for hubert", 1, userPerms.size());
-    permission = userPerms.get(0);
+    assertEquals(Permission.Scope.TABLE, userPerms.get(0).getAccessScope());
+    permission = (TablePermission) userPerms.get(0).getPermission();
     assertEquals("Permission should be for " + TEST_TABLE,
         TEST_TABLE, permission.getTableName());
     assertNull("Column family should be empty", permission.getFamily());
@@ -192,7 +191,8 @@ public class TestTablePermissions {
     userPerms = perms.get("humphrey");
     assertNotNull("Should have permissions for humphrey", userPerms);
     assertEquals("Should have 1 permission for humphrey", 1, userPerms.size());
-    permission = userPerms.get(0);
+    assertEquals(Permission.Scope.TABLE, userPerms.get(0).getAccessScope());
+    permission = (TablePermission) userPerms.get(0).getPermission();
     assertEquals("Permission should be for " + TEST_TABLE,
         TEST_TABLE, permission.getTableName());
     assertTrue("Permission should be for family " + Bytes.toString(TEST_FAMILY),
@@ -211,11 +211,11 @@ public class TestTablePermissions {
     try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(AccessControlLists.ACL_TABLE_NAME)) {
       AccessControlLists.addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("hubert"), TEST_TABLE2, null, (byte[])null,
-              TablePermission.Action.READ, TablePermission.Action.WRITE), table);
+        new UserPermission("hubert", TEST_TABLE2, Permission.Action.READ, Permission.Action.WRITE),
+        table);
     }
     // check full load
-    Map<byte[], ListMultimap<String,TablePermission>> allPerms =
+    Map<byte[], ListMultimap<String, UserPermission>> allPerms =
         AccessControlLists.loadAll(conf);
     assertEquals("Full permission map should have entries for both test tables",
         2, allPerms.size());
@@ -223,20 +223,22 @@ public class TestTablePermissions {
     userPerms = allPerms.get(TEST_TABLE.getName()).get("hubert");
     assertNotNull(userPerms);
     assertEquals(1, userPerms.size());
-    permission = userPerms.get(0);
+    assertEquals(Permission.Scope.TABLE, userPerms.get(0).getAccessScope());
+    permission = (TablePermission) userPerms.get(0).getPermission();
     assertEquals(TEST_TABLE, permission.getTableName());
     assertEquals(1, permission.getActions().length);
-    assertEquals(TablePermission.Action.READ, permission.getActions()[0]);
+    assertEquals(Permission.Action.READ, permission.getActions()[0]);
 
     userPerms = allPerms.get(TEST_TABLE2.getName()).get("hubert");
     assertNotNull(userPerms);
     assertEquals(1, userPerms.size());
-    permission = userPerms.get(0);
+    assertEquals(Permission.Scope.TABLE, userPerms.get(0).getAccessScope());
+    permission = (TablePermission) userPerms.get(0).getPermission();
     assertEquals(TEST_TABLE2, permission.getTableName());
     assertEquals(2, permission.getActions().length);
     actions = Arrays.asList(permission.getActions());
-    assertTrue(actions.contains(TablePermission.Action.READ));
-    assertTrue(actions.contains(TablePermission.Action.WRITE));
+    assertTrue(actions.contains(Permission.Action.READ));
+    assertTrue(actions.contains(Permission.Action.WRITE));
   }
 
   @Test
@@ -244,30 +246,27 @@ public class TestTablePermissions {
     Configuration conf = UTIL.getConfiguration();
     try (Connection connection = ConnectionFactory.createConnection(conf)) {
       addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("albert"), TEST_TABLE, null,
-              (byte[])null, TablePermission.Action.READ), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+        new UserPermission("albert", TEST_TABLE, Permission.Action.READ),
+          connection.getTable(AccessControlLists.ACL_TABLE_NAME));
       addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("betty"), TEST_TABLE, null,
-              (byte[])null, TablePermission.Action.READ,
-              TablePermission.Action.WRITE), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+        new UserPermission("betty", TEST_TABLE, Permission.Action.READ, Permission.Action.WRITE),
+          connection.getTable(AccessControlLists.ACL_TABLE_NAME));
       addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("clark"),
-              TEST_TABLE, TEST_FAMILY,
-              TablePermission.Action.READ), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+        new UserPermission("clark", TEST_TABLE, TEST_FAMILY, Permission.Action.READ),
+          connection.getTable(AccessControlLists.ACL_TABLE_NAME));
       addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("dwight"),
-              TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER,
-              TablePermission.Action.WRITE), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+        new UserPermission("dwight", TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER,
+          Permission.Action.WRITE), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
     }
     // verify permissions survive changes in table metadata
-    ListMultimap<String,TablePermission> preperms =
+    ListMultimap<String, UserPermission> preperms =
         AccessControlLists.getTablePermissions(conf, TEST_TABLE);
 
     Table table = UTIL.getConnection().getTable(TEST_TABLE);
-    table.put(new Put(Bytes.toBytes("row1"))
-            .addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v1")));
-    table.put(new Put(Bytes.toBytes("row2"))
-            .addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2")));
+    table.put(
+      new Put(Bytes.toBytes("row1")).addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v1")));
+    table.put(
+      new Put(Bytes.toBytes("row2")).addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes("v2")));
     Admin admin = UTIL.getAdmin();
     try {
       admin.split(TEST_TABLE);
@@ -283,7 +282,7 @@ public class TestTablePermissions {
     // wait for split
     Thread.sleep(10000);
 
-    ListMultimap<String,TablePermission> postperms =
+    ListMultimap<String, UserPermission> postperms =
         AccessControlLists.getTablePermissions(conf, TEST_TABLE);
 
     checkMultimapEqual(preperms, postperms);
@@ -292,41 +291,42 @@ public class TestTablePermissions {
   @Test
   public void testSerialization() throws Exception {
     Configuration conf = UTIL.getConfiguration();
-    ListMultimap<String,TablePermission> permissions = createPermissions();
+    ListMultimap<String, UserPermission> permissions = createPermissions();
     byte[] permsData = AccessControlLists.writePermissionsAsBytes(permissions, conf);
 
-    ListMultimap<String, TablePermission> copy =
-        AccessControlLists.readPermissions(permsData, conf);
+    ListMultimap<String, UserPermission> copy =
+        AccessControlLists.readUserPermission(permsData, conf);
 
     checkMultimapEqual(permissions, copy);
   }
 
-  private ListMultimap<String,TablePermission> createPermissions() {
-    ListMultimap<String,TablePermission> permissions = ArrayListMultimap.create();
-    permissions.put("george", new TablePermission(TEST_TABLE, null,
-        TablePermission.Action.READ));
-    permissions.put("george", new TablePermission(TEST_TABLE, TEST_FAMILY,
-        TablePermission.Action.WRITE));
-    permissions.put("george", new TablePermission(TEST_TABLE2, null,
-        TablePermission.Action.READ));
-    permissions.put("hubert", new TablePermission(TEST_TABLE2, null,
-        TablePermission.Action.READ, TablePermission.Action.WRITE));
-    permissions.put("bruce",new TablePermission(TEST_NAMESPACE,
-        TablePermission.Action.READ));
+  private ListMultimap<String, UserPermission> createPermissions() {
+    ListMultimap<String, UserPermission> permissions = ArrayListMultimap.create();
+    permissions.put("george",
+      new UserPermission("george", TEST_TABLE, Permission.Action.READ));
+    permissions.put("george",
+      new UserPermission("george", TEST_TABLE, TEST_FAMILY, Permission.Action.WRITE));
+    permissions.put("george",
+      new UserPermission("george", TEST_TABLE2, Permission.Action.READ));
+    permissions.put("hubert",
+      new UserPermission("hubert", TEST_TABLE2, Permission.Action.READ,
+        Permission.Action.WRITE));
+    permissions.put("bruce",
+      new UserPermission("bruce", TEST_NAMESPACE, Permission.Action.READ));
     return permissions;
   }
 
-  public void checkMultimapEqual(ListMultimap<String,TablePermission> first,
-      ListMultimap<String,TablePermission> second) {
+  public void checkMultimapEqual(ListMultimap<String, UserPermission> first,
+      ListMultimap<String, UserPermission> second) {
     assertEquals(first.size(), second.size());
     for (String key : first.keySet()) {
-      List<TablePermission> firstPerms = first.get(key);
-      List<TablePermission> secondPerms = second.get(key);
+      List<UserPermission> firstPerms = first.get(key);
+      List<UserPermission> secondPerms = second.get(key);
       assertNotNull(secondPerms);
       assertEquals(firstPerms.size(), secondPerms.size());
       LOG.info("First permissions: "+firstPerms.toString());
       LOG.info("Second permissions: "+secondPerms.toString());
-      for (TablePermission p : firstPerms) {
+      for (UserPermission p : firstPerms) {
         assertTrue("Permission "+p.toString()+" not found", secondPerms.contains(p));
       }
     }
@@ -334,13 +334,13 @@ public class TestTablePermissions {
 
   @Test
   public void testEquals() throws Exception {
-    TablePermission p1 = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ);
-    TablePermission p2 = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ);
+    Permission p1 = new TablePermission(TEST_TABLE, Permission.Action.READ);
+    Permission p2 = new TablePermission(TEST_TABLE, Permission.Action.READ);
     assertTrue(p1.equals(p2));
     assertTrue(p2.equals(p1));
 
-    p1 = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ, TablePermission.Action.WRITE);
-    p2 = new TablePermission(TEST_TABLE, null, TablePermission.Action.WRITE, TablePermission.Action.READ);
+    p1 = new TablePermission(TEST_TABLE, TablePermission.Action.READ, TablePermission.Action.WRITE);
+    p2 = new TablePermission(TEST_TABLE, TablePermission.Action.WRITE, TablePermission.Action.READ);
     assertTrue(p1.equals(p2));
     assertTrue(p2.equals(p1));
 
@@ -354,34 +354,30 @@ public class TestTablePermissions {
     assertTrue(p1.equals(p2));
     assertTrue(p2.equals(p1));
 
-    p1 = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ);
+    p1 = new TablePermission(TEST_TABLE, TablePermission.Action.READ);
     p2 = new TablePermission(TEST_TABLE, TEST_FAMILY, TablePermission.Action.READ);
     assertFalse(p1.equals(p2));
     assertFalse(p2.equals(p1));
 
-    p1 = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ);
-    p2 = new TablePermission(TEST_TABLE, null, TablePermission.Action.WRITE);
+    p1 = new TablePermission(TEST_TABLE, TablePermission.Action.READ);
+    p2 = new TablePermission(TEST_TABLE, TablePermission.Action.WRITE);
     assertFalse(p1.equals(p2));
     assertFalse(p2.equals(p1));
-    p2 = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ, TablePermission.Action.WRITE);
+    p2 = new TablePermission(TEST_TABLE, TablePermission.Action.READ, TablePermission.Action.WRITE);
     assertFalse(p1.equals(p2));
     assertFalse(p2.equals(p1));
 
-    p1 = new TablePermission(TEST_TABLE, null, TablePermission.Action.READ);
-    p2 = new TablePermission(TEST_TABLE2, null, TablePermission.Action.READ);
+    p1 = new TablePermission(TEST_TABLE, TablePermission.Action.READ);
+    p2 = new TablePermission(TEST_TABLE2, TablePermission.Action.READ);
     assertFalse(p1.equals(p2));
     assertFalse(p2.equals(p1));
 
-    p2 = new TablePermission(TEST_TABLE, null);
-    assertFalse(p1.equals(p2));
-    assertFalse(p2.equals(p1));
-
-    p1 = new TablePermission(TEST_NAMESPACE, TablePermission.Action.READ);
-    p2 = new TablePermission(TEST_NAMESPACE, TablePermission.Action.READ);
+    p1 = new NamespacePermission(TEST_NAMESPACE, TablePermission.Action.READ);
+    p2 = new NamespacePermission(TEST_NAMESPACE, TablePermission.Action.READ);
     assertEquals(p1, p2);
 
-    p1 = new TablePermission(TEST_NAMESPACE, TablePermission.Action.READ);
-    p2 = new TablePermission(TEST_NAMESPACE2, TablePermission.Action.READ);
+    p1 = new NamespacePermission(TEST_NAMESPACE, TablePermission.Action.READ);
+    p2 = new NamespacePermission(TEST_NAMESPACE2, TablePermission.Action.READ);
     assertFalse(p1.equals(p2));
     assertFalse(p2.equals(p1));
   }
@@ -393,56 +389,58 @@ public class TestTablePermissions {
     // add some permissions
     try (Connection connection = ConnectionFactory.createConnection(conf)) {
       addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("user1"),
+          new UserPermission("user1",
               Permission.Action.READ, Permission.Action.WRITE), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
       addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("user2"),
+          new UserPermission("user2",
               Permission.Action.CREATE), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
       addUserPermission(conf,
-          new UserPermission(Bytes.toBytes("user3"),
+          new UserPermission("user3",
               Permission.Action.ADMIN, Permission.Action.READ, Permission.Action.CREATE),
           connection.getTable(AccessControlLists.ACL_TABLE_NAME));
     }
-    ListMultimap<String,TablePermission> perms = AccessControlLists.getTablePermissions(conf, null);
-    List<TablePermission> user1Perms = perms.get("user1");
+    ListMultimap<String, UserPermission> perms =
+      AccessControlLists.getTablePermissions(conf, null);
+    List<UserPermission> user1Perms = perms.get("user1");
     assertEquals("Should have 1 permission for user1", 1, user1Perms.size());
     assertEquals("user1 should have WRITE permission",
                  new Permission.Action[] { Permission.Action.READ, Permission.Action.WRITE },
-                 user1Perms.get(0).getActions());
+                 user1Perms.get(0).getPermission().getActions());
 
-    List<TablePermission> user2Perms = perms.get("user2");
+    List<UserPermission> user2Perms = perms.get("user2");
     assertEquals("Should have 1 permission for user2", 1, user2Perms.size());
     assertEquals("user2 should have CREATE permission",
                  new Permission.Action[] { Permission.Action.CREATE },
-                 user2Perms.get(0).getActions());
+                 user2Perms.get(0).getPermission().getActions());
 
-    List<TablePermission> user3Perms = perms.get("user3");
+    List<UserPermission> user3Perms = perms.get("user3");
     assertEquals("Should have 1 permission for user3", 1, user3Perms.size());
     assertEquals("user3 should have ADMIN, READ, CREATE permission",
                  new Permission.Action[] {
                     Permission.Action.READ, Permission.Action.CREATE, Permission.Action.ADMIN
                  },
-                 user3Perms.get(0).getActions());
+                 user3Perms.get(0).getPermission().getActions());
   }
 
   @Test
   public void testAuthManager() throws Exception {
     Configuration conf = UTIL.getConfiguration();
-    /* test a race condition causing TableAuthManager to sometimes fail global permissions checks
+    /**
+     * test a race condition causing AuthManager to sometimes fail global permissions checks
      * when the global cache is being updated
      */
-    TableAuthManager authManager = TableAuthManager.getOrCreate(ZKW, conf);
+    AuthManager authManager = AuthManager.getOrCreate(ZKW, conf);
     // currently running user is the system user and should have global admin perms
     User currentUser = User.getCurrent();
-    assertTrue(authManager.authorize(currentUser, Permission.Action.ADMIN));
+    assertTrue(authManager.authorizeUserGlobal(currentUser, Permission.Action.ADMIN));
     try (Connection connection = ConnectionFactory.createConnection(conf)) {
       for (int i=1; i<=50; i++) {
-        addUserPermission(conf, new UserPermission(Bytes.toBytes("testauth"+i),
-            Permission.Action.ADMIN, Permission.Action.READ, Permission.Action.WRITE),
-            connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+        addUserPermission(conf, new UserPermission("testauth"+i,
+          Permission.Action.ADMIN, Permission.Action.READ, Permission.Action.WRITE),
+          connection.getTable(AccessControlLists.ACL_TABLE_NAME));
         // make sure the system user still shows as authorized
         assertTrue("Failed current user auth check on iter "+i,
-            authManager.authorize(currentUser, Permission.Action.ADMIN));
+          authManager.authorizeUserGlobal(currentUser, Permission.Action.ADMIN));
       }
     }
   }
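
For orientation (sketch only; "zkWatcher" and "tableName" are placeholders):
the commit replaces the TableAuthManager singleton with AuthManager, and the
overloaded authorize(...) calls with scope-named methods, as exercised above.

    AuthManager authManager = AuthManager.getOrCreate(zkWatcher, conf);
    User current = User.getCurrent();
    // Scope is now explicit in the method name:
    boolean globalAdmin =
        authManager.authorizeUserGlobal(current, Permission.Action.ADMIN);
    boolean tableRead =
        authManager.authorizeUserTable(current, tableName, Permission.Action.READ);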

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java
index c8ab863..cfd6512 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionWatcher.java
@@ -41,6 +41,9 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
+import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
+
 /**
  * Test the reading and writing of access permissions to and from zookeeper.
  */
@@ -53,8 +56,8 @@ public class TestZKPermissionWatcher {
 
   private static final Logger LOG = LoggerFactory.getLogger(TestZKPermissionWatcher.class);
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static TableAuthManager AUTH_A;
-  private static TableAuthManager AUTH_B;
+  private static AuthManager AUTH_A;
+  private static AuthManager AUTH_B;
   private final static Abortable ABORTABLE = new Abortable() {
     private final AtomicBoolean abort = new AtomicBoolean(false);
 
@@ -81,9 +84,9 @@ public class TestZKPermissionWatcher {
 
     // start minicluster
     UTIL.startMiniCluster();
-    AUTH_A = TableAuthManager.getOrCreate(new ZKWatcher(conf,
+    AUTH_A = AuthManager.getOrCreate(new ZKWatcher(conf,
       "TestZKPermissionsWatcher_1", ABORTABLE), conf);
-    AUTH_B = TableAuthManager.getOrCreate(new ZKWatcher(conf,
+    AUTH_B = AuthManager.getOrCreate(new ZKWatcher(conf,
       "TestZKPermissionsWatcher_2", ABORTABLE), conf);
   }
 
@@ -98,30 +101,25 @@ public class TestZKPermissionWatcher {
     User george = User.createUserForTesting(conf, "george", new String[] { });
     User hubert = User.createUserForTesting(conf, "hubert", new String[] { });
 
-    assertFalse(AUTH_A.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertFalse(AUTH_A.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
-    assertFalse(AUTH_A.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertFalse(AUTH_A.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
-
-    assertFalse(AUTH_B.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertFalse(AUTH_B.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
-    assertFalse(AUTH_B.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertFalse(AUTH_B.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
+    assertFalse(AUTH_A.authorizeUserTable(george, TEST_TABLE, Permission.Action.READ));
+    assertFalse(AUTH_A.authorizeUserTable(george, TEST_TABLE, Permission.Action.WRITE));
+    assertFalse(AUTH_A.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.READ));
+    assertFalse(AUTH_A.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.WRITE));
+
+    assertFalse(AUTH_B.authorizeUserTable(george, TEST_TABLE, Permission.Action.READ));
+    assertFalse(AUTH_B.authorizeUserTable(george, TEST_TABLE, Permission.Action.WRITE));
+    assertFalse(AUTH_B.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.READ));
+    assertFalse(AUTH_B.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.WRITE));
 
     // update ACL: george RW
-    List<TablePermission> acl = new ArrayList<>(1);
-    acl.add(new TablePermission(TEST_TABLE, null, TablePermission.Action.READ,
-      TablePermission.Action.WRITE));
+    List<UserPermission> acl = new ArrayList<>(1);
+    acl.add(new UserPermission(george.getShortName(), TEST_TABLE,
+      Permission.Action.READ, Permission.Action.WRITE));
+    ListMultimap<String, UserPermission> multimap = ArrayListMultimap.create();
+    multimap.putAll(george.getShortName(), acl);
+    byte[] serialized = AccessControlLists.writePermissionsAsBytes(multimap, conf);
+    AUTH_A.getZKPermissionWatcher().writeToZookeeper(TEST_TABLE.getName(), serialized);
     final long mtimeB = AUTH_B.getMTime();
-    AUTH_A.setTableUserPermissions(george.getShortName(), TEST_TABLE, acl);
     // Wait for the update to propagate
     UTIL.waitFor(10000, 100, new Predicate<Exception>() {
       @Override
@@ -132,28 +130,22 @@ public class TestZKPermissionWatcher {
     Thread.sleep(1000);
 
     // check it
-    assertTrue(AUTH_A.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertTrue(AUTH_A.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
-    assertTrue(AUTH_B.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertTrue(AUTH_B.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
-    assertFalse(AUTH_A.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertFalse(AUTH_A.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
-    assertFalse(AUTH_B.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertFalse(AUTH_B.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
+    assertTrue(AUTH_A.authorizeUserTable(george, TEST_TABLE, Permission.Action.READ));
+    assertTrue(AUTH_A.authorizeUserTable(george, TEST_TABLE, Permission.Action.WRITE));
+    assertTrue(AUTH_B.authorizeUserTable(george, TEST_TABLE, Permission.Action.READ));
+    assertTrue(AUTH_B.authorizeUserTable(george, TEST_TABLE, Permission.Action.WRITE));
+    assertFalse(AUTH_A.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.READ));
+    assertFalse(AUTH_A.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.WRITE));
+    assertFalse(AUTH_B.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.READ));
+    assertFalse(AUTH_B.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.WRITE));
 
     // update ACL: hubert R
-    acl = new ArrayList<>(1);
-    acl.add(new TablePermission(TEST_TABLE, null, TablePermission.Action.READ));
+    List<UserPermission> acl2 = new ArrayList<>(1);
+    acl2.add(new UserPermission(hubert.getShortName(), TEST_TABLE, TablePermission.Action.READ));
     final long mtimeA = AUTH_A.getMTime();
-    AUTH_B.setTableUserPermissions("hubert", TEST_TABLE, acl);
+    multimap.putAll(hubert.getShortName(), acl2);
+    byte[] serialized2 = AccessControlLists.writePermissionsAsBytes(multimap, conf);
+    AUTH_B.getZKPermissionWatcher().writeToZookeeper(TEST_TABLE.getName(), serialized2);
     // Wait for the update to propagate
     UTIL.waitFor(10000, 100, new Predicate<Exception>() {
       @Override
@@ -164,21 +156,13 @@ public class TestZKPermissionWatcher {
     Thread.sleep(1000);
 
     // check it
-    assertTrue(AUTH_A.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertTrue(AUTH_A.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
-    assertTrue(AUTH_B.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertTrue(AUTH_B.authorizeUser(george, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
-    assertTrue(AUTH_A.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertFalse(AUTH_A.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
-    assertTrue(AUTH_B.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.READ));
-    assertFalse(AUTH_B.authorizeUser(hubert, TEST_TABLE, null,
-      TablePermission.Action.WRITE));
+    assertTrue(AUTH_A.authorizeUserTable(george, TEST_TABLE, Permission.Action.READ));
+    assertTrue(AUTH_A.authorizeUserTable(george, TEST_TABLE, Permission.Action.WRITE));
+    assertTrue(AUTH_B.authorizeUserTable(george, TEST_TABLE, Permission.Action.READ));
+    assertTrue(AUTH_B.authorizeUserTable(george, TEST_TABLE, Permission.Action.WRITE));
+    assertTrue(AUTH_A.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.READ));
+    assertFalse(AUTH_A.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.WRITE));
+    assertTrue(AUTH_B.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.READ));
+    assertFalse(AUTH_B.authorizeUserTable(hubert, TEST_TABLE, Permission.Action.WRITE));
   }
 }
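
The new update path used in this test, condensed into a sketch (the names
follow the hunks above; nothing here is new API beyond what the patch shows):
instead of AUTH_X.setTableUserPermissions(...), permissions are serialized as
a String -> UserPermission multimap and pushed through the ZKPermissionWatcher.

    ListMultimap<String, UserPermission> multimap = ArrayListMultimap.create();
    multimap.put("george", new UserPermission("george", TEST_TABLE,
        Permission.Action.READ, Permission.Action.WRITE));
    byte[] serialized = AccessControlLists.writePermissionsAsBytes(multimap, conf);
    AUTH_A.getZKPermissionWatcher().writeToZookeeper(TEST_TABLE.getName(), serialized);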


[03/51] [abbrv] hbase git commit: HBASE-21255 [acl] Refactor TablePermission into three classes (Global, Namespace, Table)

Posted by el...@apache.org.
HBASE-21255 [acl] Refactor TablePermission into three classes (Global, Namespace, Table)

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/130057f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/130057f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/130057f1

Branch: refs/heads/HBASE-20952
Commit: 130057f13774f6b213cdb06952c805a29d59396e
Parents: 9e42a9e
Author: Reid Chan <re...@apache.org>
Authored: Wed Nov 14 11:12:14 2018 +0800
Committer: Reid Chan <re...@apache.org>
Committed: Thu Nov 15 11:34:16 2018 +0800

----------------------------------------------------------------------
 .../security/access/AccessControlUtil.java      | 202 +++--
 .../hbase/security/access/GlobalPermission.java |  67 ++
 .../security/access/NamespacePermission.java    | 121 +++
 .../hbase/security/access/Permission.java       | 166 ++--
 .../access/ShadedAccessControlUtil.java         | 136 ++--
 .../hbase/security/access/TablePermission.java  | 323 +++-----
 .../hbase/security/access/UserPermission.java   | 191 ++---
 .../java/org/apache/hadoop/hbase/AuthUtil.java  |   2 +-
 .../hbase/rsgroup/TestRSGroupsWithACL.java      |   4 +-
 .../hbase/security/access/AccessChecker.java    |  24 +-
 .../security/access/AccessControlFilter.java    |  16 +-
 .../security/access/AccessControlLists.java     | 206 +++--
 .../hbase/security/access/AccessController.java |  86 +-
 .../hbase/security/access/AuthManager.java      | 608 ++++++++++++++
 .../hbase/security/access/TableAuthManager.java | 787 -------------------
 .../security/access/ZKPermissionWatcher.java    |   6 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |  14 +-
 .../snapshot/SnapshotDescriptionUtils.java      |   8 +-
 .../security/access/TestAccessController.java   |  56 +-
 .../security/access/TestAccessController2.java  |  12 +-
 .../security/access/TestAccessController3.java  |   2 +-
 .../security/access/TestNamespaceCommands.java  |  18 +-
 .../security/access/TestRpcAccessChecks.java    |   6 +-
 .../security/access/TestTablePermissions.java   | 190 +++--
 .../access/TestZKPermissionWatcher.java         | 104 ++-
 25 files changed, 1635 insertions(+), 1720 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
index 1b5a70c..b37440c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java
@@ -47,7 +47,7 @@ public class AccessControlUtil {
   private AccessControlUtil() {}
 
   /**
-   * Create a request to grant user permissions.
+   * Create a request to grant user table permissions.
    *
    * @param username the short user name who to grant permissions
    * @param tableName optional table name the permissions apply
@@ -88,7 +88,7 @@ public class AccessControlUtil {
   }
 
   /**
-   * Create a request to grant user permissions.
+   * Create a request to grant user namespace permissions.
    *
    * @param username the short user name who to grant permissions
    * @param namespace optional table name the permissions apply
@@ -119,7 +119,7 @@ public class AccessControlUtil {
   }
 
   /**
-   * Create a request to revoke user permissions.
+   * Create a request to revoke user global permissions.
    *
    * @param username the short user name whose permissions to be revoked
    * @param actions the permissions to be revoked
@@ -145,7 +145,7 @@ public class AccessControlUtil {
   }
 
   /**
-   * Create a request to revoke user permissions.
+   * Create a request to revoke user namespace permissions.
    *
    * @param username the short user name whose permissions to be revoked
    * @param namespace optional table name the permissions apply
@@ -176,7 +176,7 @@ public class AccessControlUtil {
   }
 
   /**
-   * Create a request to grant user permissions.
+   * Create a request to grant user global permissions.
    *
    * @param username the short user name who to grant permissions
    * @param actions the permissions to be granted
@@ -240,23 +240,6 @@ public class AccessControlUtil {
     return result;
   }
 
-
-  /**
-   * Converts a Permission proto to a client Permission object.
-   *
-   * @param proto the protobuf Permission
-   * @return the converted Permission
-   */
-  public static Permission toPermission(AccessControlProtos.Permission proto) {
-    if (proto.getType() != AccessControlProtos.Permission.Type.Global) {
-      return toTablePermission(proto);
-    } else {
-      List<Permission.Action> actions = toPermissionActions(
-          proto.getGlobalPermission().getActionList());
-      return new Permission(actions.toArray(new Permission.Action[actions.size()]));
-    }
-  }
-
   /**
    * Converts a TablePermission proto to a client TablePermission object.
    * @param proto the protobuf TablePermission
@@ -282,48 +265,45 @@ public class AccessControlUtil {
   }
 
   /**
-   * Converts a Permission proto to a client TablePermission object.
+   * Converts a Permission proto to a client Permission object.
    * @param proto the protobuf Permission
-   * @return the converted TablePermission
+   * @return the converted Permission
    */
-  public static TablePermission toTablePermission(AccessControlProtos.Permission proto) {
-    if(proto.getType() == AccessControlProtos.Permission.Type.Global) {
+  public static Permission toPermission(AccessControlProtos.Permission proto) {
+    if (proto.getType() == AccessControlProtos.Permission.Type.Global) {
       AccessControlProtos.GlobalPermission perm = proto.getGlobalPermission();
       List<Permission.Action> actions = toPermissionActions(perm.getActionList());
-
-      return new TablePermission(null, null, null,
-          actions.toArray(new Permission.Action[actions.size()]));
+      return new GlobalPermission(actions.toArray(new Permission.Action[actions.size()]));
     }
-    if(proto.getType() == AccessControlProtos.Permission.Type.Namespace) {
+    if (proto.getType() == AccessControlProtos.Permission.Type.Namespace) {
       AccessControlProtos.NamespacePermission perm = proto.getNamespacePermission();
       List<Permission.Action> actions = toPermissionActions(perm.getActionList());
-
-      if(!proto.hasNamespacePermission()) {
+      if (!proto.hasNamespacePermission()) {
         throw new IllegalStateException("Namespace must not be empty in NamespacePermission");
       }
-      String namespace = perm.getNamespaceName().toStringUtf8();
-      return new TablePermission(namespace, actions.toArray(new Permission.Action[actions.size()]));
+      return new NamespacePermission(perm.getNamespaceName().toStringUtf8(),
+        actions.toArray(new Permission.Action[actions.size()]));
     }
-    if(proto.getType() == AccessControlProtos.Permission.Type.Table) {
+    if (proto.getType() == AccessControlProtos.Permission.Type.Table) {
       AccessControlProtos.TablePermission perm = proto.getTablePermission();
       List<Permission.Action> actions = toPermissionActions(perm.getActionList());
-
       byte[] qualifier = null;
       byte[] family = null;
       TableName table = null;
-
       if (!perm.hasTableName()) {
         throw new IllegalStateException("TableName cannot be empty");
       }
       table = ProtobufUtil.toTableName(perm.getTableName());
-
-      if (perm.hasFamily()) family = perm.getFamily().toByteArray();
-      if (perm.hasQualifier()) qualifier = perm.getQualifier().toByteArray();
-
+      if (perm.hasFamily()) {
+        family = perm.getFamily().toByteArray();
+      }
+      if (perm.hasQualifier()) {
+        qualifier = perm.getQualifier().toByteArray();
+      }
       return new TablePermission(table, family, qualifier,
-          actions.toArray(new Permission.Action[actions.size()]));
+        actions.toArray(new Permission.Action[actions.size()]));
     }
-    throw new IllegalStateException("Unrecognize Perm Type: "+proto.getType());
+    throw new IllegalStateException("Unrecognize Perm Type: " + proto.getType());
   }
 
   /**
@@ -334,56 +314,51 @@ public class AccessControlUtil {
    */
   public static AccessControlProtos.Permission toPermission(Permission perm) {
     AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder();
-    if (perm instanceof TablePermission) {
-      TablePermission tablePerm = (TablePermission)perm;
-      if(tablePerm.hasNamespace()) {
-        ret.setType(AccessControlProtos.Permission.Type.Namespace);
-
-        AccessControlProtos.NamespacePermission.Builder builder =
-            AccessControlProtos.NamespacePermission.newBuilder();
-        builder.setNamespaceName(ByteString.copyFromUtf8(tablePerm.getNamespace()));
-        Permission.Action[] actions = perm.getActions();
-        if (actions != null) {
-          for (Permission.Action a : actions) {
-            builder.addAction(toPermissionAction(a));
-          }
-        }
-        ret.setNamespacePermission(builder);
-        return ret.build();
-      } else if (tablePerm.hasTable()) {
-        ret.setType(AccessControlProtos.Permission.Type.Table);
-
-        AccessControlProtos.TablePermission.Builder builder =
-            AccessControlProtos.TablePermission.newBuilder();
-        builder.setTableName(ProtobufUtil.toProtoTableName(tablePerm.getTableName()));
-        if (tablePerm.hasFamily()) {
-          builder.setFamily(ByteStringer.wrap(tablePerm.getFamily()));
-        }
-        if (tablePerm.hasQualifier()) {
-          builder.setQualifier(ByteStringer.wrap(tablePerm.getQualifier()));
+    if (perm instanceof NamespacePermission) {
+      NamespacePermission namespace = (NamespacePermission) perm;
+      ret.setType(AccessControlProtos.Permission.Type.Namespace);
+      AccessControlProtos.NamespacePermission.Builder builder =
+        AccessControlProtos.NamespacePermission.newBuilder();
+      builder.setNamespaceName(ByteString.copyFromUtf8(namespace.getNamespace()));
+      Permission.Action[] actions = perm.getActions();
+      if (actions != null) {
+        for (Permission.Action a : actions) {
+          builder.addAction(toPermissionAction(a));
         }
-        Permission.Action actions[] = perm.getActions();
-        if (actions != null) {
-          for (Permission.Action a : actions) {
-            builder.addAction(toPermissionAction(a));
-          }
+      }
+      ret.setNamespacePermission(builder);
+    } else if (perm instanceof TablePermission) {
+      TablePermission table = (TablePermission) perm;
+      ret.setType(AccessControlProtos.Permission.Type.Table);
+      AccessControlProtos.TablePermission.Builder builder =
+        AccessControlProtos.TablePermission.newBuilder();
+      builder.setTableName(ProtobufUtil.toProtoTableName(table.getTableName()));
+      if (table.hasFamily()) {
+        builder.setFamily(ByteStringer.wrap(table.getFamily()));
+      }
+      if (table.hasQualifier()) {
+        builder.setQualifier(ByteStringer.wrap(table.getQualifier()));
+      }
+      Permission.Action[] actions = perm.getActions();
+      if (actions != null) {
+        for (Permission.Action a : actions) {
+          builder.addAction(toPermissionAction(a));
         }
-        ret.setTablePermission(builder);
-        return ret.build();
       }
-    }
-
-    ret.setType(AccessControlProtos.Permission.Type.Global);
-
-    AccessControlProtos.GlobalPermission.Builder builder =
+      ret.setTablePermission(builder);
+    } else {
+      // perm instanceof GlobalPermission
+      ret.setType(AccessControlProtos.Permission.Type.Global);
+      AccessControlProtos.GlobalPermission.Builder builder =
         AccessControlProtos.GlobalPermission.newBuilder();
-    Permission.Action actions[] = perm.getActions();
-    if (actions != null) {
-      for (Permission.Action a: actions) {
-        builder.addAction(toPermissionAction(a));
+      Permission.Action[] actions = perm.getActions();
+      if (actions != null) {
+        for (Permission.Action a: actions) {
+          builder.addAction(toPermissionAction(a));
+        }
       }
+      ret.setGlobalPermission(builder);
     }
-    ret.setGlobalPermission(builder);
     return ret.build();
   }
 
@@ -456,8 +431,8 @@ public class AccessControlUtil {
    */
   public static AccessControlProtos.UserPermission toUserPermission(UserPermission perm) {
     return AccessControlProtos.UserPermission.newBuilder()
-        .setUser(ByteStringer.wrap(perm.getUser()))
-        .setPermission(toPermission(perm))
+        .setUser(ByteString.copyFromUtf8(perm.getUser()))
+        .setPermission(toPermission(perm.getPermission()))
         .build();
   }
 
@@ -480,8 +455,7 @@ public class AccessControlUtil {
    * @return the converted UserPermission
    */
   public static UserPermission toUserPermission(AccessControlProtos.UserPermission proto) {
-    return new UserPermission(proto.getUser().toByteArray(),
-        toTablePermission(proto.getPermission()));
+    return new UserPermission(proto.getUser().toStringUtf8(), toPermission(proto.getPermission()));
   }
 
   /**
@@ -492,15 +466,15 @@ public class AccessControlUtil {
    * @return the protobuf UserTablePermissions
    */
   public static AccessControlProtos.UsersAndPermissions toUserTablePermissions(
-      ListMultimap<String, TablePermission> perm) {
+      ListMultimap<String, UserPermission> perm) {
     AccessControlProtos.UsersAndPermissions.Builder builder =
         AccessControlProtos.UsersAndPermissions.newBuilder();
-    for (Map.Entry<String, Collection<TablePermission>> entry : perm.asMap().entrySet()) {
+    for (Map.Entry<String, Collection<UserPermission>> entry : perm.asMap().entrySet()) {
       AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder =
           AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder();
       userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey()));
-      for (TablePermission tablePerm: entry.getValue()) {
-        userPermBuilder.addPermissions(toPermission(tablePerm));
+      for (UserPermission userPerm: entry.getValue()) {
+        userPermBuilder.addPermissions(toPermission(userPerm.getPermission()));
       }
       builder.addUserPermissions(userPermBuilder.build());
     }
@@ -844,28 +818,46 @@ public class AccessControlUtil {
   }
 
   /**
-   * Convert a protobuf UserTablePermissions to a
-   * ListMultimap&lt;String, TablePermission&gt; where key is username.
-   *
-   * @param proto the protobuf UserPermission
-   * @return the converted UserPermission
+   * Convert a protobuf UserTablePermissions to a ListMultimap&lt;String, UserPermission&gt; where key is the username.
+   * @param proto the proto UsersAndPermissions
+   * @return a ListMultimap with user and its permissions
+   */
+  public static ListMultimap<String, UserPermission> toUserPermission(
+      AccessControlProtos.UsersAndPermissions proto) {
+    ListMultimap<String, UserPermission> userPermission = ArrayListMultimap.create();
+    AccessControlProtos.UsersAndPermissions.UserPermissions userPerm;
+    for (int i = 0; i < proto.getUserPermissionsCount(); i++) {
+      userPerm = proto.getUserPermissions(i);
+      String username = userPerm.getUser().toStringUtf8();
+      for (int j = 0; j < userPerm.getPermissionsCount(); j++) {
+        userPermission.put(username,
+          new UserPermission(username, toPermission(userPerm.getPermissions(j))));
+      }
+    }
+    return userPermission;
+  }
+
+  /**
+   * Convert a protobuf UserTablePermissions to a ListMultimap&lt;String, Permission&gt; where key is the username.
+   * @param proto the proto UsersAndPermissions
+   * @return a ListMultimap with user and its permissions
    */
-  public static ListMultimap<String, TablePermission> toUserTablePermissions(
+  public static ListMultimap<String, Permission> toPermission(
       AccessControlProtos.UsersAndPermissions proto) {
-    ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
+    ListMultimap<String, Permission> perms = ArrayListMultimap.create();
     AccessControlProtos.UsersAndPermissions.UserPermissions userPerm;
     for (int i = 0; i < proto.getUserPermissionsCount(); i++) {
       userPerm = proto.getUserPermissions(i);
+      String username = userPerm.getUser().toStringUtf8();
       for (int j = 0; j < userPerm.getPermissionsCount(); j++) {
-        TablePermission tablePerm = toTablePermission(userPerm.getPermissions(j));
-        perms.put(userPerm.getUser().toStringUtf8(), tablePerm);
+        perms.put(username, toPermission(userPerm.getPermissions(j)));
       }
     }
     return perms;
   }
 
   /**
-   * Create a request to revoke user permissions.
+   * Create a request to revoke user table permissions.
    *
    * @param username the short user name whose permissions to be revoked
    * @param tableName optional table name the permissions apply
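
For reference, a minimal sketch (not part of this patch) of how the reworked
conversion API above might be exercised. The demo class name is hypothetical;
the snippet assumes the UserPermission constructors and the static
AccessControlUtil.toUserPermission() overloads introduced in this diff.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
    import org.apache.hadoop.hbase.security.access.AccessControlUtil;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class PermissionRoundTripDemo {
      public static void main(String[] args) {
        // Build a table-scoped user permission for user "alice".
        UserPermission up = new UserPermission("alice", TableName.valueOf("ns", "tbl"),
            Permission.Action.READ, Permission.Action.WRITE);
        // Convert to protobuf and back; the round-trip should preserve equality.
        AccessControlProtos.UserPermission proto = AccessControlUtil.toUserPermission(up);
        UserPermission back = AccessControlUtil.toUserPermission(proto);
        System.out.println(up.equals(back)); // expected: true
      }
    }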

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java
new file mode 100644
index 0000000..b29317a
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Represents an authorization for access to the whole cluster.
+ */
+@InterfaceAudience.Private
+public class GlobalPermission extends Permission {
+
+  /** Default constructor for Writable, do not use */
+  public GlobalPermission() {
+    super();
+    this.scope = Scope.EMPTY;
+  }
+
+  /**
+   * Construct a global permission.
+   * @param assigned assigned actions
+   */
+  GlobalPermission(Action... assigned) {
+    super(assigned);
+    this.scope = Scope.GLOBAL;
+  }
+
+  /**
+   * Construct a global permission.
+   * @param actionCode assigned actions
+   */
+  GlobalPermission(byte[] actionCode) {
+    super(actionCode);
+    this.scope = Scope.GLOBAL;
+  }
+
+  @Override
+  public int hashCode() {
+    return super.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    return obj instanceof GlobalPermission && super.equals(obj);
+  }
+
+  @Override
+  public String toString() {
+    return "[GlobalPermission: " + rawExpression() + "]";
+  }
+}
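
A small usage sketch for the new class (hypothetical, not part of the patch).
It is assumed to live in the same package, because the GlobalPermission
constructors above are package-private.

    package org.apache.hadoop.hbase.security.access;

    // Hypothetical demo; placed in this package so the package-private
    // GlobalPermission constructors are accessible.
    public class GlobalPermissionDemo {
      public static void main(String[] args) {
        GlobalPermission a = new GlobalPermission(Permission.Action.READ);
        GlobalPermission b = new GlobalPermission(new byte[] { 'R' }); // 'R' is READ's code
        System.out.println(a.equals(b)); // true: same scope, same action set
        System.out.println(a);           // [GlobalPermission: actions=READ]
      }
    }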

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java
new file mode 100644
index 0000000..c7ede96
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java
@@ -0,0 +1,121 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Objects;
+
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Represents an authorization for access to the given namespace.
+ */
+@InterfaceAudience.Private
+public class NamespacePermission extends Permission {
+
+  private String namespace = NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR;
+
+  /** Default constructor for Writable, do not use */
+  public NamespacePermission() {
+    super();
+    this.scope = Scope.EMPTY;
+  }
+
+  /**
+   * Construct a namespace permission.
+   * @param namespace namespace's name
+   * @param assigned assigned actions
+   */
+  public NamespacePermission(String namespace, Action... assigned) {
+    super(assigned);
+    this.namespace = namespace;
+    this.scope = Scope.NAMESPACE;
+  }
+
+  /**
+   * Construct a namespace permission.
+   * @param namespace namespace's name
+   * @param actionCode assigned actions
+   */
+  public NamespacePermission(String namespace, byte[] actionCode) {
+    super(actionCode);
+    this.namespace = namespace;
+    this.scope = Scope.NAMESPACE;
+  }
+
+  public String getNamespace() {
+    return namespace;
+  }
+
+  /**
+   * Check if the given action is granted in the given namespace.
+   * @param namespace namespace's name
+   * @param action action to be checked
+   * @return true if granted, false otherwise
+   */
+  public boolean implies(String namespace, Action action) {
+    return namespace.equals(this.namespace) && implies(action);
+  }
+
+  @Override
+  public boolean equalsExceptActions(Object obj) {
+    if (!(obj instanceof NamespacePermission)) {
+      return false;
+    }
+    NamespacePermission np = (NamespacePermission) obj;
+    return namespace.equals(np.namespace);
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(namespace) + super.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    return equalsExceptActions(obj) && super.equals(obj);
+  }
+
+  @Override
+  public String toString() {
+    return "[NamespacePermission: " + rawExpression() + "]";
+  }
+
+  @Override
+  protected String rawExpression() {
+    StringBuilder raw = new StringBuilder("namespace=").append(namespace).append(", ");
+    return raw.toString() + super.rawExpression();
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    namespace = Bytes.toString(Bytes.readByteArray(in));
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+    Bytes.writeByteArray(out, Bytes.toBytes(namespace));
+  }
+}
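
A brief sketch (hypothetical, not part of the patch) of how the new
namespace-scoped implies() check behaves, assuming the public constructors
above.

    import org.apache.hadoop.hbase.security.access.NamespacePermission;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class NamespacePermissionDemo {
      public static void main(String[] args) {
        // READ-only grant on namespace "analytics".
        NamespacePermission perm =
            new NamespacePermission("analytics", Permission.Action.READ);
        System.out.println(perm.implies("analytics", Permission.Action.READ));  // true
        System.out.println(perm.implies("analytics", Permission.Action.WRITE)); // false
        System.out.println(perm.implies("other", Permission.Action.READ));      // false
      }
    }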

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
index 1e9e60c..d448d3a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
@@ -22,6 +22,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.Map;
 
 import org.apache.yetus.audience.InterfaceAudience;
@@ -30,7 +31,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.VersionedWritable;
 
-import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
 
 /**
  * Base permissions instance representing the ability to perform a given set
@@ -48,21 +49,49 @@ public class Permission extends VersionedWritable {
 
     private final byte code;
     Action(char code) {
-      this.code = (byte)code;
+      this.code = (byte) code;
     }
 
     public byte code() { return code; }
   }
 
+  @InterfaceAudience.Private
+  protected enum Scope {
+    GLOBAL('G'), NAMESPACE('N'), TABLE('T'), EMPTY('E');
+
+    private final byte code;
+    Scope(char code) {
+      this.code = (byte) code;
+    }
+
+    public byte code() {
+      return code;
+    }
+  }
+
   private static final Logger LOG = LoggerFactory.getLogger(Permission.class);
-  protected static final Map<Byte,Action> ACTION_BY_CODE = Maps.newHashMap();
 
-  protected Action[] actions;
+  protected static final Map<Byte, Action> ACTION_BY_CODE;
+  protected static final Map<Byte, Scope> SCOPE_BY_CODE;
+
+  protected EnumSet<Action> actions = EnumSet.noneOf(Action.class);
+  protected Scope scope = Scope.EMPTY;
 
   static {
-    for (Action a : Action.values()) {
-      ACTION_BY_CODE.put(a.code(), a);
-    }
+    ACTION_BY_CODE = ImmutableMap.of(
+      Action.READ.code, Action.READ,
+      Action.WRITE.code, Action.WRITE,
+      Action.EXEC.code, Action.EXEC,
+      Action.CREATE.code, Action.CREATE,
+      Action.ADMIN.code, Action.ADMIN
+    );
+
+    SCOPE_BY_CODE = ImmutableMap.of(
+      Scope.GLOBAL.code, Scope.GLOBAL,
+      Scope.NAMESPACE.code, Scope.NAMESPACE,
+      Scope.TABLE.code, Scope.TABLE,
+      Scope.EMPTY.code, Scope.EMPTY
+    );
   }
 
   /** Empty constructor for Writable implementation.  <b>Do not use.</b> */
@@ -72,75 +101,72 @@ public class Permission extends VersionedWritable {
 
   public Permission(Action... assigned) {
     if (assigned != null && assigned.length > 0) {
-      actions = Arrays.copyOf(assigned, assigned.length);
+      actions.addAll(Arrays.asList(assigned));
     }
   }
 
   public Permission(byte[] actionCodes) {
     if (actionCodes != null) {
-      Action acts[] = new Action[actionCodes.length];
-      int j = 0;
-      for (int i=0; i<actionCodes.length; i++) {
-        byte b = actionCodes[i];
-        Action a = ACTION_BY_CODE.get(b);
-        if (a == null) {
-          LOG.error("Ignoring unknown action code '"+
-              Bytes.toStringBinary(new byte[]{b})+"'");
+      for (byte code : actionCodes) {
+        Action action = ACTION_BY_CODE.get(code);
+        if (action == null) {
+          LOG.error("Ignoring unknown action code '" +
+            Bytes.toStringBinary(new byte[] { code }) + "'");
           continue;
         }
-        acts[j++] = a;
+        actions.add(action);
       }
-      this.actions = Arrays.copyOf(acts, j);
     }
   }
 
   public Action[] getActions() {
-    return actions;
+    return actions.toArray(new Action[actions.size()]);
   }
 
+  /**
+   * Check if the given action is granted.
+   * @param action action to be checked
+   * @return true if granted, false otherwise
+   */
   public boolean implies(Action action) {
-    if (this.actions != null) {
-      for (Action a : this.actions) {
-        if (a == action) {
-          return true;
-        }
-      }
-    }
-
-    return false;
+    return actions.contains(action);
   }
 
   public void setActions(Action[] assigned) {
     if (assigned != null && assigned.length > 0) {
-      actions = Arrays.copyOf(assigned, assigned.length);
+      // setActions should cover the previous actions,
+      // so we call clear here.
+      actions.clear();
+      actions.addAll(Arrays.asList(assigned));
     }
   }
 
+  /**
+   * Check if two permissions are equal regardless of actions. It is useful when
+   * merging a new permission with an existing permission, which requires comparing
+   * the two permissions' other fields.
+   * @param obj the other permission instance
+   * @return true if equals, false otherwise
+   */
+  public boolean equalsExceptActions(Object obj) {
+    return obj instanceof Permission;
+  }
+
   @Override
   public boolean equals(Object obj) {
     if (!(obj instanceof Permission)) {
       return false;
     }
-    Permission other = (Permission)obj;
-    // check actions
-    if (actions == null && other.getActions() == null) {
-      return true;
-    } else if (actions != null && other.getActions() != null) {
-      Action[] otherActions = other.getActions();
-      if (actions.length != otherActions.length) {
-        return false;
-      }
 
-      outer:
-      for (Action a : actions) {
-        for (Action oa : otherActions) {
-          if (a == oa) continue outer;
-        }
+    Permission other = (Permission) obj;
+    if (actions.isEmpty() && other.actions.isEmpty()) {
+      return true;
+    } else if (!actions.isEmpty() && !other.actions.isEmpty()) {
+      if (actions.size() != other.actions.size()) {
         return false;
       }
-      return true;
+      return actions.containsAll(other.actions);
     }
-
     return false;
   }
 
@@ -151,26 +177,28 @@ public class Permission extends VersionedWritable {
     for (Action a : actions) {
       result = prime * result + a.code();
     }
+    result = prime * result + scope.code();
     return result;
   }
 
   @Override
   public String toString() {
-    StringBuilder str = new StringBuilder("[Permission: ")
-        .append("actions=");
+    return "[Permission: " + rawExpression() + "]";
+  }
+
+  protected String rawExpression() {
+    StringBuilder raw = new StringBuilder("actions=");
     if (actions != null) {
-      for (int i=0; i<actions.length; i++) {
-        if (i > 0)
-          str.append(",");
-        if (actions[i] != null)
-          str.append(actions[i].toString());
-        else
-          str.append("NULL");
+      int i = 0;
+      for (Action action : actions) {
+        if (i > 0) {
+          raw.append(",");
+        }
+        raw.append(action != null ? action.toString() : "NULL");
+        i++;
       }
     }
-    str.append("]");
-
-    return str.toString();
+    return raw.toString();
   }
 
   /** @return the object version number */
@@ -182,31 +210,35 @@ public class Permission extends VersionedWritable {
   @Override
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
-    int length = (int)in.readByte();
+    int length = (int) in.readByte();
+    actions = EnumSet.noneOf(Action.class);
     if (length > 0) {
-      actions = new Action[length];
       for (int i = 0; i < length; i++) {
         byte b = in.readByte();
-        Action a = ACTION_BY_CODE.get(b);
-        if (a == null) {
-          throw new IOException("Unknown action code '"+
-              Bytes.toStringBinary(new byte[]{b})+"' in input");
+        Action action = ACTION_BY_CODE.get(b);
+        if (action == null) {
+          throw new IOException("Unknown action code '" +
+            Bytes.toStringBinary(new byte[] { b }) + "' in input");
         }
-        this.actions[i] = a;
+        actions.add(action);
       }
-    } else {
-      actions = new Action[0];
     }
+    scope = SCOPE_BY_CODE.get(in.readByte());
   }
 
   @Override
   public void write(DataOutput out) throws IOException {
     super.write(out);
-    out.writeByte(actions != null ? actions.length : 0);
+    out.writeByte(actions != null ? actions.size() : 0);
     if (actions != null) {
       for (Action a: actions) {
         out.writeByte(a.code());
       }
     }
+    out.writeByte(scope.code());
+  }
+
+  public Scope getAccessScope() {
+    return scope;
   }
 }
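
A short sketch (hypothetical, not part of the patch) illustrating the switch
from an Action[] to an EnumSet: duplicate actions collapse into one entry, and
setActions() now replaces the previous actions rather than appending to them.

    import org.apache.hadoop.hbase.security.access.Permission;

    public class PermissionActionsDemo {
      public static void main(String[] args) {
        // Duplicate actions collapse into a single entry in the backing EnumSet.
        Permission p = new Permission(Permission.Action.READ, Permission.Action.READ);
        System.out.println(p.getActions().length);              // 1
        // setActions() clears the previous actions before adding the new ones.
        p.setActions(new Permission.Action[] { Permission.Action.WRITE });
        System.out.println(p.implies(Permission.Action.READ));  // false
        System.out.println(p.implies(Permission.Action.WRITE)); // true
      }
    }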

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java
index 5a94805..7e36656 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java
@@ -119,14 +119,13 @@ public class ShadedAccessControlUtil {
    * @param proto the protobuf Permission
    * @return the converted TablePermission
    */
-  public static TablePermission toTablePermission(AccessControlProtos.Permission proto) {
+  public static Permission toPermission(AccessControlProtos.Permission proto) {
 
     if (proto.getType() == AccessControlProtos.Permission.Type.Global) {
       AccessControlProtos.GlobalPermission perm = proto.getGlobalPermission();
       List<Action> actions = toPermissionActions(perm.getActionList());
 
-      return new TablePermission(null, null, null,
-          actions.toArray(new Permission.Action[actions.size()]));
+      return new GlobalPermission(actions.toArray(new Permission.Action[actions.size()]));
     }
     if (proto.getType() == AccessControlProtos.Permission.Type.Namespace) {
       AccessControlProtos.NamespacePermission perm = proto.getNamespacePermission();
@@ -135,8 +134,8 @@ public class ShadedAccessControlUtil {
       if (!proto.hasNamespacePermission()) {
         throw new IllegalStateException("Namespace must not be empty in NamespacePermission");
       }
-      String namespace = perm.getNamespaceName().toStringUtf8();
-      return new TablePermission(namespace, actions.toArray(new Permission.Action[actions.size()]));
+      String ns = perm.getNamespaceName().toStringUtf8();
+      return new NamespacePermission(ns, actions.toArray(new Permission.Action[actions.size()]));
     }
     if (proto.getType() == AccessControlProtos.Permission.Type.Table) {
       AccessControlProtos.TablePermission perm = proto.getTablePermission();
@@ -144,12 +143,11 @@ public class ShadedAccessControlUtil {
 
       byte[] qualifier = null;
       byte[] family = null;
-      TableName table = null;
 
       if (!perm.hasTableName()) {
         throw new IllegalStateException("TableName cannot be empty");
       }
-      table = toTableName(perm.getTableName());
+      TableName table = toTableName(perm.getTableName());
 
       if (perm.hasFamily()) family = perm.getFamily().toByteArray();
       if (perm.hasQualifier()) qualifier = perm.getQualifier().toByteArray();
@@ -170,63 +168,58 @@ public class ShadedAccessControlUtil {
     org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Builder ret =
         org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission
             .newBuilder();
-    if (perm instanceof TablePermission) {
-      TablePermission tablePerm = (TablePermission) perm;
-      if (tablePerm.hasNamespace()) {
-        ret.setType(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type.Namespace);
-
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission.Builder builder =
-            org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission
-                .newBuilder();
-        builder.setNamespaceName(org.apache.hbase.thirdparty.com.google.protobuf.ByteString
-            .copyFromUtf8(tablePerm.getNamespace()));
-        Permission.Action[] actions = perm.getActions();
-        if (actions != null) {
-          for (Permission.Action a : actions) {
-            builder.addAction(toPermissionAction(a));
-          }
-        }
-        ret.setNamespacePermission(builder);
-        return ret.build();
-      } else if (tablePerm.hasTable()) {
-        ret.setType(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type.Table);
-
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission.Builder builder =
-            org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission
-                .newBuilder();
-        builder.setTableName(toProtoTableName(tablePerm.getTableName()));
-        if (tablePerm.hasFamily()) {
-          builder.setFamily(ByteString.copyFrom(tablePerm.getFamily()));
-        }
-        if (tablePerm.hasQualifier()) {
-          builder.setQualifier(ByteString.copyFrom(tablePerm.getQualifier()));
+    if (perm instanceof NamespacePermission) {
+      NamespacePermission nsPerm = (NamespacePermission) perm;
+      ret.setType(
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type.Namespace);
+      org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission.Builder builder =
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.NamespacePermission
+          .newBuilder();
+      builder.setNamespaceName(org.apache.hbase.thirdparty.com.google.protobuf.ByteString
+        .copyFromUtf8(nsPerm.getNamespace()));
+      Permission.Action[] actions = perm.getActions();
+      if (actions != null) {
+        for (Permission.Action a : actions) {
+          builder.addAction(toPermissionAction(a));
         }
-        Permission.Action actions[] = perm.getActions();
-        if (actions != null) {
-          for (Permission.Action a : actions) {
-            builder.addAction(toPermissionAction(a));
-          }
+      }
+      ret.setNamespacePermission(builder);
+    } else if (perm instanceof TablePermission) {
+      TablePermission tablePerm = (TablePermission) perm;
+      ret.setType(
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type.Table);
+      org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission.Builder builder =
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.TablePermission
+          .newBuilder();
+      builder.setTableName(toProtoTableName(tablePerm.getTableName()));
+      if (tablePerm.hasFamily()) {
+        builder.setFamily(ByteString.copyFrom(tablePerm.getFamily()));
+      }
+      if (tablePerm.hasQualifier()) {
+        builder.setQualifier(ByteString.copyFrom(tablePerm.getQualifier()));
+      }
+      Permission.Action[] actions = perm.getActions();
+      if (actions != null) {
+        for (Permission.Action a : actions) {
+          builder.addAction(toPermissionAction(a));
         }
-        ret.setTablePermission(builder);
-        return ret.build();
       }
-    }
-
-    ret.setType(
-      org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type.Global);
-
-    org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermission.Builder builder =
+      ret.setTablePermission(builder);
+    } else {
+      // perm.getAccessScope() == Permission.Scope.GLOBAL
+      ret.setType(
+        org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.Permission.Type.Global);
+      org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermission.Builder builder =
         org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GlobalPermission
-            .newBuilder();
-    Permission.Action actions[] = perm.getActions();
-    if (actions != null) {
-      for (Permission.Action a : actions) {
-        builder.addAction(toPermissionAction(a));
+          .newBuilder();
+      Permission.Action[] actions = perm.getActions();
+      if (actions != null) {
+        for (Permission.Action a : actions) {
+          builder.addAction(toPermissionAction(a));
+        }
       }
+      ret.setGlobalPermission(builder);
     }
-    ret.setGlobalPermission(builder);
     return ret.build();
   }
 
@@ -236,15 +229,15 @@ public class ShadedAccessControlUtil {
    * @param proto the protobuf UserPermission
    * @return the converted UserPermission
    */
-  public static ListMultimap<String, TablePermission> toUserTablePermissions(
+  public static ListMultimap<String, Permission> toUserTablePermissions(
       org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions proto) {
-    ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
+    ListMultimap<String, Permission> perms = ArrayListMultimap.create();
     org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions.UserPermissions userPerm;
     for (int i = 0; i < proto.getUserPermissionsCount(); i++) {
       userPerm = proto.getUserPermissions(i);
       for (int j = 0; j < userPerm.getPermissionsCount(); j++) {
-        TablePermission tablePerm = toTablePermission(userPerm.getPermissions(j));
-        perms.put(userPerm.getUser().toStringUtf8(), tablePerm);
+        Permission perm = toPermission(userPerm.getPermissions(j));
+        perms.put(userPerm.getUser().toStringUtf8(), perm);
       }
     }
     return perms;
@@ -258,31 +251,20 @@ public class ShadedAccessControlUtil {
    */
   public static
       org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions
-      toUserTablePermissions(ListMultimap<String, TablePermission> perm) {
+      toUserTablePermissions(ListMultimap<String, UserPermission> perm) {
     org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions.Builder builder =
         org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions
             .newBuilder();
-    for (Map.Entry<String, Collection<TablePermission>> entry : perm.asMap().entrySet()) {
+    for (Map.Entry<String, Collection<UserPermission>> entry : perm.asMap().entrySet()) {
       org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder =
           org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UsersAndPermissions.UserPermissions
               .newBuilder();
       userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey()));
-      for (TablePermission tablePerm : entry.getValue()) {
-        userPermBuilder.addPermissions(toPermission(tablePerm));
+      for (UserPermission userPerm : entry.getValue()) {
+        userPermBuilder.addPermissions(toPermission(userPerm.getPermission()));
       }
       builder.addUserPermissions(userPermBuilder.build());
     }
     return builder.build();
   }
-
-  /**
-   * Converts a user permission proto to a client user permission object.
-   *
-   * @param proto the protobuf UserPermission
-   * @return the converted UserPermission
-   */
-  public static UserPermission toUserPermission(org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.UserPermission proto) {
-    return new UserPermission(proto.getUser().toByteArray(),
-        toTablePermission(proto.getPermission()));
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
index dd0e71d..36ed8e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 /**
  * Represents an authorization for access for the given actions, optionally
  * restricted to the given column family or column qualifier, over the
- * given table.  If the family property is <code>null</code>, it implies
+ * given table. If the family property is <code>null</code>, it implies
  * full table access.
  */
 @InterfaceAudience.Private
@@ -41,114 +41,78 @@ public class TablePermission extends Permission {
   private byte[] family;
   private byte[] qualifier;
 
-  //TODO refactor this class
-  //we need to refacting this into three classes (Global, Table, Namespace)
-  private String namespace;
-
   /** Nullary constructor for Writable, do not use */
   public TablePermission() {
     super();
+    this.scope = Scope.EMPTY;
+  }
+
+  /**
+   * Construct a table permission.
+   * @param table table name
+   * @param assigned assigned actions
+   */
+  public TablePermission(TableName table, Action... assigned) {
+    this(table, null, null, assigned);
   }
 
   /**
-   * Create a new permission for the given table and (optionally) column family,
-   * allowing the given actions.
-   * @param table the table
-   * @param family the family, can be null if a global permission on the table
-   * @param assigned the list of allowed actions
+   * Construct a table:family permission.
+   * @param table table name
+   * @param family family name
+   * @param assigned assigned actions
    */
   public TablePermission(TableName table, byte[] family, Action... assigned) {
     this(table, family, null, assigned);
   }
 
   /**
-   * Creates a new permission for the given table, restricted to the given
-   * column family and qualifier, allowing the assigned actions to be performed.
-   * @param table the table
-   * @param family the family, can be null if a global permission on the table
-   * @param assigned the list of allowed actions
+   * Construct a table:family:qualifier permission.
+   * @param table table name
+   * @param family family name
+   * @param qualifier qualifier name
+   * @param assigned assigned actions
    */
-  public TablePermission(TableName table, byte[] family, byte[] qualifier,
-      Action... assigned) {
+  public TablePermission(TableName table, byte[] family, byte[] qualifier, Action... assigned) {
     super(assigned);
     this.table = table;
     this.family = family;
     this.qualifier = qualifier;
+    this.scope = Scope.TABLE;
   }
 
   /**
-   * Creates a new permission for the given table, family and column qualifier,
-   * allowing the actions matching the provided byte codes to be performed.
-   * @param table the table
-   * @param family the family, can be null if a global permission on the table
-   * @param actionCodes the list of allowed action codes
+   * Construct a table permission.
+   * @param table table name
+   * @param actionCodes assigned actions
    */
-  public TablePermission(TableName table, byte[] family, byte[] qualifier,
-      byte[] actionCodes) {
-    super(actionCodes);
-    this.table = table;
-    this.family = family;
-    this.qualifier = qualifier;
+  public TablePermission(TableName table, byte[] actionCodes) {
+    this(table, null, null, actionCodes);
   }
 
   /**
-   * Creates a new permission for the given namespace or table, restricted to the given
-   * column family and qualifier, allowing the assigned actions to be performed.
-   * @param namespace
-   * @param table the table
-   * @param family the family, can be null if a global permission on the table
-   * @param assigned the list of allowed actions
+   * Construct a table:family permission.
+   * @param table table name
+   * @param family family name
+   * @param actionCodes assigned actions
    */
-  public TablePermission(String namespace, TableName table, byte[] family, byte[] qualifier,
-      Action... assigned) {
-    super(assigned);
-    this.namespace = namespace;
-    this.table = table;
-    this.family = family;
-    this.qualifier = qualifier;
+  public TablePermission(TableName table, byte[] family, byte[] actionCodes) {
+    this(table, family, null, actionCodes);
   }
 
   /**
-   * Creates a new permission for the given namespace or table, family and column qualifier,
-   * allowing the actions matching the provided byte codes to be performed.
-   * @param namespace
-   * @param table the table
-   * @param family the family, can be null if a global permission on the table
-   * @param actionCodes the list of allowed action codes
+   * Construct a table:family:qualifier permission.
+   * @param table table name
+   * @param family family name
+   * @param qualifier qualifier name
+   * @param actionCodes assigned actions
    */
-  public TablePermission(String namespace, TableName table, byte[] family, byte[] qualifier,
-      byte[] actionCodes) {
+  public TablePermission(TableName table, byte[] family, byte[] qualifier, byte[] actionCodes) {
     super(actionCodes);
-    this.namespace = namespace;
     this.table = table;
     this.family = family;
     this.qualifier = qualifier;
-  }
-
-  /**
-   * Creates a new permission for the given namespace,
-   * allowing the actions matching the provided byte codes to be performed.
-   * @param namespace
-   * @param actionCodes the list of allowed action codes
-   */
-  public TablePermission(String namespace, byte[] actionCodes) {
-    super(actionCodes);
-    this.namespace = namespace;
-  }
-
-  /**
-   * Create a new permission for the given namespace,
-   * allowing the given actions.
-   * @param namespace
-   * @param assigned the list of allowed actions
-   */
-  public TablePermission(String namespace, Action... assigned) {
-    super(assigned);
-    this.namespace = namespace;
-  }
-
-  public boolean hasTable() {
-    return table != null;
+    this.scope = Scope.TABLE;
   }
 
   public TableName getTableName() {
@@ -175,65 +139,58 @@ public class TablePermission extends Permission {
     return qualifier;
   }
 
-  public boolean hasNamespace() {
-    return namespace != null;
-  }
-
   public String getNamespace() {
-    return namespace;
+    return table.getNamespaceAsString();
   }
 
   /**
-   * Checks that a given table operation is authorized by this permission
-   * instance.
-   *
-   * @param namespace the namespace where the operation is being performed
-   * @param action the action being requested
-   * @return <code>true</code> if the action within the given scope is allowed
-   *   by this permission, <code>false</code>
+   * Check if the given action can be performed on the given table:family:qualifier.
+   * @param table table name
+   * @param family family name
+   * @param qualifier qualifier name
+   * @param action one of [Read, Write, Create, Exec, Admin]
+   * @return true if the action is allowed, false otherwise
    */
-  public boolean implies(String namespace, Action action) {
-    if (this.namespace == null || !this.namespace.equals(namespace)) {
+  public boolean implies(TableName table, byte[] family, byte[] qualifier, Action action) {
+    if (failCheckTable(table)) {
       return false;
     }
-
-    // check actions
-    return super.implies(action);
+    if (failCheckFamily(family)) {
+      return false;
+    }
+    if (failCheckQualifier(qualifier)) {
+      return false;
+    }
+    return implies(action);
   }
 
   /**
-   * Checks that a given table operation is authorized by this permission
-   * instance.
-   *
-   * @param table the table where the operation is being performed
-   * @param family the column family to which the operation is restricted,
-   *   if <code>null</code> implies "all"
-   * @param qualifier the column qualifier to which the action is restricted,
-   *   if <code>null</code> implies "all"
-   * @param action the action being requested
-   * @return <code>true</code> if the action within the given scope is allowed
-   *   by this permission, <code>false</code>
+   * Check if the given action can be performed on the given table:family.
+   * @param table table name
+   * @param family family name
+   * @param action one of [Read, Write, Create, Exec, Admin]
+   * @return true if the action is allowed, false otherwise
    */
-  public boolean implies(TableName table, byte[] family, byte[] qualifier,
-      Action action) {
-    if (this.table == null || !this.table.equals(table)) {
+  public boolean implies(TableName table, byte[] family, Action action) {
+    if (failCheckTable(table)) {
       return false;
     }
-
-    if (this.family != null &&
-        (family == null ||
-         !Bytes.equals(this.family, family))) {
+    if (failCheckFamily(family)) {
       return false;
     }
+    return implies(action);
+  }
 
-    if (this.qualifier != null &&
-        (qualifier == null ||
-         !Bytes.equals(this.qualifier, qualifier))) {
-      return false;
-    }
+  private boolean failCheckTable(TableName table) {
+    return this.table == null || !this.table.equals(table);
+  }
 
-    // check actions
-    return super.implies(action);
+  private boolean failCheckFamily(byte[] family) {
+    return this.family != null && (family == null || !Bytes.equals(this.family, family));
+  }
+
+  private boolean failCheckQualifier(byte[] qual) {
+    return this.qualifier != null && (qual == null || !Bytes.equals(this.qualifier, qual));
   }
 
   /**
@@ -246,7 +203,7 @@ public class TablePermission extends Permission {
    *   by this permission, otherwise <code>false</code>
    */
   public boolean implies(TableName table, KeyValue kv, Action action) {
-    if (this.table == null || !this.table.equals(table)) {
+    if (failCheckTable(table)) {
       return false;
     }
 
@@ -263,82 +220,34 @@ public class TablePermission extends Permission {
   }
 
   /**
-   * Returns <code>true</code> if this permission matches the given column
-   * family at least.  This only indicates a partial match against the table
-   * and column family, however, and does not guarantee that implies() for the
-   * column same family would return <code>true</code>.  In the case of a
-   * column-qualifier specific permission, for example, implies() would still
-   * return false.
+   * Check if the table, family and qualifier fields of two table permissions are equal.
+   * @param tp the table permission to compare against
+   * @return true if equal, false otherwise
    */
-  public boolean matchesFamily(TableName table, byte[] family, Action action) {
-    if (this.table == null || !this.table.equals(table)) {
+  public boolean tableFieldsEqual(TablePermission tp) {
+    if (tp == null) {
       return false;
     }
 
-    if (this.family != null &&
-        (family == null ||
-         !Bytes.equals(this.family, family))) {
-      return false;
-    }
-
-    // ignore qualifier
-    // check actions
-    return super.implies(action);
+    boolean tEq = (table == null && tp.table == null) || (table != null && table.equals(tp.table));
+    boolean fEq = (family == null && tp.family == null) || Bytes.equals(family, tp.family);
+    boolean qEq = (qualifier == null && tp.qualifier == null) ||
+                   Bytes.equals(qualifier, tp.qualifier);
+    return tEq && fEq && qEq;
   }
 
-  /**
-   * Returns if the given permission matches the given qualifier.
-   * @param table the table name to match
-   * @param family the column family to match
-   * @param qualifier the qualifier name to match
-   * @param action the action requested
-   * @return <code>true</code> if the table, family and qualifier match,
-   *   otherwise <code>false</code>
-   */
-  public boolean matchesFamilyQualifier(TableName table, byte[] family, byte[] qualifier,
-                                Action action) {
-    if (!matchesFamily(table, family, action)) {
-      return false;
-    } else {
-      if (this.qualifier != null &&
-          (qualifier == null ||
-           !Bytes.equals(this.qualifier, qualifier))) {
-        return false;
-      }
-    }
-    return super.implies(action);
-  }
-
-  public boolean tableFieldsEqual(TablePermission other){
-    if (!(((table == null && other.getTableName() == null) ||
-           (table != null && table.equals(other.getTableName()))) &&
-         ((family == null && other.getFamily() == null) ||
-           Bytes.equals(family, other.getFamily())) &&
-         ((qualifier == null && other.getQualifier() == null) ||
-          Bytes.equals(qualifier, other.getQualifier())) &&
-         ((namespace == null && other.getNamespace() == null) ||
-          (namespace != null && namespace.equals(other.getNamespace())))
-    )) {
+  @Override
+  public boolean equalsExceptActions(Object obj) {
+    if (!(obj instanceof TablePermission)) {
       return false;
     }
-    return true;
+    TablePermission other = (TablePermission) obj;
+    return tableFieldsEqual(other);
   }
 
   @Override
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
-    justification="Passed on construction except on constructor not to be used")
   public boolean equals(Object obj) {
-    if (!(obj instanceof TablePermission)) {
-      return false;
-    }
-    TablePermission other = (TablePermission)obj;
-
-    if(!this.tableFieldsEqual(other)){
-      return false;
-    }
-
-    // check actions
-    return super.equals(other);
+    return equalsExceptActions(obj) && super.equals(obj);
   }
 
   @Override
@@ -354,41 +263,24 @@ public class TablePermission extends Permission {
     if (qualifier != null) {
       result = prime * result + Bytes.hashCode(qualifier);
     }
-    if (namespace != null) {
-      result = prime * result + namespace.hashCode();
-    }
     return result;
   }
 
   @Override
   public String toString() {
-    StringBuilder str = new StringBuilder("[TablePermission: ");
-    if(namespace != null) {
-      str.append("namespace=").append(namespace)
+    return "[TablePermission: " + rawExpression() + "]";
+  }
+
+  @Override
+  protected String rawExpression() {
+    StringBuilder raw = new StringBuilder();
+    if (table != null) {
+      raw.append("table=").append(table)
+         .append(", family=").append(family == null ? null : Bytes.toString(family))
+         .append(", qualifier=").append(qualifier == null ? null : Bytes.toString(qualifier))
          .append(", ");
     }
-    if(table != null) {
-       str.append("table=").append(table)
-          .append(", family=")
-          .append(family == null ? null : Bytes.toString(family))
-          .append(", qualifier=")
-          .append(qualifier == null ? null : Bytes.toString(qualifier))
-          .append(", ");
-    }
-    if (actions != null) {
-      str.append("actions=");
-      for (int i=0; i<actions.length; i++) {
-        if (i > 0)
-          str.append(",");
-        if (actions[i] != null)
-          str.append(actions[i].toString());
-        else
-          str.append("NULL");
-      }
-    }
-    str.append("]");
-
-    return str.toString();
+    return raw.toString() + super.rawExpression();
   }
 
   @Override
@@ -404,16 +296,13 @@ public class TablePermission extends Permission {
     if (in.readBoolean()) {
       qualifier = Bytes.readByteArray(in);
     }
-    if(in.readBoolean()) {
-      namespace = Bytes.toString(Bytes.readByteArray(in));
-    }
   }
 
   @Override
   public void write(DataOutput out) throws IOException {
     super.write(out);
     // Explicitly writing null to maintain se/deserialize backward compatibility.
-    Bytes.writeByteArray(out, (table == null) ? null : table.getName());
+    Bytes.writeByteArray(out, table == null ? null : table.getName());
     out.writeBoolean(family != null);
     if (family != null) {
       Bytes.writeByteArray(out, family);
@@ -422,9 +311,5 @@ public class TablePermission extends Permission {
     if (qualifier != null) {
       Bytes.writeByteArray(out, qualifier);
     }
-    out.writeBoolean(namespace != null);
-    if(namespace != null) {
-      Bytes.writeByteArray(out, Bytes.toBytes(namespace));
-    }
   }
 }
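
A quick sketch (hypothetical, not part of the patch) of the refactored
implies() overloads; a family-scoped grant covers all qualifiers in that
family.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.TablePermission;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TablePermissionDemo {
      public static void main(String[] args) {
        TableName tn = TableName.valueOf("t1");
        // Family-scoped READ grant: covers any qualifier under family "f".
        TablePermission perm =
            new TablePermission(tn, Bytes.toBytes("f"), Permission.Action.READ);
        System.out.println(perm.implies(tn, Bytes.toBytes("f"), Permission.Action.READ)); // true
        System.out.println(perm.implies(tn, Bytes.toBytes("g"), Permission.Action.READ)); // false
        System.out.println(perm.implies(tn, Bytes.toBytes("f"), Bytes.toBytes("q"),
            Permission.Action.READ)); // true: this grant leaves the qualifier unrestricted
      }
    }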

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java
index 72bd69f..2a9a109 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java
@@ -18,167 +18,152 @@
 
 package org.apache.hadoop.hbase.security.access;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
+import java.util.Objects;
 
 import org.apache.hadoop.hbase.TableName;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
- * Represents an authorization for access over the given table, column family
- * plus qualifier, for the given user.
+ * UserPermission consists of a user name and a permission.
+ * The permission can be a Global, Namespace or Table permission.
  */
 @InterfaceAudience.Private
-public class UserPermission extends TablePermission {
-  private static final Logger LOG = LoggerFactory.getLogger(UserPermission.class);
+public class UserPermission {
 
-  private byte[] user;
+  private String user;
+  private Permission permission;
 
-  /** Nullary constructor for Writable, do not use */
-  public UserPermission() {
-    super();
+  /**
+   * Construct a global user permission.
+   * @param user user name
+   * @param assigned assigned actions
+   */
+  public UserPermission(String user, Permission.Action... assigned) {
+    this.user = user;
+    this.permission = new GlobalPermission(assigned);
   }
 
   /**
-   * Creates a new instance for the given user.
-   * @param user the user
-   * @param assigned the list of allowed actions
+   * Construct a global user permission.
+   * @param user user name
+   * @param actionCode action codes
    */
-  public UserPermission(byte[] user, Action... assigned) {
-    super(null, null, null, assigned);
+  public UserPermission(String user, byte[] actionCode) {
     this.user = user;
+    this.permission = new GlobalPermission(actionCode);
   }
 
   /**
-   * Creates a new instance for the given user,
-   * matching the actions with the given codes.
-   * @param user the user
-   * @param actionCodes the list of allowed action codes
+   * Construct a namespace user permission.
+   * @param user user name
+   * @param namespace namespace
+   * @param assigned assigned actions
    */
-  public UserPermission(byte[] user, byte[] actionCodes) {
-    super(null, null, null, actionCodes);
+  public UserPermission(String user, String namespace, Permission.Action... assigned) {
     this.user = user;
+    this.permission = new NamespacePermission(namespace, assigned);
   }
 
   /**
-   * Creates a new instance for the given user.
-   * @param user the user
-   * @param namespace
-   * @param assigned the list of allowed actions
+   * Construct a table user permission.
+   * @param user user name
+   * @param tableName table name
+   * @param assigned assigned actions
    */
-  public UserPermission(byte[] user, String namespace, Action... assigned) {
-    super(namespace, assigned);
+  public UserPermission(String user, TableName tableName, Permission.Action... assigned) {
     this.user = user;
+    this.permission = new TablePermission(tableName, assigned);
   }
 
   /**
-   * Creates a new instance for the given user,
-   * matching the actions with the given codes.
-   * @param user the user
-   * @param namespace
-   * @param actionCodes the list of allowed action codes
+   * Construct a table:family user permission.
+   * @param user user name
+   * @param tableName table name
+   * @param family family name of table
+   * @param assigned assigned actions
    */
-  public UserPermission(byte[] user, String namespace, byte[] actionCodes) {
-    super(namespace, actionCodes);
-    this.user = user;
+  public UserPermission(String user, TableName tableName, byte[] family,
+    Permission.Action... assigned) {
+    this(user, tableName, family, null, assigned);
   }
 
   /**
-   * Creates a new instance for the given user, table and column family.
-   * @param user the user
-   * @param table the table
-   * @param family the family, can be null if action is allowed over the entire
-   *   table
-   * @param assigned the list of allowed actions
+   * Construct a table:family:qualifier user permission.
+   * @param user user name
+   * @param tableName table name
+   * @param family family name of table
+   * @param qualifier qualifier name of table
+   * @param assigned assigned actions
    */
-  public UserPermission(byte[] user, TableName table, byte[] family,
-                        Action... assigned) {
-    super(table, family, assigned);
+  public UserPermission(String user, TableName tableName, byte[] family, byte[] qualifier,
+      Permission.Action... assigned) {
     this.user = user;
+    this.permission = new TablePermission(tableName, family, qualifier, assigned);
   }
 
   /**
-   * Creates a new permission for the given user, table, column family and
-   * column qualifier.
-   * @param user the user
-   * @param table the table
-   * @param family the family, can be null if action is allowed over the entire
-   *   table
-   * @param qualifier the column qualifier, can be null if action is allowed
-   *   over the entire column family
-   * @param assigned the list of allowed actions
+   * Construct a table:family:qualifier user permission.
+   * @param user user name
+   * @param tableName table name
+   * @param family family name of table
+   * @param qualifier qualifier name of table
+   * @param actionCodes assigned actions
    */
-  public UserPermission(byte[] user, TableName table, byte[] family,
-                        byte[] qualifier, Action... assigned) {
-    super(table, family, qualifier, assigned);
+  public UserPermission(String user, TableName tableName, byte[] family, byte[] qualifier,
+      byte[] actionCodes) {
     this.user = user;
+    this.permission = new TablePermission(tableName, family, qualifier, actionCodes);
   }
 
   /**
-   * Creates a new instance for the given user, table, column family and
-   * qualifier, matching the actions with the given codes.
-   * @param user the user
-   * @param table the table
-   * @param family the family, can be null if action is allowed over the entire
-   *   table
-   * @param qualifier the column qualifier, can be null if action is allowed
-   *   over the entire column family
-   * @param actionCodes the list of allowed action codes
+   * Construct a user permission from the given permission.
+   * @param user user name
+   * @param permission a Global, Namespace or Table permission
    */
-  public UserPermission(byte[] user, TableName table, byte[] family,
-                        byte[] qualifier, byte[] actionCodes) {
-    super(table, family, qualifier, actionCodes);
+  public UserPermission(String user, Permission permission) {
     this.user = user;
+    this.permission = permission;
   }
 
   /**
-   * Creates a new instance for the given user, table, column family and
-   * qualifier, matching the actions with the given codes.
-   * @param user the user
-   * @param perm a TablePermission
+   * Get the access scope of this permission.
+   * @return the access scope
    */
-  public UserPermission(byte[] user, TablePermission perm) {
-    super(perm.getNamespace(), perm.getTableName(), perm.getFamily(), perm.getQualifier(),
-        perm.actions);
-    this.user = user;
+  public Permission.Scope getAccessScope() {
+    return permission.getAccessScope();
   }
 
-  public byte[] getUser() {
+  public String getUser() {
     return user;
   }
 
-  /**
-   * Returns true if this permission describes a global user permission.
-   */
-  public boolean isGlobal() {
-    return(!hasTable() && !hasNamespace());
+  public Permission getPermission() {
+    return permission;
   }
 
-  @Override
-  public boolean equals(Object obj) {
+  public boolean equalsExceptActions(Object obj) {
     if (!(obj instanceof UserPermission)) {
       return false;
     }
-    UserPermission other = (UserPermission)obj;
+    UserPermission other = (UserPermission) obj;
+    return user.equals(other.user) && permission.equalsExceptActions(other.permission);
+  }
 
-    if ((Bytes.equals(user, other.getUser()) &&
-        super.equals(obj))) {
-      return true;
-    } else {
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof UserPermission)) {
       return false;
     }
+    UserPermission other = (UserPermission) obj;
+    return user.equals(other.user) && permission.equals(other.permission);
   }
 
   @Override
   public int hashCode() {
     final int prime = 37;
-    int result = super.hashCode();
+    int result = permission.hashCode();
     if (user != null) {
-      result = prime * result + Bytes.hashCode(user);
+      result = prime * result + Objects.hashCode(user);
     }
     return result;
   }
@@ -186,20 +171,8 @@ public class UserPermission extends TablePermission {
   @Override
   public String toString() {
     StringBuilder str = new StringBuilder("UserPermission: ")
-        .append("user=").append(Bytes.toString(user))
-        .append(", ").append(super.toString());
+        .append("user=").append(user)
+        .append(", ").append(permission.toString());
     return str.toString();
   }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    super.readFields(in);
-    user = Bytes.readByteArray(in);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    super.write(out);
-    Bytes.writeByteArray(out, user);
-  }
 }
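
A minimal sketch of the refactored construction path, using only the
constructors and accessors visible in the hunk above (the user is now a
String and the permission is composed rather than inherited; "bob", "alice"
and "t1" are illustrative values, and imports are elided):

  // Table-scoped grant: user "bob" may READ table "t1", family "cf".
  UserPermission tablePerm = new UserPermission("bob",
      TableName.valueOf("t1"), Bytes.toBytes("cf"), Permission.Action.READ);

  // Any pre-built Permission (Global, Namespace or Table) can be wrapped directly.
  UserPermission copied = new UserPermission("alice", tablePerm.getPermission());

  String user = tablePerm.getUser();                   // "bob"
  Permission.Scope scope = tablePerm.getAccessScope(); // delegated to the composed permission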

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
index 78da55d..1cf43e1 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
@@ -68,7 +68,7 @@ import org.slf4j.LoggerFactory;
  * an example of configuring a user of this Auth Chore to run on a secure cluster.
  * <pre>
  * </pre>
- * This class will be internal use only from 2.2.0 version, and will transparently work
+ * This class is for internal use only as of version 2.2.0, and will transparently work
  * for kerberized applications. For more, please refer
  * <a href="http://hbase.apache.org/book.html#hbase.secure.configuration">Client-side Configuration for Secure Operation</a>
  *

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java
index b6f6463..59e5601 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsWithACL.java
@@ -35,9 +35,9 @@ import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessControlClient;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
+import org.apache.hadoop.hbase.security.access.AuthManager;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.SecureTestUtil;
-import org.apache.hadoop.hbase.security.access.TableAuthManager;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -203,7 +203,7 @@ public class TestRSGroupsWithACL extends SecureTestUtil{
   public static void tearDownAfterClass() throws Exception {
     cleanUp();
     TEST_UTIL.shutdownMiniCluster();
-    int total = TableAuthManager.getTotalRefCount();
+    int total = AuthManager.getTotalRefCount();
     assertTrue("Unexpected reference count: " + total, total == 0);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
index c31658f..986efd7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessChecker.java
@@ -50,7 +50,7 @@ public final class AccessChecker {
   // TODO: we should move to a design where we don't even instantiate an AccessChecker if
   // authorization is not enabled (like in RSRpcServices), instead of always instantiating one and
   // calling requireXXX() only to do nothing (since authorizationEnabled will be false).
-  private TableAuthManager authManager;
+  private AuthManager authManager;
 
   /** Group service to retrieve the user group information */
   private static Groups groupService;
@@ -75,7 +75,7 @@ public final class AccessChecker {
       throws RuntimeException {
     if (zkw != null) {
       try {
-        this.authManager = TableAuthManager.getOrCreate(zkw, conf);
+        this.authManager = AuthManager.getOrCreate(zkw, conf);
       } catch (IOException ioe) {
         throw new RuntimeException("Error obtaining AccessChecker", ioe);
       }
@@ -87,13 +87,13 @@ public final class AccessChecker {
   }
 
   /**
-   * Releases {@link TableAuthManager}'s reference.
+   * Releases {@link AuthManager}'s reference.
    */
   public void stop() {
-    TableAuthManager.release(authManager);
+    AuthManager.release(authManager);
   }
 
-  public TableAuthManager getAuthManager() {
+  public AuthManager getAuthManager() {
     return authManager;
   }
 
@@ -115,7 +115,7 @@ public final class AccessChecker {
     AuthResult result = null;
 
     for (Action permission : permissions) {
-      if (authManager.hasAccess(user, tableName, permission)) {
+      if (authManager.accessUserTable(user, tableName, permission)) {
         result = AuthResult.allow(request, "Table permission granted",
             user, permission, tableName, null, null);
         break;
@@ -164,7 +164,7 @@ public final class AccessChecker {
       return;
     }
     AuthResult result;
-    if (authManager.authorize(user, perm)) {
+    if (authManager.authorizeUserGlobal(user, perm)) {
       result = AuthResult.allow(request, "Global check allowed", user, perm, tableName, familyMap);
     } else {
       result = AuthResult.deny(request, "Global check failed", user, perm, tableName, familyMap);
@@ -195,7 +195,7 @@ public final class AccessChecker {
       return;
     }
     AuthResult authResult;
-    if (authManager.authorize(user, perm)) {
+    if (authManager.authorizeUserGlobal(user, perm)) {
       authResult = AuthResult.allow(request, "Global check allowed", user, perm, null);
       authResult.getParams().setNamespace(namespace);
       logResult(authResult);
@@ -225,7 +225,7 @@ public final class AccessChecker {
     AuthResult result = null;
 
     for (Action permission : permissions) {
-      if (authManager.authorize(user, namespace, permission)) {
+      if (authManager.authorizeUserNamespace(user, namespace, permission)) {
         result =
             AuthResult.allow(request, "Namespace permission granted", user, permission, namespace);
         break;
@@ -260,7 +260,7 @@ public final class AccessChecker {
     AuthResult result = null;
 
     for (Action permission : permissions) {
-      if (authManager.authorize(user, namespace, permission)) {
+      if (authManager.authorizeUserNamespace(user, namespace, permission)) {
         result =
             AuthResult.allow(request, "Namespace permission granted", user, permission, namespace);
         result.getParams().setTableName(tableName).setFamilies(familyMap);
@@ -299,7 +299,7 @@ public final class AccessChecker {
     AuthResult result = null;
 
     for (Action permission : permissions) {
-      if (authManager.authorize(user, tableName, family, qualifier, permission)) {
+      if (authManager.authorizeUserTable(user, tableName, family, qualifier, permission)) {
         result = AuthResult.allow(request, "Table permission granted",
             user, permission, tableName, family, qualifier);
         break;
@@ -337,7 +337,7 @@ public final class AccessChecker {
     AuthResult result = null;
 
     for (Action permission : permissions) {
-      if (authManager.authorize(user, tableName, null, null, permission)) {
+      if (authManager.authorizeUserTable(user, tableName, permission)) {
         result = AuthResult.allow(request, "Table permission granted",
             user, permission, tableName, null, null);
         result.getParams().setFamily(family).setQualifier(qualifier);
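
For code written against the old TableAuthManager entry points, the renames
in this hunk map one-to-one; a sketch based solely on the call sites changed
above (signatures as shown in the diff):

  // authManager.authorize(user, perm)                      -> authorizeUserGlobal(user, perm)
  // authManager.authorize(user, namespace, perm)           -> authorizeUserNamespace(user, namespace, perm)
  // authManager.authorize(user, table, family, qual, perm) -> authorizeUserTable(user, table, family, qual, perm)
  // authManager.hasAccess(user, table, perm)               -> accessUserTable(user, table, perm)
  boolean allowed = authManager.authorizeUserTable(user, tableName,
      family, qualifier, Permission.Action.READ);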

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
index fd48641..79233df 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlFilter.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hbase.util.SimpleMutableByteRange;
  *
  * <p>
  * TODO: There is room for further performance optimization here.
- * Calling TableAuthManager.authorize() per KeyValue imposes a fair amount of
+ * Calling AuthManager.authorize() per KeyValue imposes a fair amount of
  * overhead.  A more optimized solution might look at the qualifiers where
  * permissions are actually granted and explicitly limit the scan to those.
  * </p>
@@ -58,7 +58,7 @@ class AccessControlFilter extends FilterBase {
     CHECK_CELL_DEFAULT,
   }
 
-  private TableAuthManager authManager;
+  private AuthManager authManager;
   private TableName table;
   private User user;
   private boolean isSystemTable;
@@ -75,7 +75,7 @@ class AccessControlFilter extends FilterBase {
   AccessControlFilter() {
   }
 
-  AccessControlFilter(TableAuthManager mgr, User ugi, TableName tableName,
+  AccessControlFilter(AuthManager mgr, User ugi, TableName tableName,
       Strategy strategy, Map<ByteRange, Integer> cfVsMaxVersions) {
     authManager = mgr;
     table = tableName;
@@ -119,20 +119,20 @@ class AccessControlFilter extends FilterBase {
       return ReturnCode.SKIP;
     }
     // XXX: Compare in place, don't clone
-    byte[] family = CellUtil.cloneFamily(cell);
-    byte[] qualifier = CellUtil.cloneQualifier(cell);
+    byte[] f = CellUtil.cloneFamily(cell);
+    byte[] q = CellUtil.cloneQualifier(cell);
     switch (strategy) {
       // Filter only by checking the table or CF permissions
       case CHECK_TABLE_AND_CF_ONLY: {
-        if (authManager.authorize(user, table, family, qualifier, Permission.Action.READ)) {
+        if (authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ)) {
           return ReturnCode.INCLUDE;
         }
       }
       break;
       // Cell permissions can override table or CF permissions
       case CHECK_CELL_DEFAULT: {
-        if (authManager.authorize(user, table, family, qualifier, Permission.Action.READ) ||
-            authManager.authorize(user, table, cell, Permission.Action.READ)) {
+        if (authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ) ||
+            authManager.authorizeCell(user, table, cell, Permission.Action.READ)) {
           return ReturnCode.INCLUDE;
         }
       }
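
The two strategies differ only in whether a per-cell grant can admit a read
that table/CF permissions alone would deny; condensed from the hunk above:

  // CHECK_TABLE_AND_CF_ONLY: the table/CF grant decides alone.
  // CHECK_CELL_DEFAULT: a cell-level ACL can additionally admit the cell.
  boolean include = authManager.authorizeUserTable(user, table, f, q, Permission.Action.READ)
      || authManager.authorizeCell(user, table, cell, Permission.Action.READ);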


[17/51] [abbrv] hbase git commit: HBASE-21510 Test TestRegisterPeerWorkerWhenRestarting is flakey

Posted by el...@apache.org.
HBASE-21510 Test TestRegisterPeerWorkerWhenRestarting is flakey


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/701526d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/701526d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/701526d1

Branch: refs/heads/HBASE-20952
Commit: 701526d19f284f525dcb6d3e495ffa8132bd1483
Parents: d9c773b
Author: zhangduo <zh...@apache.org>
Authored: Sat Nov 24 19:50:59 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Nov 24 19:50:59 2018 +0800

----------------------------------------------------------------------
 .../replication/TestRegisterPeerWorkerWhenRestarting.java     | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/701526d1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java
index 72aa32d..f46bb41 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.master.replication;
 
 import static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverStandbyState.DISPATCH_WALS_VALUE;
 import static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverStandbyState.UNREGISTER_PEER_FROM_WORKER_STORAGE_VALUE;
-import static org.junit.Assert.assertEquals;
 
 import java.io.IOException;
 import java.io.UncheckedIOException;
@@ -119,9 +118,9 @@ public class TestRegisterPeerWorkerWhenRestarting extends SyncReplicationTestBas
     mt.join();
     FAIL = false;
     t.join();
-    // make sure the new master can finish the transiting
-    assertEquals(SyncReplicationState.DOWNGRADE_ACTIVE,
-      UTIL2.getAdmin().getReplicationPeerSyncReplicationState(PEER_ID));
+    // make sure the new master can finish the transition
+    UTIL2.waitFor(60000, () -> UTIL2.getAdmin()
+      .getReplicationPeerSyncReplicationState(PEER_ID) == SyncReplicationState.DOWNGRADE_ACTIVE);
     verify(UTIL2, 0, 100);
   }
 }
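
The fix swaps a one-shot assertEquals for a bounded poll: the transition to
DOWNGRADE_ACTIVE finishes asynchronously after the master restart, so
asserting immediately after the joins raced it. The pattern in isolation,
assuming an HBaseTestingUtility named util (the 60-second bound matches the
hunk above; peerIsInExpectedState() is a stand-in for the real check):

  // Poll until the predicate holds, failing the test if 60s elapse first.
  util.waitFor(60000, () -> peerIsInExpectedState());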


[16/51] [abbrv] hbase git commit: HBASE-21387 Addendum fix TestSnapshotFileCache

Posted by el...@apache.org.
HBASE-21387 Addendum fix TestSnapshotFileCache

Signed-off-by: zhangduo <zh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d9c773b0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d9c773b0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d9c773b0

Branch: refs/heads/HBASE-20952
Commit: d9c773b0a5689f746c0e45c914b455765540df15
Parents: 6d0dc96
Author: Ted Yu <yu...@gmail.com>
Authored: Sat Nov 24 17:00:08 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Nov 24 17:00:08 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d9c773b0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index 006ca2a..522b1c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -190,7 +190,7 @@ public class SnapshotFileCache implements Stoppable {
     }
     if (lock == null || lock.tryLock()) {
       try {
-        if (snapshotManager == null || snapshotManager.isTakingAnySnapshot()) {
+        if (snapshotManager != null && snapshotManager.isTakingAnySnapshot()) {
           LOG.warn("Not checking unreferenced files since snapshot is running, it will "
               + "skip to clean the HFiles this time");
           return unReferencedFiles;
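
The small-looking change flips the guard's semantics: under the old
condition, a null snapshotManager (as when the cache is constructed without
one, e.g. in TestSnapshotFileCache) short-circuited to true, so the cleaner
never checked unreferenced files. Side by side:

  // Before: null manager => true => always bail out as if snapshotting.
  //   if (snapshotManager == null || snapshotManager.isTakingAnySnapshot()) ...
  // After: bail out only when a manager exists and is actually snapshotting.
  if (snapshotManager != null && snapshotManager.isTakingAnySnapshot()) {
    return unReferencedFiles;
  }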


[40/51] [abbrv] hbase git commit: HBASE-21541 Move MetaTableLocator.verifyRegionLocation to hbase-rsgroup module

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
----------------------------------------------------------------------
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index 7f0fb11..0cebc76 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,75 +17,55 @@
  */
 package org.apache.hadoop.hbase.zookeeper;
 
-import java.io.EOFException;
-import java.io.IOException;
-import java.net.ConnectException;
-import java.net.NoRouteToHostException;
-import java.net.SocketException;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
-import java.util.Locale;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.ipc.FailedServerException;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.MetaRegionServer;
 
 /**
- * Utility class to perform operation (get/wait for/verify/set/delete) on znode in ZooKeeper
- * which keeps hbase:meta region server location.
- *
- * Stateless class with a bunch of static methods. Doesn't manage resources passed in
- * (e.g. Connection, ZKWatcher etc).
- *
- * Meta region location is set by <code>RegionServerServices</code>.
- * This class doesn't use ZK watchers, rather accesses ZK directly.
- *
- * This class it stateless. The only reason it's not made a non-instantiable util class
- * with a collection of static methods is that it'd be rather hard to mock properly in tests.
- *
+ * Utility class to perform operation (get/wait for/verify/set/delete) on znode in ZooKeeper which
+ * keeps hbase:meta region server location.
+ * <p/>
+ * Stateless class with a bunch of static methods. Doesn't manage resources passed in (e.g.
+ * Connection, ZKWatcher etc).
+ * <p/>
+ * Meta region location is set by <code>RegionServerServices</code>. This class doesn't use ZK
+ * watchers, rather accesses ZK directly.
+ * <p/>
  * TODO: rewrite using RPC calls to master to find out about hbase:meta.
  */
 @InterfaceAudience.Private
-public class MetaTableLocator {
+public final class MetaTableLocator {
   private static final Logger LOG = LoggerFactory.getLogger(MetaTableLocator.class);
 
-  // only needed to allow non-timeout infinite waits to stop when cluster shuts down
-  private volatile boolean stopped = false;
+  private MetaTableLocator() {
+  }
 
   /**
    * Checks if the meta region location is available.
    * @return true if meta region location is available, false if not
    */
-  public boolean isLocationAvailable(ZKWatcher zkw) {
+  public static boolean isLocationAvailable(ZKWatcher zkw) {
     return getMetaRegionLocation(zkw) != null;
   }
 
@@ -93,7 +73,7 @@ public class MetaTableLocator {
    * @param zkw ZooKeeper watcher to be used
    * @return meta table regions and their locations.
    */
-  public List<Pair<RegionInfo, ServerName>> getMetaRegionsAndLocations(ZKWatcher zkw) {
+  public static List<Pair<RegionInfo, ServerName>> getMetaRegionsAndLocations(ZKWatcher zkw) {
     return getMetaRegionsAndLocations(zkw, RegionInfo.DEFAULT_REPLICA_ID);
   }
 
@@ -104,7 +84,7 @@ public class MetaTableLocator {
    * @param replicaId the ID of the replica
    * @return meta table regions and their locations.
    */
-  public List<Pair<RegionInfo, ServerName>> getMetaRegionsAndLocations(ZKWatcher zkw,
+  public static List<Pair<RegionInfo, ServerName>> getMetaRegionsAndLocations(ZKWatcher zkw,
       int replicaId) {
     ServerName serverName = getMetaRegionLocation(zkw, replicaId);
     List<Pair<RegionInfo, ServerName>> list = new ArrayList<>(1);
@@ -119,30 +99,30 @@ public class MetaTableLocator {
    * @param zkw ZooKeeper watcher to be used
    * @return List of meta regions
    */
-  public List<RegionInfo> getMetaRegions(ZKWatcher zkw) {
+  public static List<RegionInfo> getMetaRegions(ZKWatcher zkw) {
     return getMetaRegions(zkw, RegionInfo.DEFAULT_REPLICA_ID);
   }
 
   /**
    * Gets the meta regions for the given path and replica ID.
-   *
    * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
    * @param replicaId the ID of the replica
    * @return List of meta regions
    */
-  public List<RegionInfo> getMetaRegions(ZKWatcher zkw, int replicaId) {
+  public static List<RegionInfo> getMetaRegions(ZKWatcher zkw, int replicaId) {
     List<Pair<RegionInfo, ServerName>> result;
     result = getMetaRegionsAndLocations(zkw, replicaId);
     return getListOfRegionInfos(result);
   }
 
-  private List<RegionInfo> getListOfRegionInfos(final List<Pair<RegionInfo, ServerName>> pairs) {
+  private static List<RegionInfo> getListOfRegionInfos(
+      final List<Pair<RegionInfo, ServerName>> pairs) {
     if (pairs == null || pairs.isEmpty()) {
-      return Collections.EMPTY_LIST;
+      return Collections.emptyList();
     }
 
     List<RegionInfo> result = new ArrayList<>(pairs.size());
-    for (Pair<RegionInfo, ServerName> pair: pairs) {
+    for (Pair<RegionInfo, ServerName> pair : pairs) {
       result.add(pair.getFirst());
     }
     return result;
@@ -153,7 +133,7 @@ public class MetaTableLocator {
    * @param zkw zookeeper connection to use
    * @return server name or null if we failed to get the data.
    */
-  public ServerName getMetaRegionLocation(final ZKWatcher zkw) {
+  public static ServerName getMetaRegionLocation(final ZKWatcher zkw) {
     try {
       RegionState state = getMetaRegionState(zkw);
       return state.isOpened() ? state.getServerName() : null;
@@ -168,7 +148,7 @@ public class MetaTableLocator {
    * @param replicaId the ID of the replica
    * @return server name
    */
-  public ServerName getMetaRegionLocation(final ZKWatcher zkw, int replicaId) {
+  public static ServerName getMetaRegionLocation(final ZKWatcher zkw, int replicaId) {
     try {
       RegionState state = getMetaRegionState(zkw, replicaId);
       return state.isOpened() ? state.getServerName() : null;
@@ -178,42 +158,39 @@ public class MetaTableLocator {
   }
 
   /**
-   * Gets the meta region location, if available, and waits for up to the
-   * specified timeout if not immediately available.
-   * Given the zookeeper notification could be delayed, we will try to
-   * get the latest data.
-   *
+   * Gets the meta region location, if available, and waits for up to the specified timeout if not
+   * immediately available. Given the zookeeper notification could be delayed, we will try to get
+   * the latest data.
    * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
    * @param timeout maximum time to wait, in millis
-   * @return server name for server hosting meta region formatted as per
-   * {@link ServerName}, or null if none available
+   * @return server name for server hosting meta region formatted as per {@link ServerName}, or null
+   *         if none available
    * @throws InterruptedException if interrupted while waiting
    * @throws NotAllMetaRegionsOnlineException if a meta or root region is not online
    */
-  public ServerName waitMetaRegionLocation(ZKWatcher zkw, long timeout)
-    throws InterruptedException, NotAllMetaRegionsOnlineException {
+  public static ServerName waitMetaRegionLocation(ZKWatcher zkw, long timeout)
+      throws InterruptedException, NotAllMetaRegionsOnlineException {
     return waitMetaRegionLocation(zkw, RegionInfo.DEFAULT_REPLICA_ID, timeout);
   }
 
   /**
    * Gets the meta region location, if available, and waits for up to the specified timeout if not
-   * immediately available. Given the zookeeper notification could be delayed, we will try to
-   * get the latest data.
-   *
+   * immediately available. Given the zookeeper notification could be delayed, we will try to get
+   * the latest data.
    * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
    * @param replicaId the ID of the replica
    * @param timeout maximum time to wait, in millis
-   * @return server name for server hosting meta region formatted as per
-   * {@link ServerName}, or null if none available
+   * @return server name for server hosting meta region formatted as per {@link ServerName}, or null
+   *         if none available
    * @throws InterruptedException if waiting for the socket operation fails
    * @throws NotAllMetaRegionsOnlineException if a meta or root region is not online
    */
-  public ServerName waitMetaRegionLocation(ZKWatcher zkw, int replicaId, long timeout)
-    throws InterruptedException, NotAllMetaRegionsOnlineException {
+  public static ServerName waitMetaRegionLocation(ZKWatcher zkw, int replicaId, long timeout)
+      throws InterruptedException, NotAllMetaRegionsOnlineException {
     try {
       if (ZKUtil.checkExists(zkw, zkw.getZNodePaths().baseZNode) == -1) {
-        String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. "
-            + "There could be a mismatch with the one configured in the master.";
+        String errorMsg = "Check the value configured in 'zookeeper.znode.parent'. " +
+          "There could be a mismatch with the one configured in the master.";
         LOG.error(errorMsg);
         throw new IllegalArgumentException(errorMsg);
       }
@@ -230,198 +207,6 @@ public class MetaTableLocator {
   }
 
   /**
-   * Waits indefinitely for availability of <code>hbase:meta</code>.  Used during
-   * cluster startup.  Does not verify meta, just that something has been
-   * set up in zk.
-   * @see #waitMetaRegionLocation(ZKWatcher, long)
-   * @throws InterruptedException if interrupted while waiting
-   */
-  public void waitMetaRegionLocation(ZKWatcher zkw) throws InterruptedException {
-    long startTime = System.currentTimeMillis();
-    while (!stopped) {
-      try {
-        if (waitMetaRegionLocation(zkw, 100) != null) {
-          break;
-        }
-
-        long sleepTime = System.currentTimeMillis() - startTime;
-        // +1 in case sleepTime=0
-        if ((sleepTime + 1) % 10000 == 0) {
-          LOG.warn("Have been waiting for meta to be assigned for " + sleepTime + "ms");
-        }
-      } catch (NotAllMetaRegionsOnlineException e) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("hbase:meta still not available, sleeping and retrying." +
-            " Reason: " + e.getMessage());
-        }
-      }
-    }
-  }
-
-  /**
-   * Verify <code>hbase:meta</code> is deployed and accessible.
-   *
-   * @param hConnection the connection to use
-   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
-   * @param timeout How long to wait on zk for meta address (passed through to
-   *                the internal call to {@link #getMetaServerConnection}.
-   * @return True if the <code>hbase:meta</code> location is healthy.
-   * @throws IOException if the number of retries for getting the connection is exceeded
-   * @throws InterruptedException if waiting for the socket operation fails
-   */
-  public boolean verifyMetaRegionLocation(ClusterConnection hConnection, ZKWatcher zkw,
-      final long timeout) throws InterruptedException, IOException {
-    return verifyMetaRegionLocation(hConnection, zkw, timeout, RegionInfo.DEFAULT_REPLICA_ID);
-  }
-
-  /**
-   * Verify <code>hbase:meta</code> is deployed and accessible.
-   *
-   * @param connection the connection to use
-   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
-   * @param timeout How long to wait on zk for meta address (passed through to
-   * @param replicaId the ID of the replica
-   * @return True if the <code>hbase:meta</code> location is healthy.
-   * @throws InterruptedException if waiting for the socket operation fails
-   * @throws IOException if the number of retries for getting the connection is exceeded
-   */
-  public boolean verifyMetaRegionLocation(ClusterConnection connection, ZKWatcher zkw,
-      final long timeout, int replicaId) throws InterruptedException, IOException {
-    AdminProtos.AdminService.BlockingInterface service = null;
-    try {
-      service = getMetaServerConnection(connection, zkw, timeout, replicaId);
-    } catch (NotAllMetaRegionsOnlineException e) {
-      // Pass
-    } catch (ServerNotRunningYetException e) {
-      // Pass -- remote server is not up so can't be carrying root
-    } catch (UnknownHostException e) {
-      // Pass -- server name doesn't resolve so it can't be assigned anything.
-    } catch (RegionServerStoppedException e) {
-      // Pass -- server name sends us to a server that is dying or already dead.
-    }
-    return (service != null) && verifyRegionLocation(connection, service,
-            getMetaRegionLocation(zkw, replicaId), RegionReplicaUtil.getRegionInfoForReplica(
-                RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId).getRegionName());
-  }
-
-  /**
-   * Verify we can connect to <code>hostingServer</code> and that its carrying
-   * <code>regionName</code>.
-   * @param hostingServer Interface to the server hosting <code>regionName</code>
-   * @param address The servername that goes with the <code>metaServer</code> interface.
-   *                Used logging.
-   * @param regionName The regionname we are interested in.
-   * @return True if we were able to verify the region located at other side of the interface.
-   */
-  // TODO: We should be able to get the ServerName from the AdminProtocol
-  // rather than have to pass it in.  Its made awkward by the fact that the
-  // HRI is likely a proxy against remote server so the getServerName needs
-  // to be fixed to go to a local method or to a cache before we can do this.
-  private boolean verifyRegionLocation(final ClusterConnection connection,
-      AdminService.BlockingInterface hostingServer, final ServerName address,
-      final byte [] regionName) {
-    if (hostingServer == null) {
-      LOG.info("Passed hostingServer is null");
-      return false;
-    }
-    Throwable t;
-    HBaseRpcController controller = connection.getRpcControllerFactory().newController();
-    try {
-      // Try and get regioninfo from the hosting server.
-      return ProtobufUtil.getRegionInfo(controller, hostingServer, regionName) != null;
-    } catch (ConnectException e) {
-      t = e;
-    } catch (RetriesExhaustedException e) {
-      t = e;
-    } catch (RemoteException e) {
-      IOException ioe = e.unwrapRemoteException();
-      t = ioe;
-    } catch (IOException e) {
-      Throwable cause = e.getCause();
-      if (cause != null && cause instanceof EOFException) {
-        t = cause;
-      } else if (cause != null && cause.getMessage() != null
-          && cause.getMessage().contains("Connection reset")) {
-        t = cause;
-      } else {
-        t = e;
-      }
-    }
-    LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) +
-      " at address=" + address + ", exception=" + t.getMessage());
-    return false;
-  }
-
-  /**
-   * Gets a connection to the server hosting meta, as reported by ZooKeeper, waiting up to the
-   * specified timeout for availability.
-   *
-   * <p>WARNING: Does not retry.  Use an {@link org.apache.hadoop.hbase.client.HTable} instead.
-   *
-   * @param connection the connection to use
-   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
-   * @param timeout How long to wait on meta location
-   * @param replicaId the ID of the replica
-   * @return connection to server hosting meta
-   * @throws InterruptedException if waiting for the socket operation fails
-   * @throws IOException if the number of retries for getting the connection is exceeded
-   */
-  private AdminService.BlockingInterface getMetaServerConnection(ClusterConnection connection,
-      ZKWatcher zkw, long timeout, int replicaId) throws InterruptedException, IOException {
-    return getCachedConnection(connection, waitMetaRegionLocation(zkw, replicaId, timeout));
-  }
-
-  /**
-   * @param sn ServerName to get a connection against.
-   * @return The AdminProtocol we got when we connected to <code>sn</code>
-   *         May have come from cache, may not be good, may have been setup by this invocation, or
-   *         may be null.
-   * @throws IOException if the number of retries for getting the connection is exceeded
-   */
-  private static AdminService.BlockingInterface getCachedConnection(ClusterConnection connection,
-      ServerName sn) throws IOException {
-    if (sn == null) {
-      return null;
-    }
-    AdminService.BlockingInterface service = null;
-    try {
-      service = connection.getAdmin(sn);
-    } catch (RetriesExhaustedException e) {
-      if (e.getCause() != null && e.getCause() instanceof ConnectException) {
-        LOG.debug("Catch this; presume it means the cached connection has gone bad.");
-      } else {
-        throw e;
-      }
-    } catch (SocketTimeoutException e) {
-      LOG.debug("Timed out connecting to " + sn);
-    } catch (NoRouteToHostException e) {
-      LOG.debug("Connecting to " + sn, e);
-    } catch (SocketException e) {
-      LOG.debug("Exception connecting to " + sn);
-    } catch (UnknownHostException e) {
-      LOG.debug("Unknown host exception connecting to  " + sn);
-    } catch (FailedServerException e) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Server " + sn + " is in failed server list.");
-      }
-    } catch (IOException ioe) {
-      Throwable cause = ioe.getCause();
-      if (ioe instanceof ConnectException) {
-        LOG.debug("Catch. Connect refused.");
-      } else if (cause != null && cause instanceof EOFException) {
-        LOG.debug("Catch. Other end disconnected us.");
-      } else if (cause != null && cause.getMessage() != null &&
-        cause.getMessage().toLowerCase(Locale.ROOT).contains("connection reset")) {
-        LOG.debug("Catch. Connection reset.");
-      } else {
-        throw ioe;
-      }
-
-    }
-    return service;
-  }
-
-  /**
    * Sets the location of <code>hbase:meta</code> in ZooKeeper to the
    * specified server address.
    * @param zookeeper zookeeper reference
@@ -498,7 +283,7 @@ public class MetaTableLocator {
         try {
           int prefixLen = ProtobufUtil.lengthOfPBMagic();
           ZooKeeperProtos.MetaRegionServer rl =
-            ZooKeeperProtos.MetaRegionServer.PARSER.parseFrom(data, prefixLen,
+            ZooKeeperProtos.MetaRegionServer.parser().parseFrom(data, prefixLen,
                     data.length - prefixLen);
           if (rl.hasState()) {
             state = RegionState.State.convert(rl.getState());
@@ -532,12 +317,12 @@ public class MetaTableLocator {
    * @param zookeeper zookeeper reference
    * @throws KeeperException unexpected zookeeper exception
    */
-  public void deleteMetaLocation(ZKWatcher zookeeper)
+  public static void deleteMetaLocation(ZKWatcher zookeeper)
     throws KeeperException {
     deleteMetaLocation(zookeeper, RegionInfo.DEFAULT_REPLICA_ID);
   }
 
-  public void deleteMetaLocation(ZKWatcher zookeeper, int replicaId)
+  public static void deleteMetaLocation(ZKWatcher zookeeper, int replicaId)
     throws KeeperException {
     if (replicaId == RegionInfo.DEFAULT_REPLICA_ID) {
       LOG.info("Deleting hbase:meta region location in ZooKeeper");
@@ -561,7 +346,7 @@ public class MetaTableLocator {
    * @return ServerName or null if we timed out.
    * @throws InterruptedException if waiting for the socket operation fails
    */
-  public List<ServerName> blockUntilAvailable(final ZKWatcher zkw, final long timeout,
+  public static List<ServerName> blockUntilAvailable(final ZKWatcher zkw, final long timeout,
       Configuration conf) throws InterruptedException {
     int numReplicasConfigured = 1;
 
@@ -596,22 +381,21 @@ public class MetaTableLocator {
    * @return ServerName or null if we timed out.
    * @throws InterruptedException if waiting for the socket operation fails
    */
-  public ServerName blockUntilAvailable(final ZKWatcher zkw, final long timeout)
-          throws InterruptedException {
+  public static ServerName blockUntilAvailable(final ZKWatcher zkw, final long timeout)
+      throws InterruptedException {
     return blockUntilAvailable(zkw, RegionInfo.DEFAULT_REPLICA_ID, timeout);
   }
 
   /**
    * Wait until the meta region is available and is not in transition.
-   *
    * @param zkw reference to the {@link ZKWatcher} which also contains configuration and constants
    * @param replicaId the ID of the replica
    * @param timeout maximum time to wait in millis
    * @return ServerName or null if we timed out.
    * @throws InterruptedException if waiting for the socket operation fails
    */
-  public ServerName blockUntilAvailable(final ZKWatcher zkw, int replicaId, final long timeout)
-          throws InterruptedException {
+  public static ServerName blockUntilAvailable(final ZKWatcher zkw, int replicaId,
+      final long timeout) throws InterruptedException {
     if (timeout < 0) {
       throw new IllegalArgumentException();
     }
@@ -624,23 +408,12 @@ public class MetaTableLocator {
     ServerName sn = null;
     while (true) {
       sn = getMetaRegionLocation(zkw, replicaId);
-      if (sn != null || (System.currentTimeMillis() - startTime)
-          > timeout - HConstants.SOCKET_RETRY_WAIT_MS) {
+      if (sn != null ||
+        (System.currentTimeMillis() - startTime) > timeout - HConstants.SOCKET_RETRY_WAIT_MS) {
         break;
       }
       Thread.sleep(HConstants.SOCKET_RETRY_WAIT_MS);
     }
     return sn;
   }
-
-  /**
-   * Stop working.
-   * Interrupts any ongoing waits.
-   */
-  public void stop() {
-    if (!stopped) {
-      LOG.debug("Stopping MetaTableLocator");
-      stopped = true;
-    }
-  }
 }
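
With the class now final and its constructor private, callers drop the
throwaway instance; the ZKUtil hunk below shows the same migration in-tree:

  // before: ServerName sn = new MetaTableLocator().getMetaRegionLocation(zkw);
  ServerName sn = MetaTableLocator.getMetaRegionLocation(zkw);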

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
----------------------------------------------------------------------
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index 9153191..57c847c 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -34,10 +34,8 @@ import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
-
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
@@ -75,6 +73,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
 
@@ -1748,13 +1747,13 @@ public final class ZKUtil {
         sb.append("\n ").append(child);
       }
       sb.append("\nRegion server holding hbase:meta: "
-        + new MetaTableLocator().getMetaRegionLocation(zkw));
+        + MetaTableLocator.getMetaRegionLocation(zkw));
       Configuration conf = HBaseConfiguration.create();
       int numMetaReplicas = conf.getInt(HConstants.META_REPLICAS_NUM,
                HConstants.DEFAULT_META_REPLICA_NUM);
       for (int i = 1; i < numMetaReplicas; i++) {
         sb.append("\nRegion server holding hbase:meta, replicaId " + i + " "
-                    + new MetaTableLocator().getMetaRegionLocation(zkw, i));
+                    + MetaTableLocator.getMetaRegionLocation(zkw, i));
       }
       sb.append("\nRegion servers:");
       for (String child : listChildrenNoWatch(zkw, zkw.getZNodePaths().rsZNode)) {


[13/51] [abbrv] hbase git commit: HBASE-21034 Add new throttle type: read/write capacity unit

Posted by el...@apache.org.
HBASE-21034 Add new throttle type: read/write capacity unit

Signed-off-by: Guanghao Zhang <zg...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5ded2944
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5ded2944
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5ded2944

Branch: refs/heads/HBASE-20952
Commit: 5ded2944199f27440a46df6f200ff2a31c1b8728
Parents: 405bf5e
Author: meiyi <my...@gamil.com>
Authored: Mon Nov 19 17:17:30 2018 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Wed Nov 21 09:46:49 2018 +0800

----------------------------------------------------------------------
 .../hbase/quotas/QuotaSettingsFactory.java      | 12 +++
 .../hadoop/hbase/quotas/ThrottleSettings.java   |  6 ++
 .../hadoop/hbase/quotas/ThrottleType.java       |  9 ++
 .../hbase/shaded/protobuf/ProtobufUtil.java     | 56 ++++++++----
 .../src/main/protobuf/Quota.proto               |  7 ++
 .../hbase/quotas/DefaultOperationQuota.java     | 71 +++++++++++----
 .../hbase/quotas/GlobalQuotaSettingsImpl.java   | 27 ++++++
 .../hadoop/hbase/quotas/NoopQuotaLimiter.java   | 11 +--
 .../hadoop/hbase/quotas/QuotaLimiter.java       | 18 ++--
 .../apache/hadoop/hbase/quotas/QuotaUtil.java   |  7 ++
 .../quotas/RegionServerRpcQuotaManager.java     |  5 +-
 .../hadoop/hbase/quotas/TimeBasedLimiter.java   | 94 +++++++++++++++++---
 .../hadoop/hbase/quotas/TestQuotaAdmin.java     | 24 ++++-
 .../hadoop/hbase/quotas/TestQuotaState.java     |  8 +-
 .../hadoop/hbase/quotas/TestQuotaThrottle.java  | 66 +++++++++++++-
 hbase-shell/src/main/ruby/hbase/quotas.rb       |  5 +-
 .../src/main/ruby/shell/commands/set_quota.rb   | 10 ++-
 hbase-shell/src/test/ruby/hbase/quotas_test.rb  | 27 ++++++
 18 files changed, 396 insertions(+), 67 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index 2a20c51..14d1ad3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -143,6 +143,18 @@ public class QuotaSettingsFactory {
       settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
           ThrottleType.READ_SIZE, throttle.getReadSize()));
     }
+    if (throttle.hasReqCapacityUnit()) {
+      settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
+        ThrottleType.REQUEST_CAPACITY_UNIT, throttle.getReqCapacityUnit()));
+    }
+    if (throttle.hasReadCapacityUnit()) {
+      settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
+        ThrottleType.READ_CAPACITY_UNIT, throttle.getReadCapacityUnit()));
+    }
+    if (throttle.hasWriteCapacityUnit()) {
+      settings.add(ThrottleSettings.fromTimedQuota(userName, tableName, namespace,
+        ThrottleType.WRITE_CAPACITY_UNIT, throttle.getWriteCapacityUnit()));
+    }
     return settings;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
index e424d8a..05fb70b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleSettings.java
@@ -95,6 +95,12 @@ class ThrottleSettings extends QuotaSettings {
           case READ_SIZE:
             builder.append(sizeToString(timedQuota.getSoftLimit()));
             break;
+          case REQUEST_CAPACITY_UNIT:
+          case READ_CAPACITY_UNIT:
+          case WRITE_CAPACITY_UNIT:
+            builder.append(String.format("%dCU", timedQuota.getSoftLimit()));
+            break;
+          default:
         }
       } else if (timedQuota.hasShare()) {
         builder.append(String.format("%.2f%%", timedQuota.getShare()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java
index 0b0ee60..ec5b32d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottleType.java
@@ -41,4 +41,13 @@ public enum ThrottleType {
 
   /** Throttling based on the read data size */
   READ_SIZE,
+
+  /** Throttling based on the read+write capacity unit */
+  REQUEST_CAPACITY_UNIT,
+
+  /** Throttling based on the write data capacity unit */
+  WRITE_CAPACITY_UNIT,
+
+  /** Throttling based on the read data capacity unit */
+  READ_CAPACITY_UNIT,
 }
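
A sketch of how the new types surface through the client quota API, assuming
the pre-existing QuotaSettingsFactory.throttleUser(userName, type, limit,
timeUnit) factory (which accepts any ThrottleType, so the new enum values
plug straight in) and an Admin handle named admin:

  // Limit user "u1" to 10 write capacity units per second; one CU covers a
  // configured number of bytes of mutation payload (see the
  // DefaultOperationQuota hunk below for how sizes round up to CUs).
  QuotaSettings settings = QuotaSettingsFactory.throttleUser(
      "u1", ThrottleType.WRITE_CAPACITY_UNIT, 10, TimeUnit.SECONDS);
  admin.setQuota(settings);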

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 6548094..cf4c831 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -2388,14 +2388,27 @@ public final class ProtobufUtil {
    */
   public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) {
     switch (proto) {
-      case REQUEST_NUMBER: return ThrottleType.REQUEST_NUMBER;
-      case REQUEST_SIZE:   return ThrottleType.REQUEST_SIZE;
-      case WRITE_NUMBER:   return ThrottleType.WRITE_NUMBER;
-      case WRITE_SIZE:     return ThrottleType.WRITE_SIZE;
-      case READ_NUMBER:    return ThrottleType.READ_NUMBER;
-      case READ_SIZE:      return ThrottleType.READ_SIZE;
+      case REQUEST_NUMBER:
+        return ThrottleType.REQUEST_NUMBER;
+      case REQUEST_SIZE:
+        return ThrottleType.REQUEST_SIZE;
+      case REQUEST_CAPACITY_UNIT:
+        return ThrottleType.REQUEST_CAPACITY_UNIT;
+      case WRITE_NUMBER:
+        return ThrottleType.WRITE_NUMBER;
+      case WRITE_SIZE:
+        return ThrottleType.WRITE_SIZE;
+      case READ_NUMBER:
+        return ThrottleType.READ_NUMBER;
+      case READ_SIZE:
+        return ThrottleType.READ_SIZE;
+      case READ_CAPACITY_UNIT:
+        return ThrottleType.READ_CAPACITY_UNIT;
+      case WRITE_CAPACITY_UNIT:
+        return ThrottleType.WRITE_CAPACITY_UNIT;
+      default:
+        throw new RuntimeException("Invalid ThrottleType " + proto);
     }
-    throw new RuntimeException("Invalid ThrottleType " + proto);
   }
 
   /**
@@ -2406,14 +2419,27 @@ public final class ProtobufUtil {
    */
   public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType type) {
     switch (type) {
-      case REQUEST_NUMBER: return QuotaProtos.ThrottleType.REQUEST_NUMBER;
-      case REQUEST_SIZE:   return QuotaProtos.ThrottleType.REQUEST_SIZE;
-      case WRITE_NUMBER:   return QuotaProtos.ThrottleType.WRITE_NUMBER;
-      case WRITE_SIZE:     return QuotaProtos.ThrottleType.WRITE_SIZE;
-      case READ_NUMBER:    return QuotaProtos.ThrottleType.READ_NUMBER;
-      case READ_SIZE:      return QuotaProtos.ThrottleType.READ_SIZE;
-    }
-    throw new RuntimeException("Invalid ThrottleType " + type);
+      case REQUEST_NUMBER:
+        return QuotaProtos.ThrottleType.REQUEST_NUMBER;
+      case REQUEST_SIZE:
+        return QuotaProtos.ThrottleType.REQUEST_SIZE;
+      case WRITE_NUMBER:
+        return QuotaProtos.ThrottleType.WRITE_NUMBER;
+      case WRITE_SIZE:
+        return QuotaProtos.ThrottleType.WRITE_SIZE;
+      case READ_NUMBER:
+        return QuotaProtos.ThrottleType.READ_NUMBER;
+      case READ_SIZE:
+        return QuotaProtos.ThrottleType.READ_SIZE;
+      case REQUEST_CAPACITY_UNIT:
+        return QuotaProtos.ThrottleType.REQUEST_CAPACITY_UNIT;
+      case READ_CAPACITY_UNIT:
+        return QuotaProtos.ThrottleType.READ_CAPACITY_UNIT;
+      case WRITE_CAPACITY_UNIT:
+        return QuotaProtos.ThrottleType.WRITE_CAPACITY_UNIT;
+      default:
+        throw new RuntimeException("Invalid ThrottleType " + type);
+    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-protocol-shaded/src/main/protobuf/Quota.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
index cd4c7df..5b00d74 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
@@ -46,6 +46,9 @@ enum ThrottleType {
   WRITE_SIZE     = 4;
   READ_NUMBER    = 5;
   READ_SIZE      = 6;
+  REQUEST_CAPACITY_UNIT = 7;
+  WRITE_CAPACITY_UNIT   = 8;
+  READ_CAPACITY_UNIT    = 9;
 }
 
 message Throttle {
@@ -57,6 +60,10 @@ message Throttle {
 
   optional TimedQuota read_num  = 5;
   optional TimedQuota read_size = 6;
+
+  optional TimedQuota req_capacity_unit   = 7;
+  optional TimedQuota write_capacity_unit = 8;
+  optional TimedQuota read_capacity_unit  = 9;
 }
 
 message ThrottleRequest {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java
index 1265a42..f9b3ca5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/DefaultOperationQuota.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.quotas;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
@@ -34,20 +35,29 @@ public class DefaultOperationQuota implements OperationQuota {
   private static final Logger LOG = LoggerFactory.getLogger(DefaultOperationQuota.class);
 
   private final List<QuotaLimiter> limiters;
+  private final long writeCapacityUnit;
+  private final long readCapacityUnit;
+
   private long writeAvailable = 0;
   private long readAvailable = 0;
   private long writeConsumed = 0;
   private long readConsumed = 0;
+  private long writeCapacityUnitConsumed = 0;
+  private long readCapacityUnitConsumed = 0;
   private final long[] operationSize;
 
-  public DefaultOperationQuota(final QuotaLimiter... limiters) {
-    this(Arrays.asList(limiters));
+  public DefaultOperationQuota(final Configuration conf, final QuotaLimiter... limiters) {
+    this(conf, Arrays.asList(limiters));
   }
 
   /**
    * NOTE: The order matters. It should be something like [user, table, namespace, global]
    */
-  public DefaultOperationQuota(final List<QuotaLimiter> limiters) {
+  public DefaultOperationQuota(final Configuration conf, final List<QuotaLimiter> limiters) {
+    this.writeCapacityUnit =
+        conf.getLong(QuotaUtil.WRITE_CAPACITY_UNIT_CONF_KEY, QuotaUtil.DEFAULT_WRITE_CAPACITY_UNIT);
+    this.readCapacityUnit =
+        conf.getLong(QuotaUtil.READ_CAPACITY_UNIT_CONF_KEY, QuotaUtil.DEFAULT_READ_CAPACITY_UNIT);
     this.limiters = limiters;
     int size = OperationType.values().length;
     operationSize = new long[size];
@@ -58,24 +68,28 @@ public class DefaultOperationQuota implements OperationQuota {
   }
 
   @Override
-  public void checkQuota(int numWrites, int numReads, int numScans)
-      throws RpcThrottlingException {
+  public void checkQuota(int numWrites, int numReads, int numScans) throws RpcThrottlingException {
     writeConsumed = estimateConsume(OperationType.MUTATE, numWrites, 100);
-    readConsumed  = estimateConsume(OperationType.GET, numReads, 100);
+    readConsumed = estimateConsume(OperationType.GET, numReads, 100);
     readConsumed += estimateConsume(OperationType.SCAN, numScans, 1000);
 
+    writeCapacityUnitConsumed = calculateWriteCapacityUnit(writeConsumed);
+    readCapacityUnitConsumed = calculateReadCapacityUnit(readConsumed);
+
     writeAvailable = Long.MAX_VALUE;
     readAvailable = Long.MAX_VALUE;
-    for (final QuotaLimiter limiter: limiters) {
+    for (final QuotaLimiter limiter : limiters) {
       if (limiter.isBypass()) continue;
 
-      limiter.checkQuota(numWrites, writeConsumed, numReads + numScans, readConsumed);
+      limiter.checkQuota(numWrites, writeConsumed, numReads + numScans, readConsumed,
+        writeCapacityUnitConsumed, readCapacityUnitConsumed);
       readAvailable = Math.min(readAvailable, limiter.getReadAvailable());
       writeAvailable = Math.min(writeAvailable, limiter.getWriteAvailable());
     }
 
-    for (final QuotaLimiter limiter: limiters) {
-      limiter.grabQuota(numWrites, writeConsumed, numReads + numScans, readConsumed);
+    for (final QuotaLimiter limiter : limiters) {
+      limiter.grabQuota(numWrites, writeConsumed, numReads + numScans, readConsumed,
+        writeCapacityUnitConsumed, readCapacityUnitConsumed);
     }
   }
 
@@ -83,12 +97,21 @@ public class DefaultOperationQuota implements OperationQuota {
   public void close() {
     // Adjust the quota consumed for the specified operation
     long writeDiff = operationSize[OperationType.MUTATE.ordinal()] - writeConsumed;
-    long readDiff = operationSize[OperationType.GET.ordinal()] +
-        operationSize[OperationType.SCAN.ordinal()] - readConsumed;
-
-    for (final QuotaLimiter limiter: limiters) {
-      if (writeDiff != 0) limiter.consumeWrite(writeDiff);
-      if (readDiff != 0) limiter.consumeRead(readDiff);
+    long readDiff = operationSize[OperationType.GET.ordinal()]
+        + operationSize[OperationType.SCAN.ordinal()] - readConsumed;
+    long writeCapacityUnitDiff = calculateWriteCapacityUnitDiff(
+      operationSize[OperationType.MUTATE.ordinal()], writeConsumed);
+    long readCapacityUnitDiff = calculateReadCapacityUnitDiff(
+      operationSize[OperationType.GET.ordinal()] + operationSize[OperationType.SCAN.ordinal()],
+      readConsumed);
+
+    for (final QuotaLimiter limiter : limiters) {
+      if (writeDiff != 0) {
+        limiter.consumeWrite(writeDiff, writeCapacityUnitDiff);
+      }
+      if (readDiff != 0) {
+        limiter.consumeRead(readDiff, readCapacityUnitDiff);
+      }
     }
   }
 
@@ -123,4 +146,20 @@ public class DefaultOperationQuota implements OperationQuota {
     }
     return 0;
   }
+
+  private long calculateWriteCapacityUnit(final long size) {
+    return (long) Math.ceil(size * 1.0 / this.writeCapacityUnit);
+  }
+
+  private long calculateReadCapacityUnit(final long size) {
+    return (long) Math.ceil(size * 1.0 / this.readCapacityUnit);
+  }
+
+  private long calculateWriteCapacityUnitDiff(final long actualSize, final long estimateSize) {
+    return calculateWriteCapacityUnit(actualSize) - calculateWriteCapacityUnit(estimateSize);
+  }
+
+  private long calculateReadCapacityUnitDiff(final long actualSize, final long estimateSize) {
+    return calculateReadCapacityUnit(actualSize) - calculateReadCapacityUnit(estimateSize);
+  }
 }

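The capacity-unit cost above is a ceiling division over the configured unit size, so any request that touches even one byte consumes at least one unit. A worked example with the 1024-byte default:

    // Same math as calculateWriteCapacityUnit() with writeCapacityUnit = 1024.
    long unit = 1024L;
    long oneKb = (long) Math.ceil(1024 * 1.0 / unit);    // == 1 CU
    long justOver = (long) Math.ceil(1025 * 1.0 / unit); // == 2 CUs
    // close() then settles the difference between estimated and actual CUs.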
http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/GlobalQuotaSettingsImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/GlobalQuotaSettingsImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/GlobalQuotaSettingsImpl.java
index 3119691..0c6cb81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/GlobalQuotaSettingsImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/GlobalQuotaSettingsImpl.java
@@ -149,6 +149,16 @@ public class GlobalQuotaSettingsImpl extends GlobalQuotaSettings {
           case READ_SIZE:
             throttleBuilder.setReadSize(otherProto.getTimedQuota());
             break;
+          case REQUEST_CAPACITY_UNIT:
+            throttleBuilder.setReqCapacityUnit(otherProto.getTimedQuota());
+            break;
+          case READ_CAPACITY_UNIT:
+            throttleBuilder.setReadCapacityUnit(otherProto.getTimedQuota());
+            break;
+          case WRITE_CAPACITY_UNIT:
+            throttleBuilder.setWriteCapacityUnit(otherProto.getTimedQuota());
+            break;
+          default:
         }
       }
     }
@@ -232,6 +242,11 @@ public class GlobalQuotaSettingsImpl extends GlobalQuotaSettings {
             case READ_SIZE:
               builder.append(sizeToString(timedQuota.getSoftLimit()));
               break;
+            case REQUEST_CAPACITY_UNIT:
+            case READ_CAPACITY_UNIT:
+            case WRITE_CAPACITY_UNIT:
+              builder.append(String.format("%dCU", timedQuota.getSoftLimit()));
+              break;
+            default:
           }
         } else if (timedQuota.hasShare()) {
           builder.append(String.format("%.2f%%", timedQuota.getShare()));
@@ -289,6 +304,15 @@ public class GlobalQuotaSettingsImpl extends GlobalQuotaSettings {
     if (proto.hasWriteSize()) {
       quotas.put(ThrottleType.WRITE_SIZE, proto.getWriteSize());
     }
+    if (proto.hasReqCapacityUnit()) {
+      quotas.put(ThrottleType.REQUEST_CAPACITY_UNIT, proto.getReqCapacityUnit());
+    }
+    if (proto.hasReadCapacityUnit()) {
+      quotas.put(ThrottleType.READ_CAPACITY_UNIT, proto.getReadCapacityUnit());
+    }
+    if (proto.hasWriteCapacityUnit()) {
+      quotas.put(ThrottleType.WRITE_CAPACITY_UNIT, proto.getWriteCapacityUnit());
+    }
     return quotas;
   }
 
@@ -299,5 +323,8 @@ public class GlobalQuotaSettingsImpl extends GlobalQuotaSettings {
     builder.clearReqSize();
     builder.clearWriteNum();
     builder.clearWriteSize();
+    builder.clearReqCapacityUnit();
+    builder.clearReadCapacityUnit();
+    builder.clearWriteCapacityUnit();
   }
 }

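The new switch arm renders capacity-unit limits with a CU suffix, which is what the shell tests further down assert against (e.g. LIMIT => 2CU/min):

    String rendered = String.format("%dCU", 2L); // "2CU"; the time unit is appended separately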
http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java
index 3cca955..71dd3c7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/NoopQuotaLimiter.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.quotas;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
-import org.apache.hadoop.hbase.quotas.OperationQuota.OperationType;
 
 /**
  * Noop quota limiter returned when no limiter is associated to the user/table
@@ -36,22 +35,24 @@ class NoopQuotaLimiter implements QuotaLimiter {
 
   @Override
   public void checkQuota(long writeReqs, long estimateWriteSize, long readReqs,
-      long estimateReadSize) throws RpcThrottlingException {
+      long estimateReadSize, long estimateWriteCapacityUnit, long estimateReadCapacityUnit)
+      throws RpcThrottlingException {
     // no-op
   }
 
   @Override
-  public void grabQuota(long writeReqs, long writeSize, long readReqs, long readSize) {
+  public void grabQuota(long writeReqs, long writeSize, long readReqs, long readSize,
+      long writeCapacityUnit, long readCapacityUnit) {
     // no-op
   }
 
   @Override
-  public void consumeWrite(final long size) {
+  public void consumeWrite(final long size, long capacityUnit) {
     // no-op
   }
 
   @Override
-  public void consumeRead(final long size) {
+  public void consumeRead(final long size, long capacityUnit) {
     // no-op
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java
index 7cb29b3..9260ec2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaLimiter.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.quotas;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
-import org.apache.hadoop.hbase.quotas.OperationQuota.OperationType;
 
 /**
  * Internal interface used to interact with the user/table quota.
@@ -35,10 +34,14 @@ public interface QuotaLimiter {
    * @param estimateWriteSize the write size that will be checked against the available quota
    * @param readReqs the read requests that will be checked against the available quota
    * @param estimateReadSize the read size that will be checked against the available quota
+   * @param estimateWriteCapacityUnit the write capacity unit that will be checked against the
+   *          available quota
+   * @param estimateReadCapacityUnit the read capacity unit that will be checked against the
+   *          available quota
    * @throws RpcThrottlingException thrown if not enough available resources to perform operation.
    */
-  void checkQuota(long writeReqs, long estimateWriteSize, long readReqs, long estimateReadSize)
-      throws RpcThrottlingException;
+  void checkQuota(long writeReqs, long estimateWriteSize, long readReqs, long estimateReadSize,
+      long estimateWriteCapacityUnit, long estimateReadCapacityUnit) throws RpcThrottlingException;
 
   /**
    * Removes the specified write and read amount from the quota.
@@ -49,20 +52,23 @@ public interface QuotaLimiter {
    * @param writeSize the write size that will be removed from the current quota
    * @param readReqs the read requests that will be removed from the current quota
    * @param readSize the read size that will be removed from the current quota
+   * @param writeCapacityUnit the write capacity unit that will be removed from the current quota
+   * @param readCapacityUnit the read capacity unit count that will be removed from the current quota
    */
-  void grabQuota(long writeReqs, long writeSize, long readReqs, long readSize);
+  void grabQuota(long writeReqs, long writeSize, long readReqs, long readSize,
+      long writeCapacityUnit, long readCapacityUnit);
 
   /**
    * Removes or adds back some write amount to the quota.
    * (called at the end of an operation in case the estimate quota was off)
    */
-  void consumeWrite(long size);
+  void consumeWrite(long size, long capacityUnit);
 
   /**
    * Removes or adds back some read amount to the quota.
    * (called at the end of an operation in case the estimate quota was off)
    */
-  void consumeRead(long size);
+  void consumeRead(long size, long capacityUnit);
 
   /** @return true if the limiter is a noop */
   boolean isBypass();

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
index 6bc3ce9..f6b5d95 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/QuotaUtil.java
@@ -57,6 +57,13 @@ public class QuotaUtil extends QuotaTableUtil {
   public static final String QUOTA_CONF_KEY = "hbase.quota.enabled";
   private static final boolean QUOTA_ENABLED_DEFAULT = false;
 
+  public static final String READ_CAPACITY_UNIT_CONF_KEY = "hbase.quota.read.capacity.unit";
+  // by default, one read capacity unit is 1024 bytes (1 KB)
+  public static final long DEFAULT_READ_CAPACITY_UNIT = 1024;
+  public static final String WRITE_CAPACITY_UNIT_CONF_KEY = "hbase.quota.write.capacity.unit";
+  // by default, one write capacity unit is 1024 bytes (1 KB)
+  public static final long DEFAULT_WRITE_CAPACITY_UNIT = 1024;
+
   /** Table descriptor for Quota internal table */
   public static final HTableDescriptor QUOTA_TABLE_DESC =
     new HTableDescriptor(QUOTA_TABLE_NAME);

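Both unit sizes are plain Configuration longs, so an operator can resize them; a minimal sketch, assuming 2 KB units are wanted (the values are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.quotas.QuotaUtil;

    Configuration conf = new Configuration();
    // One capacity unit now covers 2048 bytes instead of the 1024-byte default.
    conf.setLong(QuotaUtil.WRITE_CAPACITY_UNIT_CONF_KEY, 2048L);
    conf.setLong(QuotaUtil.READ_CAPACITY_UNIT_CONF_KEY, 2048L);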
http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java
index 7c21f45..40e70dc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/RegionServerRpcQuotaManager.java
@@ -102,7 +102,7 @@ public class RegionServerRpcQuotaManager {
           LOG.trace("get quota for ugi=" + ugi + " table=" + table + " userLimiter=" + userLimiter);
         }
         if (!useNoop) {
-          return new DefaultOperationQuota(userLimiter);
+          return new DefaultOperationQuota(this.rsServices.getConfiguration(), userLimiter);
         }
       } else {
         QuotaLimiter nsLimiter = quotaCache.getNamespaceLimiter(table.getNamespaceAsString());
@@ -113,7 +113,8 @@ public class RegionServerRpcQuotaManager {
                     userLimiter + " tableLimiter=" + tableLimiter + " nsLimiter=" + nsLimiter);
         }
         if (!useNoop) {
-          return new DefaultOperationQuota(userLimiter, tableLimiter, nsLimiter);
+          return new DefaultOperationQuota(this.rsServices.getConfiguration(), userLimiter,
+              tableLimiter, nsLimiter);
         }
       }
     }

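The returned OperationQuota is driven in two phases: an up-front estimate check, then a close() that reconciles against actual sizes. A sketch of that call pattern (the surrounding RPC plumbing is elided; region and quotaManager are assumed to be in scope):

    // Estimate-check one mutation; throws RpcThrottlingException when a
    // limiter in the [user, table, namespace] chain is exhausted.
    OperationQuota quota = quotaManager.checkQuota(region, OperationQuota.OperationType.MUTATE);
    try {
      // ... perform the mutation and record its real size on the quota ...
    } finally {
      quota.close(); // settles the estimated-vs-actual difference per limiter
    }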
http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java
index 02dffcf..771eed1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/TimeBasedLimiter.java
@@ -40,6 +40,9 @@ public class TimeBasedLimiter implements QuotaLimiter {
   private RateLimiter writeSizeLimiter = null;
   private RateLimiter readReqsLimiter = null;
   private RateLimiter readSizeLimiter = null;
+  private RateLimiter reqCapacityUnitLimiter = null;
+  private RateLimiter writeCapacityUnitLimiter = null;
+  private RateLimiter readCapacityUnitLimiter = null;
 
   private TimeBasedLimiter() {
     if (FixedIntervalRateLimiter.class.getName().equals(
@@ -51,6 +54,9 @@ public class TimeBasedLimiter implements QuotaLimiter {
       writeSizeLimiter = new FixedIntervalRateLimiter();
       readReqsLimiter = new FixedIntervalRateLimiter();
       readSizeLimiter = new FixedIntervalRateLimiter();
+      reqCapacityUnitLimiter = new FixedIntervalRateLimiter();
+      writeCapacityUnitLimiter = new FixedIntervalRateLimiter();
+      readCapacityUnitLimiter = new FixedIntervalRateLimiter();
     } else {
       reqsLimiter = new AverageIntervalRateLimiter();
       reqSizeLimiter = new AverageIntervalRateLimiter();
@@ -58,6 +64,9 @@ public class TimeBasedLimiter implements QuotaLimiter {
       writeSizeLimiter = new AverageIntervalRateLimiter();
       readReqsLimiter = new AverageIntervalRateLimiter();
       readSizeLimiter = new AverageIntervalRateLimiter();
+      reqCapacityUnitLimiter = new AverageIntervalRateLimiter();
+      writeCapacityUnitLimiter = new AverageIntervalRateLimiter();
+      readCapacityUnitLimiter = new AverageIntervalRateLimiter();
     }
   }
 
@@ -93,6 +102,21 @@ public class TimeBasedLimiter implements QuotaLimiter {
       setFromTimedQuota(limiter.readSizeLimiter, throttle.getReadSize());
       isBypass = false;
     }
+
+    if (throttle.hasReqCapacityUnit()) {
+      setFromTimedQuota(limiter.reqCapacityUnitLimiter, throttle.getReqCapacityUnit());
+      isBypass = false;
+    }
+
+    if (throttle.hasWriteCapacityUnit()) {
+      setFromTimedQuota(limiter.writeCapacityUnitLimiter, throttle.getWriteCapacityUnit());
+      isBypass = false;
+    }
+
+    if (throttle.hasReadCapacityUnit()) {
+      setFromTimedQuota(limiter.readCapacityUnitLimiter, throttle.getReadCapacityUnit());
+      isBypass = false;
+    }
     return isBypass ? NoopQuotaLimiter.get() : limiter;
   }
 
@@ -103,6 +127,9 @@ public class TimeBasedLimiter implements QuotaLimiter {
     writeSizeLimiter.update(other.writeSizeLimiter);
     readReqsLimiter.update(other.readReqsLimiter);
     readSizeLimiter.update(other.readSizeLimiter);
+    reqCapacityUnitLimiter.update(other.reqCapacityUnitLimiter);
+    writeCapacityUnitLimiter.update(other.writeCapacityUnitLimiter);
+    readCapacityUnitLimiter.update(other.readCapacityUnitLimiter);
   }
 
   private static void setFromTimedQuota(final RateLimiter limiter, final TimedQuota timedQuota) {
@@ -111,7 +138,8 @@ public class TimeBasedLimiter implements QuotaLimiter {
 
   @Override
   public void checkQuota(long writeReqs, long estimateWriteSize, long readReqs,
-      long estimateReadSize) throws RpcThrottlingException {
+      long estimateReadSize, long estimateWriteCapacityUnit, long estimateReadCapacityUnit)
+      throws RpcThrottlingException {
     if (!reqsLimiter.canExecute(writeReqs + readReqs)) {
       RpcThrottlingException.throwNumRequestsExceeded(reqsLimiter.waitInterval());
     }
@@ -119,6 +147,10 @@ public class TimeBasedLimiter implements QuotaLimiter {
       RpcThrottlingException.throwRequestSizeExceeded(
           reqSizeLimiter.waitInterval(estimateWriteSize + estimateReadSize));
     }
+    if (!reqCapacityUnitLimiter.canExecute(estimateWriteCapacityUnit + estimateReadCapacityUnit)) {
+      RpcThrottlingException.throwRequestSizeExceeded(
+        reqCapacityUnitLimiter.waitInterval(estimateWriteCapacityUnit + estimateReadCapacityUnit));
+    }
 
     if (estimateWriteSize > 0) {
       if (!writeReqsLimiter.canExecute(writeReqs)) {
@@ -128,6 +160,10 @@ public class TimeBasedLimiter implements QuotaLimiter {
         RpcThrottlingException.throwWriteSizeExceeded(
             writeSizeLimiter.waitInterval(estimateWriteSize));
       }
+      if (!writeCapacityUnitLimiter.canExecute(estimateWriteCapacityUnit)) {
+        RpcThrottlingException.throwWriteSizeExceeded(
+          writeCapacityUnitLimiter.waitInterval(estimateWriteCapacityUnit));
+      }
     }
 
     if (estimateReadSize > 0) {
@@ -138,11 +174,16 @@ public class TimeBasedLimiter implements QuotaLimiter {
         RpcThrottlingException.throwReadSizeExceeded(
             readSizeLimiter.waitInterval(estimateReadSize));
       }
+      if (!readCapacityUnitLimiter.canExecute(estimateReadCapacityUnit)) {
+        RpcThrottlingException
+            .throwReadSizeExceeded(readCapacityUnitLimiter.waitInterval(estimateReadCapacityUnit));
+      }
     }
   }
 
   @Override
-  public void grabQuota(long writeReqs, long writeSize, long readReqs, long readSize) {
+  public void grabQuota(long writeReqs, long writeSize, long readReqs, long readSize,
+      long writeCapacityUnit, long readCapacityUnit) {
     assert writeSize != 0 || readSize != 0;
 
     reqsLimiter.consume(writeReqs + readReqs);
@@ -156,18 +197,30 @@ public class TimeBasedLimiter implements QuotaLimiter {
       readReqsLimiter.consume(readReqs);
       readSizeLimiter.consume(readSize);
     }
+    if (writeCapacityUnit > 0) {
+      reqCapacityUnitLimiter.consume(writeCapacityUnit);
+      writeCapacityUnitLimiter.consume(writeCapacityUnit);
+    }
+    if (readCapacityUnit > 0) {
+      reqCapacityUnitLimiter.consume(readCapacityUnit);
+      readCapacityUnitLimiter.consume(readCapacityUnit);
+    }
   }
 
   @Override
-  public void consumeWrite(final long size) {
+  public void consumeWrite(final long size, long capacityUnit) {
     reqSizeLimiter.consume(size);
     writeSizeLimiter.consume(size);
+    reqCapacityUnitLimiter.consume(capacityUnit);
+    writeCapacityUnitLimiter.consume(capacityUnit);
   }
 
   @Override
-  public void consumeRead(final long size) {
+  public void consumeRead(final long size, long capacityUnit) {
     reqSizeLimiter.consume(size);
     readSizeLimiter.consume(size);
+    reqCapacityUnitLimiter.consume(capacityUnit);
+    readCapacityUnitLimiter.consume(capacityUnit);
   }
 
   @Override
@@ -189,12 +242,33 @@ public class TimeBasedLimiter implements QuotaLimiter {
   public String toString() {
     StringBuilder builder = new StringBuilder();
     builder.append("TimeBasedLimiter(");
-    if (!reqsLimiter.isBypass()) builder.append("reqs=" + reqsLimiter);
-    if (!reqSizeLimiter.isBypass()) builder.append(" resSize=" + reqSizeLimiter);
-    if (!writeReqsLimiter.isBypass()) builder.append(" writeReqs=" + writeReqsLimiter);
-    if (!writeSizeLimiter.isBypass()) builder.append(" writeSize=" + writeSizeLimiter);
-    if (!readReqsLimiter.isBypass()) builder.append(" readReqs=" + readReqsLimiter);
-    if (!readSizeLimiter.isBypass()) builder.append(" readSize=" + readSizeLimiter);
+    if (!reqsLimiter.isBypass()) {
+      builder.append("reqs=" + reqsLimiter);
+    }
+    if (!reqSizeLimiter.isBypass()) {
+      builder.append(" resSize=" + reqSizeLimiter);
+    }
+    if (!writeReqsLimiter.isBypass()) {
+      builder.append(" writeReqs=" + writeReqsLimiter);
+    }
+    if (!writeSizeLimiter.isBypass()) {
+      builder.append(" writeSize=" + writeSizeLimiter);
+    }
+    if (!readReqsLimiter.isBypass()) {
+      builder.append(" readReqs=" + readReqsLimiter);
+    }
+    if (!readSizeLimiter.isBypass()) {
+      builder.append(" readSize=" + readSizeLimiter);
+    }
+    if (!reqCapacityUnitLimiter.isBypass()) {
+      builder.append(" reqCapacityUnit=" + reqCapacityUnitLimiter);
+    }
+    if (!writeCapacityUnitLimiter.isBypass()) {
+      builder.append(" writeCapacityUnit=" + writeCapacityUnitLimiter);
+    }
+    if (!readCapacityUnitLimiter.isBypass()) {
+      builder.append(" readCapacityUnit=" + readCapacityUnitLimiter);
+    }
     builder.append(')');
     return builder.toString();
   }

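Note the intentional double accounting in grabQuota(): a write consumes from both the combined request-CU limiter and the write-CU limiter, and a read likewise, so a REQUEST_CAPACITY_UNIT quota caps reads and writes together. The same idea with plain counters:

    // Mirrors grabQuota() above: 2 write CUs, then 3 read CUs consumed.
    long reqCuAvail = 10, writeCuAvail = 6, readCuAvail = 6;
    reqCuAvail -= 2; writeCuAvail -= 2; // a write draws from req and write limiters
    reqCuAvail -= 3; readCuAvail -= 3;  // a read draws from req and read limiters
    // reqCuAvail == 5, writeCuAvail == 4, readCuAvail == 3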
http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
index b84dc83..03e0aa5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
@@ -455,17 +455,22 @@ public class TestQuotaAdmin {
 
   @Test
   public void testSetGetRemoveRPCQuota() throws Exception {
+    testSetGetRemoveRPCQuota(ThrottleType.REQUEST_SIZE);
+    testSetGetRemoveRPCQuota(ThrottleType.REQUEST_CAPACITY_UNIT);
+  }
+
+  private void testSetGetRemoveRPCQuota(ThrottleType throttleType) throws Exception {
     Admin admin = TEST_UTIL.getAdmin();
     final TableName tn = TableName.valueOf("sq_table1");
     QuotaSettings settings =
-        QuotaSettingsFactory.throttleTable(tn, ThrottleType.REQUEST_SIZE, 2L, TimeUnit.HOURS);
+        QuotaSettingsFactory.throttleTable(tn, throttleType, 2L, TimeUnit.HOURS);
     admin.setQuota(settings);
 
     // Verify the Quota in the table
-    verifyRecordPresentInQuotaTable(ThrottleType.REQUEST_SIZE, 2L, TimeUnit.HOURS);
+    verifyRecordPresentInQuotaTable(throttleType, 2L, TimeUnit.HOURS);
 
     // Verify we can retrieve it via the QuotaRetriever API
-    verifyFetchableViaAPI(admin, ThrottleType.REQUEST_SIZE, 2L, TimeUnit.HOURS);
+    verifyFetchableViaAPI(admin, throttleType, 2L, TimeUnit.HOURS);
 
     // Now, remove the quota
     QuotaSettings removeQuota = QuotaSettingsFactory.unthrottleTable(tn);
@@ -584,6 +589,19 @@ public class TestQuotaAdmin {
         assertTrue(rpcQuota.hasWriteSize());
         t = rpcQuota.getWriteSize();
         break;
+      case REQUEST_CAPACITY_UNIT:
+        assertTrue(rpcQuota.hasReqCapacityUnit());
+        t = rpcQuota.getReqCapacityUnit();
+        break;
+      case READ_CAPACITY_UNIT:
+        assertTrue(rpcQuota.hasReadCapacityUnit());
+        t = rpcQuota.getReadCapacityUnit();
+        break;
+      case WRITE_CAPACITY_UNIT:
+        assertTrue(rpcQuota.hasWriteCapacityUnit());
+        t = rpcQuota.getWriteCapacityUnit();
+        break;
+      default:
     }
 
     assertEquals(t.getSoftLimit(), limit);

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java
index 0cbc445..73b253c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaState.java
@@ -224,7 +224,7 @@ public class TestQuotaState {
     assertFalse(quotaInfo.isBypass());
     QuotaLimiter limiter = quotaInfo.getTableLimiter(TABLE_A);
     try {
-      limiter.checkQuota(TABLE_A_THROTTLE_1 + 1, TABLE_A_THROTTLE_1 + 1, 0, 0);
+      limiter.checkQuota(TABLE_A_THROTTLE_1 + 1, TABLE_A_THROTTLE_1 + 1, 0, 0, 1, 0);
       fail("Should have thrown RpcThrottlingException");
     } catch (RpcThrottlingException e) {
       // expected
@@ -242,7 +242,7 @@ public class TestQuotaState {
   private void assertThrottleException(final QuotaLimiter limiter, final int availReqs) {
     assertNoThrottleException(limiter, availReqs);
     try {
-      limiter.checkQuota(1, 1, 0, 0);
+      limiter.checkQuota(1, 1, 0, 0, 1, 0);
       fail("Should have thrown RpcThrottlingException");
     } catch (RpcThrottlingException e) {
       // expected
@@ -252,11 +252,11 @@ public class TestQuotaState {
   private void assertNoThrottleException(final QuotaLimiter limiter, final int availReqs) {
     for (int i = 0; i < availReqs; ++i) {
       try {
-        limiter.checkQuota(1, 1, 0, 0);
+        limiter.checkQuota(1, 1, 0, 0, 1, 0);
       } catch (RpcThrottlingException e) {
         fail("Unexpected RpcThrottlingException after " + i + " requests. limit=" + availReqs);
       }
-      limiter.grabQuota(1, 1, 0, 0);
+      limiter.grabQuota(1, 1, 0, 0, 1, 0);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
index 59ba322..e506a08 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaThrottle.java
@@ -509,13 +509,67 @@ public class TestQuotaThrottle {
     assertEquals(30, doGets(30, tables[1]));
   }
 
+  @Test
+  public void testTableWriteCapacityUnitThrottle() throws Exception {
+    final Admin admin = TEST_UTIL.getAdmin();
+
+    // Add 6CU/min limit
+    admin.setQuota(QuotaSettingsFactory.throttleTable(TABLE_NAMES[0],
+      ThrottleType.WRITE_CAPACITY_UNIT, 6, TimeUnit.MINUTES));
+    triggerTableCacheRefresh(false, TABLE_NAMES[0]);
+
+    // at most 6 puts should succeed because each small put costs 1 capacity unit
+    assertEquals(6, doPuts(20, 10, tables[0]));
+
+    // wait a minute, then at most 3 puts should succeed because each 1025-byte put now costs
+    // 2 capacity units
+    waitMinuteQuota();
+    assertEquals(3, doPuts(20, 1025, tables[0]));
+
+    admin.setQuota(QuotaSettingsFactory.unthrottleTable(TABLE_NAMES[0]));
+    triggerTableCacheRefresh(true, TABLE_NAMES[0]);
+  }
+
+  @Test
+  public void testTableReadCapacityUnitThrottle() throws Exception {
+    final Admin admin = TEST_UTIL.getAdmin();
+
+    // Add 6CU/min limit
+    admin.setQuota(QuotaSettingsFactory.throttleTable(TABLE_NAMES[0],
+      ThrottleType.READ_CAPACITY_UNIT, 6, TimeUnit.MINUTES));
+    triggerTableCacheRefresh(false, TABLE_NAMES[0]);
+
+    assertEquals(20, doPuts(20, 10, tables[0]));
+    // at most 6 gets should succeed because each small get result costs 1 capacity unit
+    assertEquals(6, doGets(20, tables[0]));
+
+    assertEquals(20, doPuts(20, 2015, tables[0]));
+    // wait a minute, then at most 3 gets should succeed because each get on tables[0] now
+    // returns a value large enough to cost 2 capacity units
+    waitMinuteQuota();
+    assertEquals(3, doGets(20, tables[0]));
+
+    admin.setQuota(QuotaSettingsFactory.unthrottleTable(TABLE_NAMES[0]));
+    triggerTableCacheRefresh(true, TABLE_NAMES[0]);
+  }
+
   private int doPuts(int maxOps, final Table... tables) throws Exception {
+    return doPuts(maxOps, -1, tables);
+  }
+
+  private int doPuts(int maxOps, int valueSize, final Table... tables) throws Exception {
     int count = 0;
     try {
       while (count < maxOps) {
         Put put = new Put(Bytes.toBytes("row-" + count));
-        put.addColumn(FAMILY, QUALIFIER, Bytes.toBytes("data-" + count));
-        for (final Table table: tables) {
+        byte[] value;
+        if (valueSize < 0) {
+          value = Bytes.toBytes("data-" + count);
+        } else {
+          value = generateValue(valueSize);
+        }
+        put.addColumn(FAMILY, QUALIFIER, value);
+        for (final Table table : tables) {
           table.put(put);
         }
         count += tables.length;
@@ -526,6 +580,14 @@ public class TestQuotaThrottle {
     return count;
   }
 
+  private byte[] generateValue(int valueSize) {
+    byte[] bytes = new byte[valueSize];
+    for (int i = 0; i < valueSize; i++) {
+      bytes[i] = 'a';
+    }
+    return bytes;
+  }
+
   private long doGets(int maxOps, final Table... tables) throws Exception {
     int count = 0;
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-shell/src/main/ruby/hbase/quotas.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/quotas.rb b/hbase-shell/src/main/ruby/hbase/quotas.rb
index 1ea8d28..1ba9594 100644
--- a/hbase-shell/src/main/ruby/hbase/quotas.rb
+++ b/hbase-shell/src/main/ruby/hbase/quotas.rb
@@ -259,11 +259,14 @@ module Hbase
 
     def _parse_limit(str_limit, type_cls, type)
       str_limit = str_limit.downcase
-      match = /(\d+)(req|[bkmgtp])\/(sec|min|hour|day)/.match(str_limit)
+      match = /(\d+)(req|cu|[bkmgtp])\/(sec|min|hour|day)/.match(str_limit)
       if match
         if match[2] == 'req'
           limit = match[1].to_i
           type = type_cls.valueOf(type + '_NUMBER')
+        elsif match[2] == 'cu'
+          limit = match[1].to_i
+          type = type_cls.valueOf(type + '_CAPACITY_UNIT')
         else
           limit = _size_from_str(match[1].to_i, match[2])
           type = type_cls.valueOf(type + '_SIZE')

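A rough Java rendering of the extended parser, to make the new 'cu' branch concrete (the pattern and group handling mirror _parse_limit; variable names are illustrative):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    Pattern p = Pattern.compile("(\\d+)(req|cu|[bkmgtp])/(sec|min|hour|day)");
    Matcher m = p.matcher("10cu/sec");
    if (m.matches()) {
      long limit = Long.parseLong(m.group(1));        // 10
      boolean capacityUnit = "cu".equals(m.group(2)); // selects a *_CAPACITY_UNIT type
    }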
http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-shell/src/main/ruby/shell/commands/set_quota.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_quota.rb b/hbase-shell/src/main/ruby/shell/commands/set_quota.rb
index ed593b6..3a5c136 100644
--- a/hbase-shell/src/main/ruby/shell/commands/set_quota.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/set_quota.rb
@@ -26,11 +26,12 @@ Set a quota for a user, table, or namespace.
 Syntax : set_quota TYPE => <type>, <args>
 
 TYPE => THROTTLE
-User can either set quota on read, write or on both the requests together(i.e., read+write)
+User can set a quota on read, write, or on both kinds of request together (i.e., read+write).
 The read, write, or read+write(default throttle type) request limit can be expressed using
-the form 100req/sec, 100req/min and the read, write, read+write(default throttle type) limit
+the form 100req/sec or 100req/min; the read, write, or read+write (default throttle type) size limit
 can be expressed using the form 100k/sec, 100M/min with (B, K, M, G, T, P) as valid size unit
-and (sec, min, hour, day) as valid time unit.
+and, as a capacity-unit limit, using the form 100CU/sec. The valid time units are
+(sec, min, hour, day).
 Currently the throttle limit is per machine - a limit of 100req/min
 means that each machine can execute 100req/min.
 
@@ -42,6 +43,9 @@ For example:
     hbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10M/sec'
     hbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => WRITE, USER => 'u1', LIMIT => '10M/sec'
 
+    hbase> set_quota TYPE => THROTTLE, USER => 'u1', LIMIT => '10CU/sec'
+    hbase> set_quota TYPE => THROTTLE, THROTTLE_TYPE => WRITE, USER => 'u1', LIMIT => '10CU/sec'
+
     hbase> set_quota TYPE => THROTTLE, USER => 'u1', TABLE => 't2', LIMIT => '5K/min'
     hbase> set_quota TYPE => THROTTLE, USER => 'u1', NAMESPACE => 'ns2', LIMIT => NONE
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ded2944/hbase-shell/src/test/ruby/hbase/quotas_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/quotas_test.rb b/hbase-shell/src/test/ruby/hbase/quotas_test.rb
index 3fb00c8..be6b238 100644
--- a/hbase-shell/src/test/ruby/hbase/quotas_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/quotas_test.rb
@@ -136,5 +136,32 @@ module Hbase
       assert(output.include? snapshot1)
       assert(output.include? snapshot2)
     end
+
+    define_test 'can set and remove user CU quota' do
+      command(:set_quota, TYPE => THROTTLE, USER => 'user1', LIMIT => '1CU/sec')
+      output = capture_stdout{ command(:list_quotas) }
+      assert(output.include?('USER => user1'))
+      assert(output.include?('TYPE => THROTTLE'))
+      assert(output.include?('THROTTLE_TYPE => REQUEST_CAPACITY_UNIT'))
+      assert(output.include?('LIMIT => 1CU/sec'))
+
+      command(:set_quota, TYPE => THROTTLE, USER => 'user1', LIMIT => NONE)
+      output = capture_stdout{ command(:list_quotas) }
+      assert(output.include?('0 row(s)'))
+    end
+
+    define_test 'can set and remove table CU quota' do
+      command(:set_quota, TYPE => THROTTLE, TABLE => @test_name,
+              THROTTLE_TYPE => WRITE, LIMIT => '2CU/min')
+      output = capture_stdout{ command(:list_quotas) }
+      assert(output.include?('TABLE => hbase_shell_quota_tests_table'))
+      assert(output.include?('TYPE => THROTTLE'))
+      assert(output.include?('THROTTLE_TYPE => WRITE_CAPACITY_UNIT'))
+      assert(output.include?('LIMIT => 2CU/min'))
+
+      command(:set_quota, TYPE => THROTTLE, TABLE => @test_name, LIMIT => NONE)
+      output = capture_stdout{ command(:list_quotas) }
+      assert(output.include?('0 row(s)'))
+    end
   end
 end


[43/51] [abbrv] hbase git commit: HBASE-21551 Memory leak when using scan with STREAM at server side

Posted by el...@apache.org.
HBASE-21551 Memory leak when using scan with STREAM at server side


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b854859
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b854859
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b854859

Branch: refs/heads/HBASE-20952
Commit: 3b854859f6fad44cbf31164374569a6ab23f3623
Parents: f49baf2
Author: huzheng <op...@gmail.com>
Authored: Wed Dec 5 22:57:49 2018 +0800
Committer: huzheng <op...@gmail.com>
Committed: Thu Dec 6 10:55:42 2018 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/HStoreFile.java   |  3 +-
 .../hbase/regionserver/StoreFileReader.java     |  3 ++
 .../regionserver/TestSwitchToStreamRead.java    | 50 ++++++++++++++++++++
 3 files changed, 55 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b854859/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
index 4aff949..9c94990 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStoreFile.java
@@ -126,7 +126,8 @@ public class HStoreFile implements StoreFile, StoreFileReader.Listener {
   private final AtomicInteger refCount = new AtomicInteger(0);
 
   // Set implementation must be of concurrent type
-  private final Set<StoreFileReader> streamReaders;
+  @VisibleForTesting
+  final Set<StoreFileReader> streamReaders;
 
   private final boolean noReadahead;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3b854859/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index 3fbddf2..d9008b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -186,6 +186,9 @@ public class StoreFileReader {
     if (!shared) {
       try {
         reader.close(false);
+        if (this.listener != null) {
+          this.listener.storeFileReaderClosed(this);
+        }
       } catch (IOException e) {
         LOG.warn("failed to close stream reader", e);
       }

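With this hook, a non-shared (stream) reader now notifies its listener when it closes, letting HStoreFile evict it from streamReaders instead of holding it forever. The callback shape, per the StoreFileReader.Listener interface this code relies on (HStoreFile implements it, as shown in the diff above):

    interface Listener {
      // invoked by StoreFileReader.close() for non-shared (stream) readers;
      // HStoreFile's implementation removes the reader from streamReaders
      void storeFileReaderClosed(StoreFileReader reader);
    }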
http://git-wip-us.apache.org/repos/asf/hbase/blob/3b854859/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
index 815643d..037b13e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
@@ -23,8 +23,13 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -33,6 +38,7 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterBase;
@@ -42,6 +48,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Ignore;
@@ -98,6 +105,49 @@ public class TestSwitchToStreamRead {
     UTIL.cleanupTestDir();
   }
 
+  private Set<StoreFileReader> getStreamReaders() {
+    List<HStore> stores = REGION.getStores();
+    Assert.assertEquals(1, stores.size());
+    HStore firstStore = stores.get(0);
+    Assert.assertNotNull(firstStore);
+    Collection<HStoreFile> storeFiles = firstStore.getStorefiles();
+    Assert.assertEquals(1, storeFiles.size());
+    HStoreFile firstStoreFile = storeFiles.iterator().next();
+    Assert.assertNotNull(firstStoreFile);
+    return Collections.unmodifiableSet(firstStoreFile.streamReaders);
+  }
+
+  /**
+   * Test Case for HBASE-21551
+   */
+  @Test
+  public void testStreamReadersCleanup() throws IOException {
+    Set<StoreFileReader> streamReaders = getStreamReaders();
+    Assert.assertEquals(0, getStreamReaders().size());
+    try (RegionScannerImpl scanner = REGION.getScanner(new Scan().setReadType(ReadType.STREAM))) {
+      StoreScanner storeScanner =
+          (StoreScanner) (scanner).getStoreHeapForTesting().getCurrentForTesting();
+      List<StoreFileScanner> sfScanners = storeScanner.getAllScannersForTesting().stream()
+          .filter(kvs -> kvs instanceof StoreFileScanner).map(kvs -> (StoreFileScanner) kvs)
+          .collect(Collectors.toList());
+      Assert.assertEquals(1, sfScanners.size());
+      StoreFileScanner sfScanner = sfScanners.get(0);
+      Assert.assertFalse(sfScanner.getReader().shared);
+
+      // There should be a stream reader
+      Assert.assertEquals(1, getStreamReaders().size());
+    }
+    Assert.assertEquals(0, getStreamReaders().size());
+
+    // The streamReaders set should be cleared after region close, even if some stream scanners
+    // are still open.
+    RegionScannerImpl scanner = REGION.getScanner(new Scan().setReadType(ReadType.STREAM));
+    Assert.assertNotNull(scanner);
+    Assert.assertEquals(1, getStreamReaders().size());
+    REGION.close();
+    Assert.assertEquals(0, streamReaders.size());
+  }
+
   @Test
   public void test() throws IOException {
     try (RegionScannerImpl scanner = REGION.getScanner(new Scan())) {


[50/51] [abbrv] hbase git commit: HBASE-21554 Show replication endpoint classname for replication peer on master web UI

Posted by el...@apache.org.
HBASE-21554 Show replication endpoint classname for replication peer on master web UI


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8d7061a4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8d7061a4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8d7061a4

Branch: refs/heads/HBASE-20952
Commit: 8d7061a487357344f10ee260979cc2c47cd833dd
Parents: e0e0694
Author: Guanghao Zhang <zg...@apache.org>
Authored: Thu Dec 6 10:54:14 2018 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Fri Dec 7 13:31:59 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon     | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/8d7061a4/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 7bb6c40..da44052 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -621,6 +621,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
     <tr>
         <th>Peer Id</th>
         <th>Cluster Key</th>
+        <th>Endpoint</th>
         <th>State</th>
         <th>IsSerial</th>
         <th>Remote WAL</th>
@@ -641,6 +642,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
     <tr>
         <td><% peerId %></td>
         <td><% peerConfig.getClusterKey() %></td>
+        <td><% peerConfig.getReplicationEndpointImpl() %></td>
         <td><% peer.isEnabled() ? "ENABLED" : "DISABLED" %></td>
         <td><% peerConfig.isSerial() %></td>
         <td><% peerConfig.getRemoteWALDir() == null ? "" : peerConfig.getRemoteWALDir() %>


[37/51] [abbrv] hbase git commit: HBASE-21534 TestAssignmentManager is flakey

Posted by el...@apache.org.
HBASE-21534 TestAssignmentManager is flakey


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d525ec6a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d525ec6a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d525ec6a

Branch: refs/heads/HBASE-20952
Commit: d525ec6a1214f97bda560095f9775ca96d82f030
Parents: 5c209f2
Author: Duo Zhang <zh...@apache.org>
Authored: Fri Nov 30 15:26:04 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Dec 1 20:41:11 2018 +0800

----------------------------------------------------------------------
 .../master/assignment/TestAssignmentManagerBase.java  | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d525ec6a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
index 7ab37bc..7b5c550 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertNotEquals;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.io.UncheckedIOException;
 import java.net.SocketTimeoutException;
 import java.util.Arrays;
 import java.util.NavigableMap;
@@ -38,8 +39,10 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
@@ -271,7 +274,18 @@ public abstract class TestAssignmentManagerBase {
   }
 
   protected void doCrash(final ServerName serverName) {
+    this.master.getServerManager().moveFromOnlineToDeadServers(serverName);
     this.am.submitServerCrash(serverName, false/* No WALs here */);
+    // add a new server so we never kill all the region servers, which could hang the UTs
+    int maxPort = this.master.getServerManager().getOnlineServersList().stream()
+      .mapToInt(ServerName::getPort).max().getAsInt();
+    ServerName newSn = ServerName.valueOf("localhost", 100 + maxPort + 1, 1);
+    try {
+      this.master.getServerManager().regionServerReport(newSn, ServerMetricsBuilder.of(newSn));
+    } catch (YouAreDeadException e) {
+      // should not happen
+      throw new UncheckedIOException(e);
+    }
   }
 
   protected void doRestart(final ServerName serverName) {


[25/51] [abbrv] hbase git commit: HBASE-21507 Compaction failed when executing the AbstractMultiFileWriter.beforeShipped() method

Posted by el...@apache.org.
HBASE-21507 Compaction failed when executing the AbstractMultiFileWriter.beforeShipped() method

Signed-off-by: zhangduo <zh...@apache.org>
Signed-off-by: Anoop Sam John <an...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6f15ceca
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6f15ceca
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6f15ceca

Branch: refs/heads/HBASE-20952
Commit: 6f15cecaed2f1f76bfe1880b7c578ed369daa5d5
Parents: 39bc458
Author: lixiaobao <li...@gmail.com>
Authored: Tue Nov 27 20:30:23 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Tue Nov 27 20:56:23 2018 +0800

----------------------------------------------------------------------
 .../hbase/regionserver/AbstractMultiFileWriter.java      | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/6f15ceca/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
index 2fdab81..43d0ad8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
@@ -21,12 +21,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-
 import org.apache.hadoop.fs.Path;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.regionserver.CellSink;
 
 /**
  * Base class for cell sink that separates the provided cells into multiple files.
@@ -119,9 +117,12 @@ public abstract class AbstractMultiFileWriter implements CellSink, ShipperListen
 
   @Override
   public void beforeShipped() throws IOException {
-    if (this.writers() != null) {
-      for (StoreFileWriter writer : writers()) {
-        writer.beforeShipped();
+    Collection<StoreFileWriter> writers = writers();
+    if (writers != null) {
+      for (StoreFileWriter writer : writers) {
+        if (writer != null) {
+          writer.beforeShipped();
+        }
       }
     }
   }


[45/51] [abbrv] hbase git commit: Add 'strong' notice that 2.1.1 and 2.0.3 have a memory leak

Posted by el...@apache.org.
Add 'strong' notice that 2.1.1 and 2.0.3 have a memory leak


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12e75a8a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12e75a8a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12e75a8a

Branch: refs/heads/HBASE-20952
Commit: 12e75a8a635785b279900b6905c86a1617526c72
Parents: 67ab8b8
Author: stack <st...@apache.org>
Authored: Wed Dec 5 21:50:39 2018 -0800
Committer: stack <st...@apache.org>
Committed: Wed Dec 5 21:50:39 2018 -0800

----------------------------------------------------------------------
 src/site/xdoc/downloads.xml | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/12e75a8a/src/site/xdoc/downloads.xml
----------------------------------------------------------------------
diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml
index 7e81afd..5d3f2a6 100644
--- a/src/site/xdoc/downloads.xml
+++ b/src/site/xdoc/downloads.xml
@@ -32,6 +32,7 @@ under the License.
   <a href="https://www.apache.org/dyn/closer.cgi#verify">Verify The Integrity Of The Files</a> for
   how to verify your mirrored downloads.
   </p>
+  <p style="color:red;"><strong>NOTE: 2.1.1 and 2.0.3 have a serious memory leak. See HBASE-21551. We are working on replacement releases.</strong></p>
   <section name="Releases">
   <table>
     <tr>


[14/51] [abbrv] hbase git commit: HBASE-21503 Replication normal source can get stuck due to potential race conditions between source wal reader and wal provider initialization threads.

Posted by el...@apache.org.
HBASE-21503 Replication normal source can get stuck due to potential race conditions between source wal reader and wal provider initialization threads.

Found and analysed by Wellington Chevreuil


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5cc845b7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5cc845b7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5cc845b7

Branch: refs/heads/HBASE-20952
Commit: 5cc845b713853645f1e25b29caa556d79cfdc551
Parents: 5ded294
Author: Duo Zhang <zh...@apache.org>
Authored: Wed Nov 21 09:57:24 2018 +0800
Committer: Duo Zhang <zh...@apache.org>
Committed: Wed Nov 21 17:14:54 2018 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/wal/AbstractFSWALProvider.java | 64 +++++++++-------
 .../wal/TestRaceBetweenGetWALAndGetWALs.java    | 78 ++++++++++++++++++++
 2 files changed, 117 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/5cc845b7/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
index ccdc95f..1f24548 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
@@ -24,6 +24,8 @@ import java.util.Collections;
 import java.util.List;
 import java.util.Objects;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -44,6 +46,7 @@ import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.LeaseNotRecoveredException;
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 /**
  * Base class of a WAL Provider that returns a single thread safe WAL that writes to Hadoop FS. By
@@ -86,9 +89,10 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen
   protected String logPrefix;
 
   /**
-   * we synchronized on walCreateLock to prevent wal recreation in different threads
+   * We use walCreateLock to prevent wal recreation in different threads, and also prevent getWALs
+   * missing the newly created WAL, see HBASE-21503 for more details.
    */
-  private final Object walCreateLock = new Object();
+  private final ReadWriteLock walCreateLock = new ReentrantReadWriteLock();
 
   /**
    * @param factory factory that made us, identity used for FS layout. may not be null
@@ -119,38 +123,48 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen
 
   @Override
   public List<WAL> getWALs() {
-    if (wal == null) {
-      return Collections.emptyList();
+    if (wal != null) {
+      return Lists.newArrayList(wal);
+    }
+    walCreateLock.readLock().lock();
+    try {
+      if (wal == null) {
+        return Collections.emptyList();
+      } else {
+        return Lists.newArrayList(wal);
+      }
+    } finally {
+      walCreateLock.readLock().unlock();
     }
-    List<WAL> wals = new ArrayList<>(1);
-    wals.add(wal);
-    return wals;
   }
 
   @Override
   public T getWAL(RegionInfo region) throws IOException {
     T walCopy = wal;
-    if (walCopy == null) {
-      // only lock when need to create wal, and need to lock since
-      // creating hlog on fs is time consuming
-      synchronized (walCreateLock) {
-        walCopy = wal;
-        if (walCopy == null) {
-          walCopy = createWAL();
-          boolean succ = false;
-          try {
-            walCopy.init();
-            succ = true;
-          } finally {
-            if (!succ) {
-              walCopy.close();
-            }
-          }
-          wal = walCopy;
+    if (walCopy != null) {
+      return walCopy;
+    }
+    walCreateLock.writeLock().lock();
+    try {
+      walCopy = wal;
+      if (walCopy != null) {
+        return walCopy;
+      }
+      walCopy = createWAL();
+      boolean succ = false;
+      try {
+        walCopy.init();
+        succ = true;
+      } finally {
+        if (!succ) {
+          walCopy.close();
         }
       }
+      wal = walCopy;
+      return walCopy;
+    } finally {
+      walCreateLock.writeLock().unlock();
     }
-    return walCopy;
   }
 
   protected abstract T createWAL() throws IOException;
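Abstracting away from HBase for a moment, the scheme the patch adopts is lazy initialization
guarded by a ReentrantReadWriteLock: an unsynchronized fast path on a volatile field, a write
lock around creation, and a read lock for bulk readers so they cannot observe a half-created
instance. A minimal, self-contained sketch of the same idea (hypothetical names, not HBase code):

import java.util.Collections;
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Supplier;

// Hypothetical stand-in for the provider; T plays the role of the WAL.
public class LazyHolder<T> {

  // volatile so the unsynchronized fast paths see a fully published instance
  private volatile T instance;

  private final ReadWriteLock createLock = new ReentrantReadWriteLock();

  private final Supplier<T> factory;

  public LazyHolder(Supplier<T> factory) {
    this.factory = factory;
  }

  /** Equivalent of getWAL(): create on first use, under the write lock. */
  public T get() {
    T copy = instance;
    if (copy != null) {
      return copy; // fast path: no locking once created
    }
    createLock.writeLock().lock();
    try {
      copy = instance;
      if (copy == null) {
        copy = factory.get();  // only one thread creates
        instance = copy;       // publish before releasing the write lock
      }
      return copy;
    } finally {
      createLock.writeLock().unlock();
    }
  }

  /** Equivalent of getWALs(): the read lock blocks while creation is in flight. */
  public List<T> getAll() {
    T copy = instance;
    if (copy != null) {
      return Collections.singletonList(copy);
    }
    createLock.readLock().lock();
    try {
      // either creation has not started yet (empty) or it has fully finished
      return instance == null ? Collections.emptyList() : Collections.singletonList(instance);
    } finally {
      createLock.readLock().unlock();
    }
  }
}

Because getAll() must acquire the read lock, it waits out any creator holding the write lock;
that is exactly how the patched getWALs avoids missing a WAL that is in the middle of being
created, which is the race the test below exercises.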

http://git-wip-us.apache.org/repos/asf/hbase/blob/5cc845b7/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java
new file mode 100644
index 0000000..26ff118
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestRaceBetweenGetWALAndGetWALs.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.wal;
+
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertSame;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.Future;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+
+/**
+ * Testcase for HBASE-21503.
+ */
+@Category({ RegionServerTests.class, SmallTests.class })
+public class TestRaceBetweenGetWALAndGetWALs {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRaceBetweenGetWALAndGetWALs.class);
+
+  private static Future<List<WAL>> GET_WALS_FUTURE;
+
+  private static final class FSWALProvider extends AbstractFSWALProvider<AbstractFSWAL<?>> {
+
+    @Override
+    protected AbstractFSWAL<?> createWAL() throws IOException {
+      // just like what a WALListener may do, schedule an asynchronous task to call the
+      // getWALs method.
+      GET_WALS_FUTURE = ForkJoinPool.commonPool().submit(this::getWALs);
+      // sleep a while so that the getWALs call arrives before we return
+      Threads.sleep(2000);
+      return Mockito.mock(AbstractFSWAL.class);
+    }
+
+    @Override
+    protected void doInit(Configuration conf) throws IOException {
+    }
+  }
+
+  @Test
+  public void testRace() throws IOException, InterruptedException, ExecutionException {
+    FSWALProvider p = new FSWALProvider();
+    WAL wal = p.getWAL(null);
+    assertNotNull(GET_WALS_FUTURE);
+    List<WAL> wals = GET_WALS_FUTURE.get();
+    assertSame(wal, Iterables.getOnlyElement(wals));
+  }
+}


[39/51] [abbrv] hbase git commit: HBASE-21543 Add 2.0.3 release to downloads page

Posted by el...@apache.org.
HBASE-21543 Add 2.0.3 release to downloads page


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/59cfe2e3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/59cfe2e3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/59cfe2e3

Branch: refs/heads/HBASE-20952
Commit: 59cfe2e31ba626421678a3a862f889c0c574867d
Parents: dec4913
Author: stack <st...@apache.org>
Authored: Sun Dec 2 21:56:23 2018 -0800
Committer: stack <st...@apache.org>
Committed: Sun Dec 2 21:56:23 2018 -0800

----------------------------------------------------------------------
 src/site/xdoc/downloads.xml | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/59cfe2e3/src/site/xdoc/downloads.xml
----------------------------------------------------------------------
diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml
index e526f9c..7e81afd 100644
--- a/src/site/xdoc/downloads.xml
+++ b/src/site/xdoc/downloads.xml
@@ -66,23 +66,23 @@ under the License.
     </tr>
     <tr>
       <td style="test-align: left">
-        2.0.2
+        2.0.3
       </td>
       <td style="test-align: left">
-        2018/06/19
+        2018/12/02
       </td>
       <td style="test-align: left">
-        <a href="https://apache.org/dist/hbase/2.0.2/compatibility_report_2.0.1_2.0.2.html">2.0.1 vs 2.0.2</a>
+        <a href="https://apache.org/dist/hbase/2.0.3/compat-check-report-2.0.2-vs-2.0.3.html">2.0.2 vs 2.0.3</a>
       </td>
       <td style="test-align: left">
-        <a href="https://apache.org/dist/hbase/2.0.2/CHANGES.md">Changes</a>
+        <a href="https://apache.org/dist/hbase/2.0.3/CHANGES.md">Changes</a>
       </td>
       <td style="test-align: left">
-        <a href="https://apache.org/dist/hbase/2.0.2/RELEASENOTES.md">Release Notes</a>
+        <a href="https://apache.org/dist/hbase/2.0.3/RELEASENOTES.md">Release Notes</a>
       </td>
       <td style="test-align: left">
-        <a href="https://www.apache.org/dyn/closer.lua/hbase/2.0.2/hbase-2.0.2-src.tar.gz">src</a> (<a href="https://apache.org/dist/hbase/2.0.2/hbase-2.0.2-src.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.0.2/hbase-2.0.2-src.tar.gz.asc">asc</a>) <br />
-        <a href="https://www.apache.org/dyn/closer.lua/hbase/2.0.2/hbase-2.0.2-bin.tar.gz">bin</a> (<a href="https://apache.org/dist/hbase/2.0.2/hbase-2.0.2-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.0.2/hbase-2.0.2-bin.tar.gz.asc">asc</a>)
+        <a href="https://apache.org/dist/hbase/2.0.3/hbase-2.0.3-src.tar.gz">src</a> (<a href="https://apache.org/dist/hbase/2.0.3/hbase-2.0.3-src.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.0.3/hbase-2.0.3-src.tar.gz.asc">asc</a>) <br />
+        <a href="https://apache.org/dist/hbase/2.0.3/hbase-2.0.3-bin.tar.gz">bin</a> (<a href="https://apache.org/dist/hbase/2.0.3/hbase-2.0.3-src.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/2.0.3/hbase-2.0.3-src.tar.gz.asc">asc</a>)
       </td>
     </tr>
     <tr>


[09/51] [abbrv] hbase git commit: HBASE-21494 NPE when loading RecoverStandByProcedure

Posted by el...@apache.org.
HBASE-21494 NPE when loading RecoverStandByProcedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b329e6e3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b329e6e3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b329e6e3

Branch: refs/heads/HBASE-20952
Commit: b329e6e3f271bc22ec4a6f4dd71a8e8b422db3d0
Parents: f555258
Author: zhangduo <zh...@apache.org>
Authored: Sun Nov 18 15:58:53 2018 +0800
Committer: Duo Zhang <zh...@apache.org>
Committed: Mon Nov 19 09:35:18 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/master/HMaster.java |   4 +-
 .../SyncReplicationReplayWALManager.java        |   4 +-
 .../TestRegisterPeerWorkerWhenRestarting.java   | 127 +++++++++++++++++++
 ...tPeerSyncReplicationStateProcedureRetry.java |   5 +
 4 files changed, 137 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b329e6e3/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 31dc208..e1d3740 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -778,7 +778,6 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.splitOrMergeTracker.start();
 
     this.replicationPeerManager = ReplicationPeerManager.create(zooKeeper, conf);
-    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);
 
     this.drainingServerTracker = new DrainingServerTracker(zooKeeper, this, this.serverManager);
     this.drainingServerTracker.start();
@@ -949,7 +948,10 @@ public class HMaster extends HRegionServer implements MasterServices {
     }
 
     status.setStatus("Initialize ServerManager and schedule SCP for crash servers");
+    // The two managers below must be created before loading procedures, as they will be used
+    // during loading.
     this.serverManager = createServerManager(this);
+    this.syncReplicationReplayWALManager = new SyncReplicationReplayWALManager(this);
     createProcedureExecutor();
     @SuppressWarnings("rawtypes")
     Map<Class<? extends Procedure>, List<Procedure<MasterProcedureEnv>>> procsByType =
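The ordering constraint is easiest to see in isolation. Below is a minimal, runnable sketch
(hypothetical types, not the real HMaster/ProcedureExecutor) of why a procedure restored during
load hits an NPE if the manager it needs is constructed afterwards:

// Hypothetical stand-ins; only the construction order matters here.
final class MasterStartupSketch {

  static class ServerManager { }

  static class ReplayWALManager { }

  private ServerManager serverManager;
  private ReplayWALManager replayWALManager;

  // simulates createProcedureExecutor(): restored procedures call back in
  private void loadProcedures() {
    // a restored RecoverStandbyProcedure does the equivalent of:
    replayWALManager.toString(); // NPE if the manager does not exist yet
  }

  void startupBroken() {
    loadProcedures();                          // NPE: replayWALManager is still null
    replayWALManager = new ReplayWALManager();
  }

  void startupFixed() {
    serverManager = new ServerManager();       // both managers first,
    replayWALManager = new ReplayWALManager(); // as in the patch above,
    loadProcedures();                          // then load procedures
  }

  public static void main(String[] args) {
    new MasterStartupSketch().startupFixed();  // runs cleanly
    new MasterStartupSketch().startupBroken(); // throws NullPointerException
  }
}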

http://git-wip-us.apache.org/repos/asf/hbase/blob/b329e6e3/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.java
index 89e97bb..ae624b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/SyncReplicationReplayWALManager.java
@@ -146,12 +146,12 @@ public class SyncReplicationReplayWALManager {
     this.fs = services.getMasterFileSystem().getWALFileSystem();
     this.walRootDir = services.getMasterFileSystem().getWALRootDir();
     this.remoteWALDir = new Path(this.walRootDir, ReplicationUtils.REMOTE_WAL_DIR_NAME);
-    MasterProcedureScheduler scheduler =
-      services.getMasterProcedureExecutor().getEnvironment().getProcedureScheduler();
     serverManager.registerListener(new ServerListener() {
 
       @Override
       public void serverAdded(ServerName serverName) {
+        MasterProcedureScheduler scheduler =
+          services.getMasterProcedureExecutor().getEnvironment().getProcedureScheduler();
         for (UsedReplayWorkersForPeer usedWorkers : usedWorkersByPeer.values()) {
           synchronized (usedWorkers) {
             usedWorkers.wake(scheduler);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b329e6e3/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java
new file mode 100644
index 0000000..72aa32d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestRegisterPeerWorkerWhenRestarting.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverStandbyState.DISPATCH_WALS_VALUE;
+import static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RecoverStandbyState.UNREGISTER_PEER_FROM_WORKER_STORAGE_VALUE;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.replication.SyncReplicationState;
+import org.apache.hadoop.hbase.replication.SyncReplicationTestBase;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
+import org.apache.zookeeper.KeeperException;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Testcase for HBASE-21494.
+ */
+@Category({ MasterTests.class, LargeTests.class })
+public class TestRegisterPeerWorkerWhenRestarting extends SyncReplicationTestBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestRegisterPeerWorkerWhenRestarting.class);
+
+  private static volatile boolean FAIL = false;
+
+  public static final class HMasterForTest extends HMaster {
+
+    public HMasterForTest(Configuration conf) throws IOException, KeeperException {
+      super(conf);
+    }
+
+    @Override
+    public void remoteProcedureCompleted(long procId) {
+      if (FAIL && getMasterProcedureExecutor()
+        .getProcedure(procId) instanceof SyncReplicationReplayWALRemoteProcedure) {
+        throw new RuntimeException("Inject error");
+      }
+      super.remoteProcedureCompleted(procId);
+    }
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL2.getConfiguration().setClass(HConstants.MASTER_IMPL, HMasterForTest.class, HMaster.class);
+    SyncReplicationTestBase.setUp();
+  }
+
+  @Test
+  public void testRestart() throws Exception {
+    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
+      SyncReplicationState.STANDBY);
+    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
+      SyncReplicationState.ACTIVE);
+
+    UTIL1.getAdmin().disableReplicationPeer(PEER_ID);
+    write(UTIL1, 0, 100);
+    Thread.sleep(2000);
+    // peer is disabled so no data has been replicated
+    verifyNotReplicatedThroughRegion(UTIL2, 0, 100);
+
+    // transition A (ACTIVE) to DA (DOWNGRADE_ACTIVE) first to avoid too many error logs.
+    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
+      SyncReplicationState.DOWNGRADE_ACTIVE);
+    HMaster master = UTIL2.getHBaseCluster().getMaster();
+    // make sure the transition cannot succeed
+    FAIL = true;
+    ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
+    Thread t = new Thread() {
+
+      @Override
+      public void run() {
+        try {
+          UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
+            SyncReplicationState.DOWNGRADE_ACTIVE);
+        } catch (IOException e) {
+          throw new UncheckedIOException(e);
+        }
+      }
+    };
+    t.start();
+    // wait until we are in one of the states where the peer worker must be registered when restarting
+    UTIL2.waitFor(60000,
+      () -> procExec.getProcedures().stream().filter(p -> p instanceof RecoverStandbyProcedure)
+        .map(p -> (RecoverStandbyProcedure) p)
+        .anyMatch(p -> p.getCurrentStateId() == DISPATCH_WALS_VALUE ||
+          p.getCurrentStateId() == UNREGISTER_PEER_FROM_WORKER_STORAGE_VALUE));
+    // failover to another master
+    MasterThread mt = UTIL2.getMiniHBaseCluster().getMasterThread();
+    mt.getMaster().abort("for testing");
+    mt.join();
+    FAIL = false;
+    t.join();
+    // make sure the new master can finish the transition
+    assertEquals(SyncReplicationState.DOWNGRADE_ACTIVE,
+      UTIL2.getAdmin().getReplicationPeerSyncReplicationState(PEER_ID));
+    verify(UTIL2, 0, 100);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/b329e6e3/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureRetry.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureRetry.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureRetry.java
index 1c4a819..9b73039 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureRetry.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestTransitPeerSyncReplicationStateProcedureRetry.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.master.replication;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.IOException;
 import java.io.UncheckedIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -90,5 +92,8 @@ public class TestTransitPeerSyncReplicationStateProcedureRetry extends SyncRepli
       .mapToLong(Procedure::getProcId).min().getAsLong();
     MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId);
     ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+    assertEquals(SyncReplicationState.DOWNGRADE_ACTIVE,
+      UTIL2.getAdmin().getReplicationPeerSyncReplicationState(PEER_ID));
+    verify(UTIL2, 0, 100);
   }
 }


[30/51] [abbrv] hbase git commit: HBASE-21524 Fix logging in ConnectionImplementation.isTableAvailable()

Posted by el...@apache.org.
HBASE-21524 Fix logging in ConnectionImplementation.isTableAvailable()

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8a68f0d6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8a68f0d6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8a68f0d6

Branch: refs/heads/HBASE-20952
Commit: 8a68f0d65636bb35e768d9e80e8d3d48d00b58c9
Parents: f1f2b5a
Author: Josh Elser <el...@apache.org>
Authored: Wed Nov 28 22:03:55 2018 -0500
Committer: Josh Elser <el...@apache.org>
Committed: Thu Nov 29 11:40:06 2018 -0500

----------------------------------------------------------------------
 .../hbase/client/ConnectionImplementation.java  | 20 ++++++++------------
 1 file changed, 8 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/8a68f0d6/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 67fe551..da6b592 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -626,7 +626,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     checkClosed();
     try {
       if (!isTableEnabled(tableName)) {
-        LOG.debug("Table " + tableName + " not enabled");
+        LOG.debug("Table {} not enabled", tableName);
         return false;
       }
       List<Pair<RegionInfo, ServerName>> locations =
@@ -637,10 +637,8 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       for (Pair<RegionInfo, ServerName> pair : locations) {
         RegionInfo info = pair.getFirst();
         if (pair.getSecond() == null) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Table " + tableName + " has not deployed region " + pair.getFirst()
-                .getEncodedName());
-          }
+          LOG.debug("Table {} has not deployed region {}", tableName,
+              pair.getFirst().getEncodedName());
           notDeployed++;
         } else if (splitKeys != null
             && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
@@ -658,23 +656,21 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       }
       if (notDeployed > 0) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Table " + tableName + " has " + notDeployed + " regions");
+          LOG.debug("Table {} has {} regions not deployed", tableName, notDeployed);
         }
         return false;
       } else if (splitKeys != null && regionCount != splitKeys.length + 1) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Table " + tableName + " expected to have " + (splitKeys.length + 1)
-              + " regions, but only " + regionCount + " available");
+          LOG.debug("Table {} expected to have {} regions, but only {} available", tableName,
+              splitKeys.length + 1, regionCount);
         }
         return false;
       } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Table " + tableName + " should be available");
-        }
+        LOG.trace("Table {} should be available", tableName);
         return true;
       }
     } catch (TableNotFoundException tnfe) {
-      LOG.warn("Table " + tableName + " not enabled, it is not exists");
+      LOG.warn("Table {} does not exist", tableName);
       return false;
     }
   }
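The pattern being applied here is SLF4J parameterized logging: the {} placeholders defer
message formatting until the level is known to be enabled, so an explicit isDebugEnabled()
guard is only worth keeping when computing an argument is itself expensive. A small sketch
(class and method names are made up, not part of the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingSketch {

  private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

  void example(String tableName, int notDeployed) {
    // Concatenation builds the string even when DEBUG is off:
    //   LOG.debug("Table " + tableName + " has " + notDeployed + " regions not deployed");

    // Placeholders defer formatting until the level check passes, so no
    // isDebugEnabled() guard is needed for cheap arguments:
    LOG.debug("Table {} has {} regions not deployed", tableName, notDeployed);

    // A guard still pays off when an argument is expensive to compute:
    if (LOG.isDebugEnabled()) {
      LOG.debug("Costly summary: {}", buildExpensiveSummary());
    }
  }

  private String buildExpensiveSummary() {
    return "..."; // placeholder for an expensive computation
  }
}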


[07/51] [abbrv] hbase git commit: HBASE-21489 TestShell is broken

Posted by el...@apache.org.
HBASE-21489 TestShell is broken

Signed-off-by: Duo Zhang <zh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f874232a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f874232a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f874232a

Branch: refs/heads/HBASE-20952
Commit: f874232a679c4806e75f14dd16b3d9c9648cef59
Parents: 825e14b
Author: Reid Chan <re...@apache.org>
Authored: Sat Nov 17 13:20:46 2018 +0800
Committer: Reid Chan <re...@apache.org>
Committed: Sun Nov 18 12:14:52 2018 +0800

----------------------------------------------------------------------
 hbase-shell/src/main/ruby/hbase/security.rb | 28 ++++++++++++++----------
 1 file changed, 16 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/f874232a/hbase-shell/src/main/ruby/hbase/security.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/security.rb b/hbase-shell/src/main/ruby/hbase/security.rb
index f16ee6a..5275898 100644
--- a/hbase-shell/src/main/ruby/hbase/security.rb
+++ b/hbase-shell/src/main/ruby/hbase/security.rb
@@ -142,21 +142,25 @@ module Hbase
       res = {}
       count = 0
       all_perms.each do |value|
-        user_name = String.from_java_bytes(value.getUser)
+        user_name = value.getUser
+        permission = value.getPermission
+        table = ''
+        family = ''
+        qualifier = ''
         if !table_regex.nil? && isNamespace?(table_regex)
-          namespace = value.getNamespace
+          nsPerm = permission.to_java(org.apache.hadoop.hbase.security.access.NamespacePermission)
+          namespace = nsPerm.getNamespace
         else
-          namespace = !value.getTableName.nil? ? value.getTableName.getNamespaceAsString : value.getNamespace
+          tblPerm = permission.to_java(org.apache.hadoop.hbase.security.access.TablePermission)
+          namespace = tblPerm.getNamespace
+          table = !tblPerm.getTableName.nil? ? tblPerm.getTableName.getNameAsString : ''
+          family = !tblPerm.getFamily.nil? ?
+                    org.apache.hadoop.hbase.util.Bytes.toStringBinary(tblPerm.getFamily) : ''
+          qualifier = !tblPerm.getQualifier.nil? ?
+                       org.apache.hadoop.hbase.util.Bytes.toStringBinary(tblPerm.getQualifier) : ''
         end
-        table = !value.getTableName.nil? ? value.getTableName.getNameAsString : ''
-        family = !value.getFamily.nil? ?
-          org.apache.hadoop.hbase.util.Bytes.toStringBinary(value.getFamily) :
-          ''
-        qualifier = !value.getQualifier.nil? ?
-          org.apache.hadoop.hbase.util.Bytes.toStringBinary(value.getQualifier) :
-          ''
-
-        action = org.apache.hadoop.hbase.security.access.Permission.new value.getActions
+
+        action = org.apache.hadoop.hbase.security.access.Permission.new permission.getActions
 
         if block_given?
           yield(user_name, "#{namespace},#{table},#{family},#{qualifier}: #{action}")


[22/51] [abbrv] hbase git commit: HBASE-21498 Master OOM when SplitTableRegionProcedure creates a new CacheConfig and instantiates a new BlockCache

Posted by el...@apache.org.
HBASE-21498 Master OOM when SplitTableRegionProcedure creates a new CacheConfig and instantiates a new BlockCache


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/34e1d744
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/34e1d744
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/34e1d744

Branch: refs/heads/HBASE-20952
Commit: 34e1d744f7d3d836deeda0704711afc969b37f16
Parents: 1acbd36
Author: Guanghao Zhang <zg...@apache.org>
Authored: Mon Nov 19 22:10:43 2018 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Tue Nov 27 14:20:15 2018 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/io/hfile/CacheConfig.java      |  44 ++-----
 .../apache/hadoop/hbase/mob/MobCacheConfig.java |   5 -
 .../hbase/regionserver/HRegionServer.java       |  11 +-
 .../hbase/io/encoding/TestEncodedSeekers.java   |   1 +
 .../hbase/io/hfile/TestBlockCacheReporting.java |  16 ++-
 .../hadoop/hbase/io/hfile/TestCacheConfig.java  |   4 +
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java |   2 +-
 .../io/hfile/TestForceCacheImportantBlocks.java |   1 +
 .../hbase/io/hfile/TestHFileBlockIndex.java     | 115 +++++++++----------
 .../hadoop/hbase/io/hfile/TestPrefetch.java     |   1 +
 .../io/hfile/TestScannerFromBucketCache.java    |   1 +
 .../TestScannerSelectionUsingKeyRange.java      |   1 +
 .../io/hfile/TestScannerSelectionUsingTTL.java  |   1 +
 .../hbase/regionserver/TestBlocksRead.java      |   1 +
 .../hbase/regionserver/TestBlocksScanned.java   |   2 +-
 .../regionserver/TestCacheOnWriteInSchema.java  |   1 +
 .../regionserver/TestCompoundBloomFilter.java   |   1 +
 .../hbase/regionserver/TestHStoreFile.java      |   2 +-
 .../hbase/regionserver/TestRecoveredEdits.java  |   2 +
 19 files changed, 99 insertions(+), 113 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
index 147568e..a022552 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CacheConfig.java
@@ -215,7 +215,7 @@ public class CacheConfig {
    * @param family column family configuration
    */
   public CacheConfig(Configuration conf, ColumnFamilyDescriptor family) {
-    this(CacheConfig.instantiateBlockCache(conf),
+    this(GLOBAL_BLOCK_CACHE_INSTANCE,
         conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ)
            && family.isBlockCacheEnabled(),
         family.isInMemory(),
@@ -245,14 +245,10 @@ public class CacheConfig {
    * @param conf hbase configuration
    */
   public CacheConfig(Configuration conf) {
-    this(conf, true);
-  }
-
-  public CacheConfig(Configuration conf, boolean enableBlockCache) {
-    this(conf, enableBlockCache,
+    this(GLOBAL_BLOCK_CACHE_INSTANCE,
         conf.getBoolean(CACHE_DATA_ON_READ_KEY, DEFAULT_CACHE_DATA_ON_READ),
         DEFAULT_IN_MEMORY, // This is a family-level setting so can't be set
-                           // strictly from conf
+        // strictly from conf
         conf.getBoolean(CACHE_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_DATA_ON_WRITE),
         conf.getBoolean(CACHE_INDEX_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_INDEXES_ON_WRITE),
         conf.getBoolean(CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, DEFAULT_CACHE_BLOOMS_ON_WRITE),
@@ -263,28 +259,6 @@ public class CacheConfig {
     LOG.info("Created cacheConfig: " + this);
   }
 
-  private CacheConfig(Configuration conf, boolean enableBlockCache,
-      final boolean cacheDataOnRead, final boolean inMemory,
-      final boolean cacheDataOnWrite, final boolean cacheIndexesOnWrite,
-      final boolean cacheBloomsOnWrite, final boolean evictOnClose,
-      final boolean cacheDataCompressed, final boolean prefetchOnOpen,
-      final boolean dropBehindCompaction) {
-    if (enableBlockCache) {
-      this.blockCache = CacheConfig.instantiateBlockCache(conf);
-    } else {
-      this.blockCache = null;
-    }
-    this.cacheDataOnRead = cacheDataOnRead;
-    this.inMemory = inMemory;
-    this.cacheDataOnWrite = cacheDataOnWrite;
-    this.cacheIndexesOnWrite = cacheIndexesOnWrite;
-    this.cacheBloomsOnWrite = cacheBloomsOnWrite;
-    this.evictOnClose = evictOnClose;
-    this.cacheDataCompressed = cacheDataCompressed;
-    this.prefetchOnOpen = prefetchOnOpen;
-    this.dropBehindCompaction = dropBehindCompaction;
-  }
-
   /**
    * Create a block cache configuration with the specified cache and configuration parameters.
    * @param blockCache reference to block cache, null if completely disabled
@@ -669,12 +643,18 @@ public class CacheConfig {
    * @return The block cache or <code>null</code>.
    */
   public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
-    if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
-    if (blockCacheDisabled) return null;
+    if (GLOBAL_BLOCK_CACHE_INSTANCE != null) {
+      return GLOBAL_BLOCK_CACHE_INSTANCE;
+    }
+    if (blockCacheDisabled) {
+      return null;
+    }
     LruBlockCache onHeapCache = getOnHeapCacheInternal(conf);
     // blockCacheDisabled is set as a side-effect of getL1Internal(), so check it again after the
     // call.
-    if (blockCacheDisabled) return null;
+    if (blockCacheDisabled) {
+      return null;
+    }
     boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
     if (useExternal) {
       L2_CACHE_INSTANCE = getExternalBlockcache(conf);

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java
index 971bb92..2305eba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobCacheConfig.java
@@ -42,11 +42,6 @@ public class MobCacheConfig extends CacheConfig {
     instantiateMobFileCache(conf);
   }
 
-  public MobCacheConfig(Configuration conf, boolean needBlockCache) {
-    super(conf, needBlockCache);
-    instantiateMobFileCache(conf);
-  }
-
   /**
    * Instantiates the MobFileCache.
    * @param conf The current configuration.

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index c6e3eee..b9d606d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -595,12 +595,17 @@ public class HRegionServer extends HasThread implements
       // init superusers and add the server principal (if using security)
       // or process owner as default super user.
       Superusers.initialize(conf);
-
       regionServerAccounting = new RegionServerAccounting(conf);
+
       boolean isMasterNotCarryTable =
           this instanceof HMaster && !LoadBalancer.isTablesOnMaster(conf);
-      cacheConfig = new CacheConfig(conf, !isMasterNotCarryTable);
-      mobCacheConfig = new MobCacheConfig(conf, !isMasterNotCarryTable);
+      // no need to instantiate the global block cache when the master does not carry tables
+      if (!isMasterNotCarryTable) {
+        CacheConfig.instantiateBlockCache(conf);
+      }
+      cacheConfig = new CacheConfig(conf);
+      mobCacheConfig = new MobCacheConfig(conf);
+
       uncaughtExceptionHandler = new UncaughtExceptionHandler() {
         @Override
         public void uncaughtException(Thread t, Throwable e) {
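The calling contract after this change, sketched below under the assumption that the patched
hbase-server classes are on the classpath: a process that carries tables calls
CacheConfig.instantiateBlockCache(conf) exactly once up front, and CacheConfig constructors
afterwards only reference the global instance instead of allocating a second cache as a side
effect (which is what blew up the master's heap).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

// Sketch of the post-patch calling contract; "carriesTables" is a stand-in
// for the isMasterNotCarryTable check in HRegionServer above.
public class BlockCacheBootstrapSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    boolean carriesTables = true;

    if (carriesTables) {
      // create the process-wide block cache exactly once, up front
      CacheConfig.instantiateBlockCache(conf);
    }
    // constructors now only reference the global instance; a master that
    // carries no tables skips instantiation and simply gets a null cache
    CacheConfig cacheConfig = new CacheConfig(conf);
    System.out.println("block cache = " + cacheConfig.getBlockCache());
  }
}

This is also why the test diffs below each gain an explicit
CacheConfig.instantiateBlockCache(...) call before constructing a CacheConfig.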

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
index b757e1c..11d7bb4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestEncodedSeekers.java
@@ -112,6 +112,7 @@ public class TestEncodedSeekers {
     if(includeTags) {
       testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
     }
+    CacheConfig.instantiateBlockCache(testUtil.getConfiguration());
     LruBlockCache cache =
       (LruBlockCache)new CacheConfig(testUtil.getConfiguration()).getBlockCache();
     cache.clearCache();

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
index b8b5e88..19919e0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestBlockCacheReporting.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.io.hfile;
 
 import static org.junit.Assert.*;
 
-import com.fasterxml.jackson.core.JsonGenerationException;
-import com.fasterxml.jackson.databind.JsonMappingException;
 import java.io.IOException;
 import java.util.Map;
 import java.util.NavigableSet;
@@ -84,9 +82,10 @@ public class TestBlockCacheReporting {
   }
 
   @Test
-  public void testBucketCache() throws JsonGenerationException, JsonMappingException, IOException {
+  public void testBucketCache() throws IOException {
     this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "offheap");
     this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, 100);
+    CacheConfig.instantiateBlockCache(this.conf);
     CacheConfig cc = new CacheConfig(this.conf);
     assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
     logPerBlock(cc.getBlockCache());
@@ -102,7 +101,8 @@ public class TestBlockCacheReporting {
   }
 
   @Test
-  public void testLruBlockCache() throws JsonGenerationException, JsonMappingException, IOException {
+  public void testLruBlockCache() throws IOException {
+    CacheConfig.instantiateBlockCache(this.conf);
     CacheConfig cc = new CacheConfig(this.conf);
     assertTrue(cc.isBlockCacheEnabled());
     assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
@@ -131,8 +131,7 @@ public class TestBlockCacheReporting {
     }
   }
 
-  private void logPerFile(final BlockCacheUtil.CachedBlocksByFile cbsbf)
-  throws JsonGenerationException, JsonMappingException, IOException {
+  private void logPerFile(final BlockCacheUtil.CachedBlocksByFile cbsbf) throws IOException {
     for (Map.Entry<String, NavigableSet<CachedBlock>> e:
         cbsbf.getCachedBlockStatsByFile().entrySet()) {
       int count = 0;
@@ -154,10 +153,9 @@ public class TestBlockCacheReporting {
     }
   }
 
-  private BlockCacheUtil.CachedBlocksByFile logPerBlock(final BlockCache bc)
-  throws JsonGenerationException, JsonMappingException, IOException {
+  private BlockCacheUtil.CachedBlocksByFile logPerBlock(final BlockCache bc) throws IOException {
     BlockCacheUtil.CachedBlocksByFile cbsbf = new BlockCacheUtil.CachedBlocksByFile();
-    for (CachedBlock cb: bc) {
+    for (CachedBlock cb : bc) {
       LOG.info(cb.toString());
       LOG.info(BlockCacheUtil.toJSON(bc));
       cbsbf.update(cb);

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index f84a319..7b6bbb3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -209,6 +209,7 @@ public class TestCacheConfig {
   @Test
   public void testDisableCacheDataBlock() throws IOException {
     Configuration conf = HBaseConfiguration.create();
+    CacheConfig.instantiateBlockCache(conf);
     CacheConfig cacheConfig = new CacheConfig(conf);
     assertTrue(cacheConfig.shouldCacheBlockOnRead(BlockCategory.DATA));
     assertFalse(cacheConfig.shouldCacheCompressed(BlockCategory.DATA));
@@ -274,6 +275,7 @@ public class TestCacheConfig {
 
   @Test
   public void testCacheConfigDefaultLRUBlockCache() {
+    CacheConfig.instantiateBlockCache(this.conf);
     CacheConfig cc = new CacheConfig(this.conf);
     assertTrue(cc.isBlockCacheEnabled());
     assertTrue(CacheConfig.DEFAULT_IN_MEMORY == cc.isInMemory());
@@ -307,6 +309,7 @@ public class TestCacheConfig {
   private void doBucketCacheConfigTest() {
     final int bcSize = 100;
     this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
+    CacheConfig.instantiateBlockCache(this.conf);
     CacheConfig cc = new CacheConfig(this.conf);
     basicBlockCacheOps(cc, false, false);
     assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);
@@ -338,6 +341,7 @@ public class TestCacheConfig {
     long bcExpectedSize = 100 * 1024 * 1024; // MB.
     assertTrue(lruExpectedSize < bcExpectedSize);
     this.conf.setInt(HConstants.BUCKET_CACHE_SIZE_KEY, bcSize);
+    CacheConfig.instantiateBlockCache(this.conf);
     CacheConfig cc = new CacheConfig(this.conf);
     basicBlockCacheOps(cc, false, false);
     assertTrue(cc.getBlockCache() instanceof CombinedBlockCache);

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
index 19d845c..9c2f6df 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.java
@@ -160,6 +160,7 @@ public class TestCacheOnWrite {
     Configuration conf = TEST_UTIL.getConfiguration();
     List<BlockCache> blockcaches = new ArrayList<>();
     // default
+    CacheConfig.instantiateBlockCache(conf);
     blockcaches.add(new CacheConfig(conf).getBlockCache());
 
     //set LruBlockCache.LRU_HARD_CAPACITY_LIMIT_FACTOR_CONFIG_NAME to 2.0f due to HBASE-16287
@@ -228,7 +229,6 @@ public class TestCacheOnWrite {
     conf.setBoolean(CacheConfig.CACHE_DATA_BLOCKS_COMPRESSED_KEY, cacheCompressedData);
     cowType.modifyConf(conf);
     fs = HFileSystem.get(conf);
-    CacheConfig.GLOBAL_BLOCK_CACHE_INSTANCE = blockCache;
     cacheConf =
         new CacheConfig(blockCache, true, true, cowType.shouldBeCached(BlockType.DATA),
         cowType.shouldBeCached(BlockType.LEAF_INDEX),

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
index a90b572..5612c1b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestForceCacheImportantBlocks.java
@@ -106,6 +106,7 @@ public class TestForceCacheImportantBlocks {
     // Make sure we make a new one each time.
     CacheConfig.clearGlobalInstances();
     HFile.DATABLOCK_READ_COUNT.reset();
+    CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
index 890ea72..efe76aa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.java
@@ -524,67 +524,59 @@ public class TestHFileBlockIndex {
   * @throws IOException
   */
   @Test
- public void testMidKeyOnLeafIndexBlockBoundary() throws IOException {
-   Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
-       "hfile_for_midkey");
-   int maxChunkSize = 512;
-   conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
-   // should open hfile.block.index.cacheonwrite
-   conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);
-
-   CacheConfig cacheConf = new CacheConfig(conf);
-   BlockCache blockCache = cacheConf.getBlockCache();
-   // Evict all blocks that were cached-on-write by the previous invocation.
-   blockCache.evictBlocksByHfileName(hfilePath.getName());
-   // Write the HFile
-   {
-     HFileContext meta = new HFileContextBuilder()
-                         .withBlockSize(SMALL_BLOCK_SIZE)
-                         .withCompression(Algorithm.NONE)
-                         .withDataBlockEncoding(DataBlockEncoding.NONE)
-                         .build();
-     HFile.Writer writer =
-           HFile.getWriterFactory(conf, cacheConf)
-               .withPath(fs, hfilePath)
-               .withFileContext(meta)
-               .create();
-     Random rand = new Random(19231737);
-     byte[] family = Bytes.toBytes("f");
-     byte[] qualifier = Bytes.toBytes("q");
-     int kvNumberToBeWritten = 16;
-     // the new generated hfile will contain 2 leaf-index blocks and 16 data blocks,
-     // midkey is just on the boundary of the first leaf-index block
-     for (int i = 0; i < kvNumberToBeWritten; ++i) {
-       byte[] row = RandomKeyValueUtil.randomOrderedFixedLengthKey(rand, i, 30);
-
-       // Key will be interpreted by KeyValue.KEY_COMPARATOR
-       KeyValue kv =
-             new KeyValue(row, family, qualifier, EnvironmentEdgeManager.currentTime(),
-                 RandomKeyValueUtil.randomFixedLengthValue(rand, SMALL_BLOCK_SIZE));
-       writer.append(kv);
-     }
-     writer.close();
-   }
-
-   // close hfile.block.index.cacheonwrite
-   conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
-
-   // Read the HFile
-   HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, true, conf);
-
-   boolean hasArrayIndexOutOfBoundsException = false;
-   try {
-     // get the mid-key.
-     reader.midKey();
-   } catch (ArrayIndexOutOfBoundsException e) {
-     hasArrayIndexOutOfBoundsException = true;
-   } finally {
-     reader.close();
-   }
-
-   // to check if ArrayIndexOutOfBoundsException occurred
-   assertFalse(hasArrayIndexOutOfBoundsException);
- }
+  public void testMidKeyOnLeafIndexBlockBoundary() throws IOException {
+    Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "hfile_for_midkey");
+    int maxChunkSize = 512;
+    conf.setInt(HFileBlockIndex.MAX_CHUNK_SIZE_KEY, maxChunkSize);
+    // turn on hfile.block.index.cacheonwrite
+    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, true);
+    CacheConfig.instantiateBlockCache(conf);
+    CacheConfig cacheConf = new CacheConfig(conf);
+    BlockCache blockCache = cacheConf.getBlockCache();
+    // Evict all blocks that were cached-on-write by the previous invocation.
+    blockCache.evictBlocksByHfileName(hfilePath.getName());
+    // Write the HFile
+    HFileContext meta =
+        new HFileContextBuilder().withBlockSize(SMALL_BLOCK_SIZE).withCompression(Algorithm.NONE)
+            .withDataBlockEncoding(DataBlockEncoding.NONE).build();
+    HFile.Writer writer =
+        HFile.getWriterFactory(conf, cacheConf).withPath(fs, hfilePath).withFileContext(meta)
+            .create();
+    Random rand = new Random(19231737);
+    byte[] family = Bytes.toBytes("f");
+    byte[] qualifier = Bytes.toBytes("q");
+    int kvNumberToBeWritten = 16;
+    // the newly generated hfile will contain 2 leaf-index blocks and 16 data blocks;
+    // the midkey is right on the boundary of the first leaf-index block
+    for (int i = 0; i < kvNumberToBeWritten; ++i) {
+      byte[] row = RandomKeyValueUtil.randomOrderedFixedLengthKey(rand, i, 30);
+
+      // Key will be interpreted by KeyValue.KEY_COMPARATOR
+      KeyValue kv = new KeyValue(row, family, qualifier, EnvironmentEdgeManager.currentTime(),
+          RandomKeyValueUtil.randomFixedLengthValue(rand, SMALL_BLOCK_SIZE));
+      writer.append(kv);
+    }
+    writer.close();
+
+    // turn off hfile.block.index.cacheonwrite
+    conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
+
+    // Read the HFile
+    HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConf, true, conf);
+
+    boolean hasArrayIndexOutOfBoundsException = false;
+    try {
+      // get the mid-key.
+      reader.midKey();
+    } catch (ArrayIndexOutOfBoundsException e) {
+      hasArrayIndexOutOfBoundsException = true;
+    } finally {
+      reader.close();
+    }
+
+    // to check if ArrayIndexOutOfBoundsException occurred
+    assertFalse(hasArrayIndexOutOfBoundsException);
+  }
 
   /**
    * Testing block index through the HFile writer/reader APIs. Allows to test
@@ -597,6 +589,7 @@ public class TestHFileBlockIndex {
   public void testHFileWriterAndReader() throws IOException {
     Path hfilePath = new Path(TEST_UTIL.getDataTestDir(),
         "hfile_for_block_index");
+    CacheConfig.instantiateBlockCache(conf);
     CacheConfig cacheConf = new CacheConfig(conf);
     BlockCache blockCache = cacheConf.getBlockCache();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
index 91a9238..811df14 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestPrefetch.java
@@ -64,6 +64,7 @@ public class TestPrefetch {
     conf.setBoolean(CacheConfig.PREFETCH_BLOCKS_ON_OPEN_KEY, true);
     fs = HFileSystem.get(conf);
     CacheConfig.blockCacheDisabled = false;
+    CacheConfig.instantiateBlockCache(conf);
     cacheConf = new CacheConfig(conf);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
index 06d617a..18e8e70 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerFromBucketCache.java
@@ -88,6 +88,7 @@ public class TestScannerFromBucketCache {
       conf.setFloat("hbase.regionserver.global.memstore.size", 0.1f);
     }
     tableName = TableName.valueOf(name.getMethodName());
+    CacheConfig.instantiateBlockCache(conf);
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
index c576329..d27b041 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingKeyRange.java
@@ -124,6 +124,7 @@ public class TestScannerSelectionUsingKeyRange {
 
     Scan scan = new Scan(Bytes.toBytes("aaa"), Bytes.toBytes("aaz"));
     CacheConfig.blockCacheDisabled = false;
+    CacheConfig.instantiateBlockCache(conf);
     CacheConfig cacheConf = new CacheConfig(conf);
     LruBlockCache cache = (LruBlockCache) cacheConf.getBlockCache();
     cache.clearCache();

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
index 08a7be2..444102d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestScannerSelectionUsingTTL.java
@@ -104,6 +104,7 @@ public class TestScannerSelectionUsingTTL {
   @Test
   public void testScannerSelection() throws IOException {
     Configuration conf = TEST_UTIL.getConfiguration();
+    CacheConfig.instantiateBlockCache(conf);
     conf.setBoolean("hbase.store.delete.expired.storefile", false);
     HColumnDescriptor hcd =
       new HColumnDescriptor(FAMILY_BYTES)

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
index 73b596a..2cf3f8c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksRead.java
@@ -76,6 +76,7 @@ public class TestBlocksRead  {
   public static void setUp() throws Exception {
     // disable compactions in this test.
     TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10000);
+    CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration());
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
index 7db34ac..0ba4e97 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestBlocksScanned.java
@@ -61,8 +61,8 @@ public class TestBlocksScanned extends HBaseTestCase {
   @Before
   public void setUp() throws Exception {
     super.setUp();
-
     TEST_UTIL = new HBaseTestingUtility();
+    CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration());
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
index 5c73a6f..dc51dae 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCacheOnWriteInSchema.java
@@ -161,6 +161,7 @@ public class TestCacheOnWriteInSchema {
     conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, false);
     conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
     conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);
+    CacheConfig.instantiateBlockCache(conf);
 
     fs = HFileSystem.get(conf);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
index 424a788..4c2d645 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompoundBloomFilter.java
@@ -139,6 +139,7 @@ public class TestCompoundBloomFilter {
 
     fs = FileSystem.get(conf);
 
+    CacheConfig.instantiateBlockCache(conf);
     cacheConf = new CacheConfig(conf);
     blockCache = cacheConf.getBlockCache();
     assertNotNull(blockCache);

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 4b8f5f2..30ee3b2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -924,7 +924,6 @@ public class TestHStoreFile extends HBaseTestCase {
     scan.setTimeRange(27, 50);
     scan.setColumnFamilyTimeRange(family, 7, 50);
     assertTrue(scanner.shouldUseScanner(scan, store, Long.MIN_VALUE));
-
   }
 
   @Test
@@ -935,6 +934,7 @@ public class TestHStoreFile extends HBaseTestCase {
     Path baseDir = new Path(new Path(testDir, "7e0102"),"twoCOWEOC");
 
     // Grab the block cache and get the initial hit/miss counts
+    CacheConfig.instantiateBlockCache(conf);
     BlockCache bc = new CacheConfig(conf).getBlockCache();
     assertNotNull(bc);
     CacheStats cs = bc.getStats();

http://git-wip-us.apache.org/repos/asf/hbase/blob/34e1d744/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
index 34f6ca1..543126e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRecoveredEdits.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -80,6 +81,7 @@ public class TestRecoveredEdits {
   @Test
   public void testReplayWorksThoughLotsOfFlushing() throws
       IOException {
+    CacheConfig.instantiateBlockCache(TEST_UTIL.getConfiguration());
     for(MemoryCompactionPolicy policy : MemoryCompactionPolicy.values()) {
       testReplayWorksWithMemoryCompactionPolicy(policy);
     }
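
The common thread in the test changes above: the block cache is no longer
created implicitly when a CacheConfig is constructed, so any test that
expects a cache must call CacheConfig.instantiateBlockCache(conf) first.
A minimal sketch of the new setup order, following the pattern in these
hunks (the class name here is illustrative, not part of the commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.io.hfile.BlockCache;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;

    public class BlockCacheSetupSketch {
      public static void main(String[] args) {
        Configuration conf = new HBaseTestingUtility().getConfiguration();
        // Create the global block cache up front; without this call the
        // CacheConfig built below would see no block cache at all.
        CacheConfig.instantiateBlockCache(conf);
        CacheConfig cacheConf = new CacheConfig(conf);
        BlockCache blockCache = cacheConf.getBlockCache();
        assert blockCache != null;
      }
    }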


[10/51] [abbrv] hbase git commit: Revert "HBASE-21377 Add debug log for catching the root cause"

Posted by el...@apache.org.
Revert "HBASE-21377 Add debug log for catching the root cause"

This reverts commit 3fe8649b2c9ba1271c25e8f476548907e4c7a90d.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b8271c06
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b8271c06
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b8271c06

Branch: refs/heads/HBASE-20952
Commit: b8271c06d50f985245ce3214a88d5ddacffadd30
Parents: b329e6e
Author: Duo Zhang <zh...@apache.org>
Authored: Mon Nov 19 17:08:41 2018 +0800
Committer: Duo Zhang <zh...@apache.org>
Committed: Mon Nov 19 17:08:41 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/procedure2/RootProcedureState.java  | 5 -----
 .../hadoop/hbase/procedure2/store/wal/WALProcedureTree.java     | 3 ---
 2 files changed, 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b8271c06/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
index a7cdaab..2fc0030 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
@@ -24,8 +24,6 @@ import java.util.List;
 import java.util.Set;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 
@@ -44,8 +42,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedu
 @InterfaceStability.Evolving
 class RootProcedureState<TEnvironment> {
 
-  private static final Logger LOG = LoggerFactory.getLogger(RootProcedureState.class);
-
   private enum State {
     RUNNING,         // The Procedure is running or ready to run
     FAILED,          // The Procedure failed, waiting for the rollback executing
@@ -150,7 +146,6 @@ class RootProcedureState<TEnvironment> {
       subprocStack = new ArrayList<>();
     }
     proc.addStackIndex(subprocStack.size());
-    LOG.debug("Add procedure {} as the {}th rollback step", proc, subprocStack.size());
     subprocStack.add(proc);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b8271c06/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.java
index 6e624b4..c32bd7f 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.java
@@ -145,9 +145,6 @@ public final class WALProcedureTree {
 
   private void collectStackId(Entry entry, Map<Integer, List<Entry>> stackId2Proc,
       MutableInt maxStackId) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Procedure {} stack ids={}", entry, entry.proc.getStackIdList());
-    }
     for (int i = 0, n = entry.proc.getStackIdCount(); i < n; i++) {
       int stackId = entry.proc.getStackId(i);
       if (stackId > maxStackId.intValue()) {
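
For reference, the two removed hunks show the usual SLF4J idioms. A
parameterized LOG.debug("... {} ...", args) call already defers message
formatting until DEBUG is enabled, so it needs no guard when its arguments
are cheap; the explicit isDebugEnabled() check only pays off when computing
an argument is itself costly (as with entry.proc.getStackIdList() above).
A short sketch under those assumptions (class and parameters illustrative):

    import java.util.List;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class GuardedDebugSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(GuardedDebugSketch.class);

      void trace(Object proc, List<Integer> stackIds) {
        // Cheap arguments: the {} placeholders defer formatting, no guard needed.
        LOG.debug("Add procedure {} as the {}th rollback step", proc, stackIds.size());
        // Expensive argument: guard so the value is not materialized when DEBUG is off.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Procedure {} stack ids={}", proc, buildStackIdList(stackIds));
        }
      }

      private String buildStackIdList(List<Integer> stackIds) {
        return String.valueOf(stackIds); // stand-in for an expensive conversion
      }
    }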


[02/51] [abbrv] hbase git commit: HBASE-21255 [acl] Refactor TablePermission into three classes (Global, Namespace, Table)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index b6d8fe9..34480d3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -131,14 +131,15 @@ public class AccessControlLists {
    * @throws IOException in the case of an error accessing the metadata table
    */
   static void addUserPermission(Configuration conf, UserPermission userPerm, Table t,
-                                boolean mergeExistingPermissions) throws IOException {
-    Permission.Action[] actions = userPerm.getActions();
-    byte[] rowKey = userPermissionRowKey(userPerm);
+      boolean mergeExistingPermissions) throws IOException {
+    Permission permission = userPerm.getPermission();
+    Permission.Action[] actions = permission.getActions();
+    byte[] rowKey = userPermissionRowKey(permission);
     Put p = new Put(rowKey);
     byte[] key = userPermissionKey(userPerm);
 
     if ((actions == null) || (actions.length == 0)) {
-      String msg = "No actions associated with user '" + Bytes.toString(userPerm.getUser()) + "'";
+      String msg = "No actions associated with user '" + userPerm.getUser() + "'";
       LOG.warn(msg);
       throw new IOException(msg);
     }
@@ -148,16 +149,14 @@ public class AccessControlLists {
       List<UserPermission> perms = getUserPermissions(conf, rowKey, null, null, null, false);
       UserPermission currentPerm = null;
       for (UserPermission perm : perms) {
-        if (Bytes.equals(perm.getUser(), userPerm.getUser())
-                && ((userPerm.isGlobal() && ACL_TABLE_NAME.equals(perm.getTableName()))
-                || perm.tableFieldsEqual(userPerm))) {
+        if (userPerm.equalsExceptActions(perm)) {
           currentPerm = perm;
           break;
         }
       }
 
-      if(currentPerm != null && currentPerm.getActions() != null){
-        actionSet.addAll(Arrays.asList(currentPerm.getActions()));
+      if (currentPerm != null && currentPerm.getPermission().getActions() != null){
+        actionSet.addAll(Arrays.asList(currentPerm.getPermission().getActions()));
       }
     }
 
@@ -225,24 +224,27 @@ public class AccessControlLists {
    */
   static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t)
       throws IOException {
-    if (null == userPerm.getActions()) {
+    if (null == userPerm.getPermission().getActions() ||
+        userPerm.getPermission().getActions().length == 0) {
       removePermissionRecord(conf, userPerm, t);
     } else {
       // Get all the global user permissions from the acl table
       List<UserPermission> permsList =
-          getUserPermissions(conf, userPermissionRowKey(userPerm), null, null, null, false);
+        getUserPermissions(conf, userPermissionRowKey(userPerm.getPermission()),
+          null, null, null, false);
       List<Permission.Action> remainingActions = new ArrayList<>();
-      List<Permission.Action> dropActions = Arrays.asList(userPerm.getActions());
+      List<Permission.Action> dropActions = Arrays.asList(userPerm.getPermission().getActions());
       for (UserPermission perm : permsList) {
         // Find the user and remove only the requested permissions
-        if (Bytes.toString(perm.getUser()).equals(Bytes.toString(userPerm.getUser()))) {
-          for (Permission.Action oldAction : perm.getActions()) {
+        if (perm.getUser().equals(userPerm.getUser())) {
+          for (Permission.Action oldAction : perm.getPermission().getActions()) {
             if (!dropActions.contains(oldAction)) {
               remainingActions.add(oldAction);
             }
           }
           if (!remainingActions.isEmpty()) {
-            perm.setActions(remainingActions.toArray(new Permission.Action[remainingActions.size()]));
+            perm.getPermission().setActions(
+              remainingActions.toArray(new Permission.Action[remainingActions.size()]));
             addUserPermission(conf, perm, t);
           } else {
             removePermissionRecord(conf, userPerm, t);
@@ -258,7 +260,7 @@ public class AccessControlLists {
 
   private static void removePermissionRecord(Configuration conf, UserPermission userPerm, Table t)
       throws IOException {
-    Delete d = new Delete(userPermissionRowKey(userPerm));
+    Delete d = new Delete(userPermissionRowKey(userPerm.getPermission()));
     d.addColumns(ACL_LIST_FAMILY, userPermissionKey(userPerm));
     try {
       t.delete(d);
@@ -348,14 +350,17 @@ public class AccessControlLists {
     removeTablePermissions(tableName, column, t, true);
   }
 
-  static byte[] userPermissionRowKey(UserPermission userPerm) {
+  static byte[] userPermissionRowKey(Permission permission) {
     byte[] row;
-    if(userPerm.hasNamespace()) {
-      row = Bytes.toBytes(toNamespaceEntry(userPerm.getNamespace()));
-    } else if(userPerm.isGlobal()) {
-      row = ACL_GLOBAL_NAME;
+    if (permission instanceof TablePermission) {
+      TablePermission tablePerm = (TablePermission) permission;
+      row = tablePerm.getTableName().getName();
+    } else if (permission instanceof NamespacePermission) {
+      NamespacePermission nsPerm = (NamespacePermission) permission;
+      row = Bytes.toBytes(toNamespaceEntry(nsPerm.getNamespace()));
     } else {
-      row = userPerm.getTableName().getName();
+      // permission instanceof GlobalPermission
+      row = ACL_GLOBAL_NAME;
     }
     return row;
   }
@@ -366,10 +371,15 @@ public class AccessControlLists {
    *  username,family
    *  username,family,qualifier
    */
-  static byte[] userPermissionKey(UserPermission userPerm) {
-    byte[] qualifier = userPerm.getQualifier();
-    byte[] family = userPerm.getFamily();
-    byte[] key = userPerm.getUser();
+  static byte[] userPermissionKey(UserPermission permission) {
+    byte[] key = Bytes.toBytes(permission.getUser());
+    byte[] qualifier = null;
+    byte[] family = null;
+    if (permission.getPermission().getAccessScope() == Permission.Scope.TABLE) {
+      TablePermission tablePermission = (TablePermission) permission.getPermission();
+      family = tablePermission.getFamily();
+      qualifier = tablePermission.getQualifier();
+    }
 
     if (family != null && family.length > 0) {
       key = Bytes.add(key, Bytes.add(new byte[]{ACL_KEY_DELIMITER}, family));
@@ -404,14 +414,15 @@ public class AccessControlLists {
    * @return a map of the permissions for this table.
    * @throws IOException
    */
-  static Map<byte[], ListMultimap<String,TablePermission>> loadAll(Region aclRegion)
+  static Map<byte[], ListMultimap<String, UserPermission>> loadAll(Region aclRegion)
       throws IOException {
 
     if (!isAclRegion(aclRegion)) {
       throw new IOException("Can only load permissions from "+ACL_TABLE_NAME);
     }
 
-    Map<byte[], ListMultimap<String, TablePermission>> allPerms = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
+    Map<byte[], ListMultimap<String, UserPermission>> allPerms =
+      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
 
     // do a full scan of _acl_ table
 
@@ -426,18 +437,18 @@ public class AccessControlLists {
         List<Cell> row = new ArrayList<>();
 
         boolean hasNext = iScanner.next(row);
-        ListMultimap<String,TablePermission> perms = ArrayListMultimap.create();
+        ListMultimap<String, UserPermission> perms = ArrayListMultimap.create();
         byte[] entry = null;
         for (Cell kv : row) {
           if (entry == null) {
             entry = CellUtil.cloneRow(kv);
           }
-          Pair<String, TablePermission> permissionsOfUserOnTable =
+          Pair<String, Permission> permissionsOfUserOnTable =
               parsePermissionRecord(entry, kv, null, null, false, null);
           if (permissionsOfUserOnTable != null) {
             String username = permissionsOfUserOnTable.getFirst();
-            TablePermission permissions = permissionsOfUserOnTable.getSecond();
-            perms.put(username, permissions);
+            Permission permission = permissionsOfUserOnTable.getSecond();
+            perms.put(username, new UserPermission(username, permission));
           }
         }
         if (entry != null) {
@@ -460,9 +471,10 @@ public class AccessControlLists {
    * Load all permissions from the region server holding {@code _acl_},
    * primarily intended for testing purposes.
    */
-  static Map<byte[], ListMultimap<String,TablePermission>> loadAll(
+  static Map<byte[], ListMultimap<String, UserPermission>> loadAll(
       Configuration conf) throws IOException {
-    Map<byte[], ListMultimap<String,TablePermission>> allPerms = new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
+    Map<byte[], ListMultimap<String, UserPermission>> allPerms =
+      new TreeMap<>(Bytes.BYTES_RAWCOMPARATOR);
 
     // do a full scan of _acl_, filtering on only first table region rows
 
@@ -476,7 +488,7 @@ public class AccessControlLists {
         scanner = table.getScanner(scan);
         try {
           for (Result row : scanner) {
-            ListMultimap<String, TablePermission> resultPerms =
+            ListMultimap<String, UserPermission> resultPerms =
                 parsePermissions(row.getRow(), row, null, null, null, false);
             allPerms.put(row.getRow(), resultPerms);
           }
@@ -489,14 +501,14 @@ public class AccessControlLists {
     return allPerms;
   }
 
-  public static ListMultimap<String, TablePermission> getTablePermissions(Configuration conf,
+  public static ListMultimap<String, UserPermission> getTablePermissions(Configuration conf,
       TableName tableName) throws IOException {
     return getPermissions(conf, tableName != null ? tableName.getName() : null, null, null, null,
       null, false);
   }
 
   @VisibleForTesting
-  public static ListMultimap<String, TablePermission> getNamespacePermissions(Configuration conf,
+  public static ListMultimap<String, UserPermission> getNamespacePermissions(Configuration conf,
       String namespace) throws IOException {
     return getPermissions(conf, Bytes.toBytes(toNamespaceEntry(namespace)), null, null, null, null,
       false);
@@ -509,11 +521,11 @@ public class AccessControlLists {
    * See {@link AccessControlLists class documentation} for the key structure used for storage.
    * </p>
    */
-  static ListMultimap<String, TablePermission> getPermissions(Configuration conf, byte[] entryName,
+  static ListMultimap<String, UserPermission> getPermissions(Configuration conf, byte[] entryName,
       Table t, byte[] cf, byte[] cq, String user, boolean hasFilterUser) throws IOException {
     if (entryName == null) entryName = ACL_GLOBAL_NAME;
     // for normal user tables, we just read the table row from _acl_
-    ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
+    ListMultimap<String, UserPermission> perms = ArrayListMultimap.create();
     Get get = new Get(entryName);
     get.addFamily(ACL_LIST_FAMILY);
     Result row = null;
@@ -570,25 +582,12 @@ public class AccessControlLists {
    */
   static List<UserPermission> getUserPermissions(Configuration conf, byte[] entryName, byte[] cf,
       byte[] cq, String user, boolean hasFilterUser) throws IOException {
-    ListMultimap<String, TablePermission> allPerms =
+    ListMultimap<String, UserPermission> allPerms =
         getPermissions(conf, entryName, null, cf, cq, user, hasFilterUser);
-
     List<UserPermission> perms = new ArrayList<>();
-    if (isNamespaceEntry(entryName)) { // Namespace
-      for (Map.Entry<String, TablePermission> entry : allPerms.entries()) {
-        UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
-            entry.getValue().getNamespace(), entry.getValue().getActions());
-        perms.add(up);
-      }
-    } else { // Table
-      for (Map.Entry<String, TablePermission> entry : allPerms.entries()) {
-        UserPermission up = new UserPermission(Bytes.toBytes(entry.getKey()),
-            entry.getValue().getTableName(), entry.getValue().getFamily(),
-            entry.getValue().getQualifier(), entry.getValue().getActions());
-        perms.add(up);
-      }
+    for (Map.Entry<String, UserPermission> entry : allPerms.entries()) {
+      perms.add(entry.getValue());
     }
-
     return perms;
   }
 
@@ -596,25 +595,25 @@ public class AccessControlLists {
    * Parse and filter permission based on the specified column family, column qualifier and user
    * name.
    */
-  private static ListMultimap<String, TablePermission> parsePermissions(byte[] entryName,
+  private static ListMultimap<String, UserPermission> parsePermissions(byte[] entryName,
       Result result, byte[] cf, byte[] cq, String user, boolean hasFilterUser) {
-    ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
+    ListMultimap<String, UserPermission> perms = ArrayListMultimap.create();
     if (result != null && result.size() > 0) {
       for (Cell kv : result.rawCells()) {
-        Pair<String, TablePermission> permissionsOfUserOnTable =
+        Pair<String, Permission> permissionsOfUserOnTable =
             parsePermissionRecord(entryName, kv, cf, cq, hasFilterUser, user);
 
         if (permissionsOfUserOnTable != null) {
           String username = permissionsOfUserOnTable.getFirst();
-          TablePermission permissions = permissionsOfUserOnTable.getSecond();
-          perms.put(username, permissions);
+          Permission permission = permissionsOfUserOnTable.getSecond();
+          perms.put(username, new UserPermission(username, permission));
         }
       }
     }
     return perms;
   }
 
-  private static Pair<String, TablePermission> parsePermissionRecord(byte[] entryName, Cell kv,
+  private static Pair<String, Permission> parsePermissionRecord(byte[] entryName, Cell kv,
       byte[] cf, byte[] cq, boolean filterPerms, String filterUser) {
     // return X given a set of permissions encoded in the permissionRecord kv.
     byte[] family = CellUtil.cloneFamily(kv);
@@ -625,9 +624,10 @@ public class AccessControlLists {
     byte[] key = CellUtil.cloneQualifier(kv);
     byte[] value = CellUtil.cloneValue(kv);
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Read acl: kv ["+
-          Bytes.toStringBinary(key)+": "+
-          Bytes.toStringBinary(value)+"]");
+      LOG.debug("Read acl: entry[" +
+        Bytes.toStringBinary(entryName) + "], kv [" +
+        Bytes.toStringBinary(key) + ": " +
+        Bytes.toStringBinary(value)+"]");
     }
 
     // check for a column family appended to the key
@@ -652,11 +652,20 @@ public class AccessControlLists {
       }
 
       return new Pair<>(username,
-          new TablePermission(Bytes.toString(fromNamespaceEntry(entryName)), value));
+        new NamespacePermission(Bytes.toString(fromNamespaceEntry(entryName)), value));
     }
 
-    //Handle table and global entry
-    //TODO global entry should be handled differently
+    // Handle global entry
+    if (isGlobalEntry(entryName)) {
+      // Filter the permissions cell record if client query
+      if (filterPerms && !validateFilterUser(username, filterUser, filterUserGroups)) {
+        return null;
+      }
+
+      return new Pair<>(username, new GlobalPermission(value));
+    }
+
+    // Handle table entry
     int idx = username.indexOf(ACL_KEY_DELIMITER);
     byte[] permFamily = null;
     byte[] permQualifier = null;
@@ -738,7 +747,7 @@ public class AccessControlLists {
    * Writes a set of permissions as {@link org.apache.hadoop.io.Writable} instances and returns the
    * resulting byte array. Writes a set of permission [user: table permission]
    */
-  public static byte[] writePermissionsAsBytes(ListMultimap<String, TablePermission> perms,
+  public static byte[] writePermissionsAsBytes(ListMultimap<String, UserPermission> perms,
       Configuration conf) {
     return ProtobufUtil.prependPBMagic(AccessControlUtil.toUserTablePermissions(perms).toByteArray());
   }
@@ -750,11 +759,11 @@ public class AccessControlLists {
 
   private static final int WRITABLE_NOT_ENCODED = 0;
 
-  private static List<TablePermission> readWritablePermissions(DataInput in, Configuration conf)
-      throws IOException, ClassNotFoundException {
+  private static List<Permission> readWritableUserPermission(DataInput in,
+      Configuration conf) throws IOException, ClassNotFoundException {
     assert WritableUtils.readVInt(in) == LIST_CODE;
     int length = in.readInt();
-    List<TablePermission> list = new ArrayList<>(length);
+    List<Permission> list = new ArrayList<>(length);
     for (int i = 0; i < length; i++) {
       assert WritableUtils.readVInt(in) == WRITABLE_CODE;
       assert WritableUtils.readVInt(in) == WRITABLE_NOT_ENCODED;
@@ -762,38 +771,67 @@ public class AccessControlLists {
       Class<? extends Writable> clazz = conf.getClassByName(className).asSubclass(Writable.class);
       Writable instance = WritableFactories.newInstance(clazz, conf);
       instance.readFields(in);
-      list.add((TablePermission) instance);
+      list.add((Permission) instance);
     }
     return list;
   }
 
-  /**
-   * Reads a set of permissions as {@link org.apache.hadoop.io.Writable} instances from the input
-   * stream.
-   */
-  public static ListMultimap<String, TablePermission> readPermissions(byte[] data,
+  @VisibleForTesting
+  public static ListMultimap<String, UserPermission> readUserPermission(byte[] data,
       Configuration conf) throws DeserializationException {
     if (ProtobufUtil.isPBMagicPrefix(data)) {
       int pblen = ProtobufUtil.lengthOfPBMagic();
       try {
         AccessControlProtos.UsersAndPermissions.Builder builder =
-            AccessControlProtos.UsersAndPermissions.newBuilder();
+          AccessControlProtos.UsersAndPermissions.newBuilder();
         ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
-        return AccessControlUtil.toUserTablePermissions(builder.build());
+        return AccessControlUtil.toUserPermission(builder.build());
       } catch (IOException e) {
         throw new DeserializationException(e);
       }
     } else {
       // TODO: We have to re-write non-PB data as PB encoded. Otherwise we will carry old Writables
       // forever (here and a couple of other places).
-      ListMultimap<String, TablePermission> perms = ArrayListMultimap.create();
+      ListMultimap<String, UserPermission> userPermission = ArrayListMultimap.create();
       try {
         DataInput in = new DataInputStream(new ByteArrayInputStream(data));
         int length = in.readInt();
         for (int i = 0; i < length; i++) {
           String user = Text.readString(in);
-          List<TablePermission> userPerms = readWritablePermissions(in, conf);
-          perms.putAll(user, userPerms);
+          List<Permission> perms = readWritableUserPermission(in, conf);
+          for (Permission p : perms) {
+            userPermission.put(user, new UserPermission(user, p));
+          }
+        }
+      } catch (IOException | ClassNotFoundException e) {
+        throw new DeserializationException(e);
+      }
+      return userPermission;
+    }
+  }
+
+  public static ListMultimap<String, Permission> readPermissions(byte[] data,
+      Configuration conf) throws DeserializationException {
+    if (ProtobufUtil.isPBMagicPrefix(data)) {
+      int pblen = ProtobufUtil.lengthOfPBMagic();
+      try {
+        AccessControlProtos.UsersAndPermissions.Builder builder =
+          AccessControlProtos.UsersAndPermissions.newBuilder();
+        ProtobufUtil.mergeFrom(builder, data, pblen, data.length - pblen);
+        return AccessControlUtil.toPermission(builder.build());
+      } catch (IOException e) {
+        throw new DeserializationException(e);
+      }
+    } else {
+      // TODO: We have to re-write non-PB data as PB encoded. Otherwise we will carry old Writables
+      // forever (here and a couple of other places).
+      ListMultimap<String, Permission> perms = ArrayListMultimap.create();
+      try {
+        DataInput in = new DataInputStream(new ByteArrayInputStream(data));
+        int length = in.readInt();
+        for (int i = 0; i < length; i++) {
+          String user = Text.readString(in);
+          perms.putAll(user, readWritableUserPermission(in, conf));
         }
       } catch (IOException | ClassNotFoundException e) {
         throw new DeserializationException(e);
@@ -802,6 +840,10 @@ public class AccessControlLists {
     }
   }
 
+  public static boolean isGlobalEntry(byte[] entryName) {
+    return entryName != null && TableName.valueOf(entryName).equals(ACL_TABLE_NAME);
+  }
+
   public static boolean isNamespaceEntry(String entryName) {
     return entryName != null && entryName.charAt(0) == NAMESPACE_PREFIX;
   }
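
The reworked key helpers keep the _acl_ storage layout intact: the row key
names the entity (the table name bytes, a prefixed namespace entry, or the
global sentinel row ACL_GLOBAL_NAME), and the column qualifier packs the
user plus optional family and qualifier. A rough sketch of the qualifier
encoding, mirroring userPermissionKey above and assuming the delimiter is
',' as the javadoc examples (username,family,qualifier) suggest:

    import org.apache.hadoop.hbase.util.Bytes;

    public class AclKeySketch {
      // Assumed value; the real constant is AccessControlLists.ACL_KEY_DELIMITER.
      private static final byte ACL_KEY_DELIMITER = ',';

      // Builds username[,family[,qualifier]] as used for the _acl_ column key.
      static byte[] userKey(String user, byte[] family, byte[] qualifier) {
        byte[] key = Bytes.toBytes(user);
        if (family != null && family.length > 0) {
          key = Bytes.add(key, Bytes.add(new byte[] { ACL_KEY_DELIMITER }, family));
          if (qualifier != null && qualifier.length > 0) {
            key = Bytes.add(key, Bytes.add(new byte[] { ACL_KEY_DELIMITER }, qualifier));
          }
        }
        return key;
      }
    }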

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 1100500..1a84bfd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -237,20 +237,20 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
     return regionEnv != null ? regionEnv.getRegion() : null;
   }
 
-  public TableAuthManager getAuthManager() {
+  public AuthManager getAuthManager() {
     return accessChecker.getAuthManager();
   }
 
   private void initialize(RegionCoprocessorEnvironment e) throws IOException {
     final Region region = e.getRegion();
     Configuration conf = e.getConfiguration();
-    Map<byte[], ListMultimap<String, TablePermission>> tables = AccessControlLists.loadAll(region);
+    Map<byte[], ListMultimap<String, UserPermission>> tables = AccessControlLists.loadAll(region);
     // For each table, write out the table's permissions to the respective
     // znode for that table.
-    for (Map.Entry<byte[], ListMultimap<String,TablePermission>> t:
+    for (Map.Entry<byte[], ListMultimap<String, UserPermission>> t:
       tables.entrySet()) {
       byte[] entry = t.getKey();
-      ListMultimap<String,TablePermission> perms = t.getValue();
+      ListMultimap<String, UserPermission> perms = t.getValue();
       byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, conf);
       getAuthManager().getZKPermissionWatcher().writeToZookeeper(entry, serialized);
     }
@@ -286,7 +286,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
     try (Table t = e.getConnection().getTable(AccessControlLists.ACL_TABLE_NAME)) {
       for (byte[] entry : entries) {
         currentEntry = entry;
-        ListMultimap<String, TablePermission> perms =
+        ListMultimap<String, UserPermission> perms =
             AccessControlLists.getPermissions(conf, entry, t, null, null, null, false);
         byte[] serialized = AccessControlLists.writePermissionsAsBytes(perms, conf);
         zkw.writeToZookeeper(entry, serialized);
@@ -330,7 +330,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
     }
 
     // 2. check for the table-level, if successful we can short-circuit
-    if (getAuthManager().authorize(user, tableName, (byte[])null, permRequest)) {
+    if (getAuthManager().authorizeUserTable(user, tableName, permRequest)) {
       return AuthResult.allow(request, "Table permission granted", user,
         permRequest, tableName, families);
     }
@@ -340,7 +340,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
       // all families must pass
       for (Map.Entry<byte [], ? extends Collection<?>> family : families.entrySet()) {
         // a) check for family level access
-        if (getAuthManager().authorize(user, tableName, family.getKey(),
+        if (getAuthManager().authorizeUserTable(user, tableName, family.getKey(),
             permRequest)) {
           continue;  // family-level permission overrides per-qualifier
         }
@@ -351,17 +351,17 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
             // for each qualifier of the family
             Set<byte[]> familySet = (Set<byte[]>)family.getValue();
             for (byte[] qualifier : familySet) {
-              if (!getAuthManager().authorize(user, tableName, family.getKey(),
-                                         qualifier, permRequest)) {
+              if (!getAuthManager().authorizeUserTable(user, tableName,
+                    family.getKey(), qualifier, permRequest)) {
                 return AuthResult.deny(request, "Failed qualifier check", user,
-                    permRequest, tableName, makeFamilyMap(family.getKey(), qualifier));
+                  permRequest, tableName, makeFamilyMap(family.getKey(), qualifier));
               }
             }
           } else if (family.getValue() instanceof List) { // List<Cell>
             List<Cell> cellList = (List<Cell>)family.getValue();
             for (Cell cell : cellList) {
-              if (!getAuthManager().authorize(user, tableName, family.getKey(),
-                CellUtil.cloneQualifier(cell), permRequest)) {
+              if (!getAuthManager().authorizeUserTable(user, tableName, family.getKey(),
+                  CellUtil.cloneQualifier(cell), permRequest)) {
                 return AuthResult.deny(request, "Failed qualifier check", user, permRequest,
                   tableName, makeFamilyMap(family.getKey(), CellUtil.cloneQualifier(cell)));
               }
@@ -370,7 +370,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
         } else {
           // no qualifiers and family-level check already failed
           return AuthResult.deny(request, "Failed family check", user, permRequest,
-              tableName, makeFamilyMap(family.getKey(), null));
+            tableName, makeFamilyMap(family.getKey(), null));
         }
       }
 
@@ -487,14 +487,13 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
           familyMap.entrySet()) {
         if (family.getValue() != null && !family.getValue().isEmpty()) {
           for (byte[] qualifier : family.getValue()) {
-            if (getAuthManager().matchPermission(user, tableName,
-                family.getKey(), qualifier, perm)) {
+            if (getAuthManager().authorizeUserTable(user, tableName,
+                  family.getKey(), qualifier, perm)) {
               return true;
             }
           }
         } else {
-          if (getAuthManager().matchPermission(user, tableName, family.getKey(),
-              perm)) {
+          if (getAuthManager().authorizeUserFamily(user, tableName, family.getKey(), perm)) {
             return true;
           }
         }
@@ -683,7 +682,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
           foundColumn = true;
           for (Action action: actions) {
             // Are there permissions for this user for the cell?
-            if (!getAuthManager().authorize(user, getTableName(e), cell, action)) {
+            if (!getAuthManager().authorizeCell(user, getTableName(e), cell, action)) {
               // We can stop if the cell ACL denies access
               return false;
             }
@@ -798,7 +797,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
 
     // set the user-provider.
     this.userProvider = UserProvider.instantiate(env.getConfiguration());
-    // Throws RuntimeException if fails to load TableAuthManager so that coprocessor is unloaded.
+    // Throws RuntimeException if fails to load AuthManager so that coprocessor is unloaded.
     accessChecker = new AccessChecker(env.getConfiguration(), zk);
     tableAcls = new MapMaker().weakValues().makeMap();
   }
@@ -886,8 +885,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
         // default the table owner to current user, if not specified.
         if (owner == null)
           owner = getActiveUser(c).getShortName();
-        final UserPermission userperm = new UserPermission(Bytes.toBytes(owner),
-            desc.getTableName(), null, Action.values());
+        final UserPermission userPermission = new UserPermission(owner,
+          desc.getTableName(), Action.values());
         // switch to the real hbase master user for doing the RPC on the ACL table
         User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
           @Override
@@ -895,7 +894,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
             try (Table table = c.getEnvironment().getConnection().
                 getTable(AccessControlLists.ACL_TABLE_NAME)) {
               AccessControlLists.addUserPermission(c.getEnvironment().getConfiguration(),
-                  userperm, table);
+                userPermission, table);
             }
             return null;
           }
@@ -988,8 +987,8 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
     User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws Exception {
-        UserPermission userperm = new UserPermission(Bytes.toBytes(owner),
-            currentDesc.getTableName(), null, Action.values());
+        UserPermission userperm = new UserPermission(owner,
+          currentDesc.getTableName(), Action.values());
         try (Table table = c.getEnvironment().getConnection().
             getTable(AccessControlLists.ACL_TABLE_NAME)) {
           AccessControlLists.addUserPermission(conf, userperm, table);
@@ -2044,19 +2043,24 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
         if (!initialized) {
           throw new CoprocessorException("AccessController not yet initialized");
         }
+        User caller = RpcServer.getRequestUser().orElse(null);
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Received request to grant access permission " + perm.toString());
+          LOG.debug("Received request from {} to grant access permission {}",
+            caller.getName(), perm.toString());
         }
-        User caller = RpcServer.getRequestUser().orElse(null);
 
         switch(request.getUserPermission().getPermission().getType()) {
           case Global :
+            accessChecker.requireGlobalPermission(caller, "grant", Action.ADMIN, "");
+            break;
           case Table :
-            accessChecker.requirePermission(caller, "grant", perm.getTableName(),
-                perm.getFamily(), perm.getQualifier(), null, Action.ADMIN);
+            TablePermission tablePerm = (TablePermission) perm.getPermission();
+            accessChecker.requirePermission(caller, "grant", tablePerm.getTableName(),
+              tablePerm.getFamily(), tablePerm.getQualifier(), null, Action.ADMIN);
             break;
           case Namespace :
-            accessChecker.requireNamespacePermission(caller, "grant", perm.getNamespace(),
+            NamespacePermission namespacePer = (NamespacePermission) perm.getPermission();
+            accessChecker.requireNamespacePermission(caller, "grant", namespacePer.getNamespace(),
                 null, Action.ADMIN);
            break;
         }
@@ -2102,20 +2106,25 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
         if (!initialized) {
           throw new CoprocessorException("AccessController not yet initialized");
         }
+        User caller = RpcServer.getRequestUser().orElse(null);
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Received request to revoke access permission " + perm.toString());
+          LOG.debug("Received request from {} to revoke access permission {}",
+            caller.getShortName(), perm.toString());
         }
-        User caller = RpcServer.getRequestUser().orElse(null);
 
         switch(request.getUserPermission().getPermission().getType()) {
           case Global :
+            accessChecker.requireGlobalPermission(caller, "revoke", Action.ADMIN, "");
+            break;
           case Table :
-            accessChecker.requirePermission(caller, "revoke", perm.getTableName(), perm.getFamily(),
-              perm.getQualifier(), null, Action.ADMIN);
+            TablePermission tablePerm = (TablePermission) perm.getPermission();
+            accessChecker.requirePermission(caller, "revoke", tablePerm.getTableName(),
+              tablePerm.getFamily(), tablePerm.getQualifier(), null, Action.ADMIN);
             break;
           case Namespace :
-            accessChecker.requireNamespacePermission(caller, "revoke", perm.getNamespace(),
-                null, Action.ADMIN);
+            NamespacePermission namespacePer = (NamespacePermission) perm.getPermission();
+            accessChecker.requireNamespacePermission(caller, "revoke",
+              namespacePer.getNamespace(), null, Action.ADMIN);
             break;
         }
 
@@ -2189,7 +2198,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
         } else if (request.getType() == AccessControlProtos.Permission.Type.Namespace) {
           final String namespace = request.getNamespaceName().toStringUtf8();
           accessChecker.requireNamespacePermission(caller, "userPermissions",
-            namespace,userName, Action.ADMIN);
+            namespace, userName, Action.ADMIN);
           perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {
             @Override
             public List<UserPermission> run() throws Exception {
@@ -2225,8 +2234,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
             // them. Also using acl as table name to be inline with the results of global admin and
             // will help in avoiding any leakage of information about being superusers.
             for (String user : Superusers.getSuperUsers()) {
-              perms.add(new UserPermission(Bytes.toBytes(user), AccessControlLists.ACL_TABLE_NAME,
-                  null, Action.values()));
+              perms.add(new UserPermission(user, Action.values()));
             }
           }
         }
@@ -2295,7 +2303,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
 
           for (Action action : permission.getActions()) {
             AuthResult result;
-            if (getAuthManager().authorize(user, action)) {
+            if (getAuthManager().authorizeUserGlobal(user, action)) {
               result = AuthResult.allow("checkPermissions", "Global action allowed", user,
                 action, null, null);
             } else {
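
The AccessController changes are mostly mechanical: each overloaded
TableAuthManager.authorize(...) call is replaced with an AuthManager method
whose name spells out the scope being checked. A before/after sketch under
that assumption (the helper class and method names are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.security.User;
    import org.apache.hadoop.hbase.security.access.AuthManager;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class AuthMigrationSketch {
      // Old: auth.authorize(user, table, family, qualifier, Action.WRITE)
      static boolean canWrite(AuthManager auth, User user, TableName table,
          byte[] family, byte[] qualifier) {
        return auth.authorizeUserTable(user, table, family, qualifier,
            Permission.Action.WRITE);
      }

      // Old: auth.authorize(user, Action.ADMIN)
      static boolean isGlobalAdmin(AuthManager auth, User user) {
        return auth.authorizeUserGlobal(user, Permission.Action.ADMIN);
      }
    }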

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java
new file mode 100644
index 0000000..8da9a82
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AuthManager.java
@@ -0,0 +1,608 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.log.HBaseMarkers;
+import org.apache.hadoop.hbase.security.Superusers;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.zookeeper.KeeperException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+/**
+ * Performs authorization checks for a given user's assigned permissions.
+ * <p>
+ *   There are the following scopes: <b>Global</b>, <b>Namespace</b>, <b>Table</b>, <b>Family</b>,
+ *   <b>Qualifier</b>, and <b>Cell</b>.
+ *   Generally speaking, a permission at a higher scope overrides the lower scopes, except that a
+ *   Cell permission can be granted even when the user has no permission on the enclosing table;
+ *   in that case the user can get/scan only the granted cells.
+ * </p>
+ * For example, if user A holds the global permission R(ead), A can read table T without any
+ * table-scope check, which is why authorization checks always start from the Global scope.
+ * <p>
+ *   At every scope, the groups a user belongs to are checked in addition to the user itself.
+ * </p>
+ */
+@InterfaceAudience.Private
+public final class AuthManager implements Closeable {
+
+  /**
+   * Cache of permissions; it is thread-safe.
+   * @param <T> T extends Permission
+   */
+  private static class PermissionCache<T extends Permission> {
+    private final Object mutex = new Object();
+    private Map<String, Set<T>> cache = new HashMap<>();
+
+    void put(String name, T perm) {
+      synchronized (mutex) {
+        Set<T> perms = cache.getOrDefault(name, new HashSet<>());
+        perms.add(perm);
+        cache.put(name, perms);
+      }
+    }
+
+    Set<T> get(String name) {
+      synchronized (mutex) {
+        return cache.get(name);
+      }
+    }
+
+    void clear() {
+      synchronized (mutex) {
+        for (Map.Entry<String, Set<T>> entry : cache.entrySet()) {
+          entry.getValue().clear();
+        }
+        cache.clear();
+      }
+    }
+  }
+  PermissionCache<NamespacePermission> NS_NO_PERMISSION = new PermissionCache<>();
+  PermissionCache<TablePermission> TBL_NO_PERMISSION = new PermissionCache<>();
+
+  /**
+   * Cache for global permission.
+   * Since every user/group can hold only one global permission, there is no need to use a
+   * PermissionCache.
+   */
+  private volatile Map<String, GlobalPermission> globalCache;
+  /** Cache for namespace permission. */
+  private ConcurrentHashMap<String, PermissionCache<NamespacePermission>> namespaceCache =
+    new ConcurrentHashMap<>();
+  /** Cache for table permission. */
+  private ConcurrentHashMap<TableName, PermissionCache<TablePermission>> tableCache =
+    new ConcurrentHashMap<>();
+
+  private static final Logger LOG = LoggerFactory.getLogger(AuthManager.class);
+
+  private Configuration conf;
+  private ZKPermissionWatcher zkperms;
+  private final AtomicLong mtime = new AtomicLong(0L);
+
+  private AuthManager(ZKWatcher watcher, Configuration conf)
+      throws IOException {
+    this.conf = conf;
+    // initialize global permissions based on configuration
+    globalCache = initGlobal(conf);
+
+    this.zkperms = new ZKPermissionWatcher(watcher, this, conf);
+    try {
+      this.zkperms.start();
+    } catch (KeeperException ke) {
+      LOG.error("ZooKeeper initialization failed", ke);
+    }
+  }
+
+  @Override
+  public void close() {
+    this.zkperms.close();
+  }
+
+  /**
+   * Initialize with global permission assignments
+   * from the {@code hbase.superuser} configuration key.
+   */
+  private Map<String, GlobalPermission> initGlobal(Configuration conf) throws IOException {
+    UserProvider userProvider = UserProvider.instantiate(conf);
+    User user = userProvider.getCurrent();
+    if (user == null) {
+      throw new IOException("Unable to obtain the current user, " +
+        "authorization checks for internal operations will not work correctly!");
+    }
+    String currentUser = user.getShortName();
+
+    Map<String, GlobalPermission> global = new HashMap<>();
+    // the system user is always included
+    List<String> superusers = Lists.asList(currentUser, conf.getStrings(
+        Superusers.SUPERUSER_CONF_KEY, new String[0]));
+    for (String name : superusers) {
+      GlobalPermission globalPermission = new GlobalPermission(Permission.Action.values());
+      global.put(name, globalPermission);
+    }
+    return global;
+  }
+
+  public ZKPermissionWatcher getZKPermissionWatcher() {
+    return this.zkperms;
+  }
+
+  /**
+   * Update acl info for table.
+   * @param table name of table
+   * @param data updated acl data
+   * @throws IOException if the acl data cannot be deserialized
+   */
+  public void refreshTableCacheFromWritable(TableName table, byte[] data) throws IOException {
+    if (data != null && data.length > 0) {
+      try {
+        ListMultimap<String, Permission> perms =
+          AccessControlLists.readPermissions(data, conf);
+        if (perms != null) {
+          if (Bytes.equals(table.getName(), AccessControlLists.ACL_GLOBAL_NAME)) {
+            updateGlobalCache(perms);
+          } else {
+            updateTableCache(table, perms);
+          }
+        }
+      } catch (DeserializationException e) {
+        throw new IOException(e);
+      }
+    } else {
+      LOG.info("Skipping permission cache refresh because writable data is empty");
+    }
+  }
+
+  /**
+   * Update acl info for namespace.
+   * @param namespace namespace
+   * @param data updated acl data
+   * @throws IOException if the acl data cannot be deserialized
+   */
+  public void refreshNamespaceCacheFromWritable(String namespace, byte[] data) throws IOException {
+    if (data != null && data.length > 0) {
+      try {
+        ListMultimap<String, Permission> perms =
+          AccessControlLists.readPermissions(data, conf);
+        if (perms != null) {
+          updateNamespaceCache(namespace, perms);
+        }
+      } catch (DeserializationException e) {
+        throw new IOException(e);
+      }
+    } else {
+      LOG.debug("Skipping permission cache refresh because writable data is empty");
+    }
+  }
+
+  /**
+   * Updates the internal global permissions cache.
+   * @param globalPerms new global permissions
+   */
+  private void updateGlobalCache(ListMultimap<String, Permission> globalPerms) {
+    try {
+      Map<String, GlobalPermission> global = initGlobal(conf);
+      for (String name : globalPerms.keySet()) {
+        for (Permission permission : globalPerms.get(name)) {
+          global.put(name, (GlobalPermission) permission);
+        }
+      }
+      globalCache = global;
+      mtime.incrementAndGet();
+    } catch (Exception e) {
+      // Never happens
+      LOG.error("Error occurred while updating the global cache", e);
+    }
+  }
+
+  /**
+   * Updates the internal table permissions cache for specified table.
+   * @param table updated table name
+   * @param tablePerms new table permissions
+   */
+  private void updateTableCache(TableName table, ListMultimap<String, Permission> tablePerms) {
+    PermissionCache<TablePermission> cacheToUpdate =
+      tableCache.getOrDefault(table, new PermissionCache<>());
+    clearCache(cacheToUpdate);
+    updateCache(tablePerms, cacheToUpdate);
+    tableCache.put(table, cacheToUpdate);
+    mtime.incrementAndGet();
+  }
+
+  /**
+   * Updates the internal namespace permissions cache for specified namespace.
+   * @param namespace updated namespace
+   * @param nsPerms new namespace permissions
+   */
+  private void updateNamespaceCache(String namespace,
+      ListMultimap<String, Permission> nsPerms) {
+    PermissionCache<NamespacePermission> cacheToUpdate =
+      namespaceCache.getOrDefault(namespace, new PermissionCache<>());
+    clearCache(cacheToUpdate);
+    updateCache(nsPerms, cacheToUpdate);
+    namespaceCache.put(namespace, cacheToUpdate);
+    mtime.incrementAndGet();
+  }
+
+  private void clearCache(PermissionCache cacheToUpdate) {
+    cacheToUpdate.clear();
+  }
+
+  @SuppressWarnings("unchecked")
+  private void updateCache(ListMultimap<String, ? extends Permission> newPermissions,
+      PermissionCache cacheToUpdate) {
+    for (String name : newPermissions.keySet()) {
+      for (Permission permission : newPermissions.get(name)) {
+        cacheToUpdate.put(name, permission);
+      }
+    }
+  }
+
+  /**
+   * Check if the user has the given action privilege in global scope.
+   * @param user user to check
+   * @param action one of [Read, Write, Create, Exec, Admin]
+   * @return true if the user has the privilege, false otherwise
+   */
+  public boolean authorizeUserGlobal(User user, Permission.Action action) {
+    if (user == null) {
+      return false;
+    }
+    if (authorizeGlobal(globalCache.get(user.getShortName()), action)) {
+      return true;
+    }
+    for (String group : user.getGroupNames()) {
+      if (authorizeGlobal(globalCache.get(AuthUtil.toGroupEntry(group)), action)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private boolean authorizeGlobal(GlobalPermission permissions, Permission.Action action) {
+    return permissions != null && permissions.implies(action);
+  }
+
+  /**
+   * Check if the user has the given action privilege in namespace scope.
+   * @param user user to check
+   * @param namespace namespace to check within
+   * @param action one of [Read, Write, Create, Exec, Admin]
+   * @return true if the user has the privilege, false otherwise
+   */
+  public boolean authorizeUserNamespace(User user, String namespace, Permission.Action action) {
+    if (user == null) {
+      return false;
+    }
+    if (authorizeUserGlobal(user, action)) {
+      return true;
+    }
+    PermissionCache<NamespacePermission> nsPermissions = namespaceCache.getOrDefault(namespace,
+      NS_NO_PERMISSION);
+    if (authorizeNamespace(nsPermissions.get(user.getShortName()), namespace, action)) {
+      return true;
+    }
+    for (String group : user.getGroupNames()) {
+      if (authorizeNamespace(nsPermissions.get(AuthUtil.toGroupEntry(group)), namespace, action)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private boolean authorizeNamespace(Set<NamespacePermission> permissions,
+      String namespace, Permission.Action action) {
+    if (permissions == null) {
+      return false;
+    }
+    for (NamespacePermission permission : permissions) {
+      if (permission.implies(namespace, action)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Checks if the user has access to the full table or at least a family/qualifier
+   * for the specified action.
+   * @param user user to check
+   * @param table table to check
+   * @param action one of [Read, Write, Create, Exec, Admin]
+   * @return true if the user has access to the table, false otherwise
+   */
+  public boolean accessUserTable(User user, TableName table, Permission.Action action) {
+    if (user == null) {
+      return false;
+    }
+    if (table == null) {
+      table = AccessControlLists.ACL_TABLE_NAME;
+    }
+    if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) {
+      return true;
+    }
+    PermissionCache<TablePermission> tblPermissions = tableCache.getOrDefault(table,
+      TBL_NO_PERMISSION);
+    if (hasAccessTable(tblPermissions.get(user.getShortName()), action)) {
+      return true;
+    }
+    for (String group : user.getGroupNames()) {
+      if (hasAccessTable(tblPermissions.get(AuthUtil.toGroupEntry(group)), action)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private boolean hasAccessTable(Set<TablePermission> permissions, Permission.Action action) {
+    if (permissions == null) {
+      return false;
+    }
+    for (TablePermission permission : permissions) {
+      if (permission.implies(action)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Check if the user has the given action privilege in table scope.
+   * @param user user to check
+   * @param table table to check
+   * @param action one of [Read, Write, Create, Exec, Admin]
+   * @return true if the user has the privilege, false otherwise
+   */
+  public boolean authorizeUserTable(User user, TableName table, Permission.Action action) {
+    return authorizeUserTable(user, table, null, null, action);
+  }
+
+  /**
+   * Check if the user has the given action privilege in table:family scope.
+   * @param user the user to check
+   * @param table table name
+   * @param family family name
+   * @param action one of the actions in [Read, Write, Create, Exec, Admin]
+   * @return true if the user has the permission, false otherwise
+   */
+  public boolean authorizeUserTable(User user, TableName table, byte[] family,
+      Permission.Action action) {
+    return authorizeUserTable(user, table, family, null, action);
+  }
+
+  /**
+   * Check if the user has the given action privilege in table:family:qualifier scope.
+   * @param user the user to check
+   * @param table table name
+   * @param family family name
+   * @param qualifier qualifier name
+   * @param action one of the actions in [Read, Write, Create, Exec, Admin]
+   * @return true if the user has the permission, false otherwise
+   */
+  public boolean authorizeUserTable(User user, TableName table, byte[] family,
+      byte[] qualifier, Permission.Action action) {
+    if (user == null) {
+      return false;
+    }
+    if (table == null) {
+      table = AccessControlLists.ACL_TABLE_NAME;
+    }
+    if (authorizeUserNamespace(user, table.getNamespaceAsString(), action)) {
+      return true;
+    }
+    PermissionCache<TablePermission> tblPermissions = tableCache.getOrDefault(table,
+      TBL_NO_PERMISSION);
+    if (authorizeTable(tblPermissions.get(user.getShortName()), table, family, qualifier, action)) {
+      return true;
+    }
+    for (String group : user.getGroupNames()) {
+      if (authorizeTable(tblPermissions.get(AuthUtil.toGroupEntry(group)),
+          table, family, qualifier, action)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private boolean authorizeTable(Set<TablePermission> permissions,
+      TableName table, byte[] family, byte[] qualifier, Permission.Action action) {
+    if (permissions == null) {
+      return false;
+    }
+    for (TablePermission permission : permissions) {
+      if (permission.implies(table, family, qualifier, action)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Check if the user has the given action privilege in table:family scope.
+   * This method is for backward compatibility.
+   * @param user the user to check
+   * @param table table name
+   * @param family family name
+   * @param action one of the actions in [Read, Write, Create, Exec, Admin]
+   * @return true if the user has the permission, false otherwise
+   */
+  public boolean authorizeUserFamily(User user, TableName table,
+      byte[] family, Permission.Action action) {
+    PermissionCache<TablePermission> tblPermissions = tableCache.getOrDefault(table,
+      TBL_NO_PERMISSION);
+    if (authorizeFamily(tblPermissions.get(user.getShortName()), table, family, action)) {
+      return true;
+    }
+    for (String group : user.getGroupNames()) {
+      if (authorizeFamily(tblPermissions.get(AuthUtil.toGroupEntry(group)),
+          table, family, action)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private boolean authorizeFamily(Set<TablePermission> permissions,
+      TableName table, byte[] family, Permission.Action action) {
+    if (permissions == null) {
+      return false;
+    }
+    for (TablePermission permission : permissions) {
+      if (permission.implies(table, family, action)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Check if the user has the given action privilege in cell scope.
+   * @param user the user to check
+   * @param table table name
+   * @param cell cell to be checked
+   * @param action one of the actions in [Read, Write, Create, Exec, Admin]
+   * @return true if the user has the permission, false otherwise
+   */
+  public boolean authorizeCell(User user, TableName table, Cell cell, Permission.Action action) {
+    try {
+      List<Permission> perms = AccessControlLists.getCellPermissionsForUser(user, cell);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Perms for user " + user.getShortName() + " in cell " + cell + ": " +
+          (perms != null ? perms : ""));
+      }
+      if (perms != null) {
+        for (Permission p: perms) {
+          if (p.implies(action)) {
+            return true;
+          }
+        }
+      }
+    } catch (IOException e) {
+      // We failed to parse the KV tag
+      LOG.error("Failed parse of ACL tag in cell " + cell);
+      // Fall through to check with the table and CF perms we were able
+      // to collect regardless
+    }
+    return false;
+  }
+
+  /**
+   * Remove given namespace from AuthManager's namespace cache.
+   * @param ns namespace
+   */
+  public void removeNamespace(byte[] ns) {
+    namespaceCache.remove(Bytes.toString(ns));
+  }
+
+  /**
+   * Remove given table from AuthManager's table cache.
+   * @param table table name
+   */
+  public void removeTable(TableName table) {
+    tableCache.remove(table);
+  }
+
+  /**
+   * Returns the logical time of the last permission cache modification.
+   * @return the last modification logical timestamp
+   */
+  public long getMTime() {
+    return mtime.get();
+  }
+
+  private static Map<ZKWatcher, AuthManager> managerMap = new HashMap<>();
+
+  private static Map<AuthManager, Integer> refCount = new HashMap<>();
+
+  /**
+   * Returns an AuthManager from the cache. If not cached, constructs a new one.
+   * The returned instance should be released back by calling {@link #release(AuthManager)}.
+   * @param watcher zk watcher
+   * @param conf configuration
+   * @return an AuthManager
+   * @throws IOException if ZooKeeper initialization fails
+   */
+  public synchronized static AuthManager getOrCreate(
+      ZKWatcher watcher, Configuration conf) throws IOException {
+    AuthManager instance = managerMap.get(watcher);
+    if (instance == null) {
+      instance = new AuthManager(watcher, conf);
+      managerMap.put(watcher, instance);
+    }
+    int ref = refCount.get(instance) == null ? 0 : refCount.get(instance);
+    refCount.put(instance, ref + 1);
+    return instance;
+  }
+
+  @VisibleForTesting
+  public static int getTotalRefCount() {
+    int total = 0;
+    for (int count : refCount.values()) {
+      total += count;
+    }
+    return total;
+  }
+
+  /**
+   * Releases the resources for the given AuthManager if the reference count is down to 0.
+   * @param instance AuthManager to be released
+   */
+  public synchronized static void release(AuthManager instance) {
+    if (refCount.get(instance) == null || refCount.get(instance) < 1) {
+      String msg = "Something wrong with the AuthManager reference counting: " + instance
+          + " whose count is " + refCount.get(instance);
+      LOG.error(HBaseMarkers.FATAL, msg);
+      instance.close();
+      managerMap.remove(instance.getZKPermissionWatcher().getWatcher());
+      instance.getZKPermissionWatcher().getWatcher().abort(msg, null);
+    } else {
+      int ref = refCount.get(instance);
+      --ref;
+      refCount.put(instance, ref);
+      if (ref == 0) {
+        instance.close();
+        managerMap.remove(instance.getZKPermissionWatcher().getWatcher());
+        refCount.remove(instance);
+      }
+    }
+  }
+}
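
For orientation, a minimal usage sketch of the new AuthManager API above. It is hedged: AuthManager is @InterfaceAudience.Private, the ZKWatcher setup and the "ns:orders" table are invented for illustration, and real callers (e.g. AccessController) obtain these objects from server context rather than constructing them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.access.AuthManager;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

public class AuthManagerUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (ZKWatcher zk = new ZKWatcher(conf, "auth-manager-sketch", null)) {
      AuthManager auth = AuthManager.getOrCreate(zk, conf);
      try {
        User user = UserProvider.instantiate(conf).getCurrent();
        TableName table = TableName.valueOf("ns:orders"); // hypothetical table
        // Table-scope check; authorizeUserTable() internally falls back to
        // namespace scope and then global scope, as the code above shows.
        boolean canRead = auth.authorizeUserTable(user, table,
            Bytes.toBytes("f"), Bytes.toBytes("q"), Permission.Action.READ);
        boolean nsAdmin =
            auth.authorizeUserNamespace(user, "ns", Permission.Action.ADMIN);
        System.out.println("read=" + canRead + ", nsAdmin=" + nsAdmin);
      } finally {
        AuthManager.release(auth); // instances are reference-counted
      }
    }
  }
}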

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
deleted file mode 100644
index 76feff4..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/TableAuthManager.java
+++ /dev/null
@@ -1,787 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.security.access;
-
-import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.AuthUtil;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.log.HBaseMarkers;
-import org.apache.hadoop.hbase.security.Superusers;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Performs authorization checks for a given user's assigned permissions
- */
-@InterfaceAudience.Private
-public class TableAuthManager implements Closeable {
-  private static class PermissionCache<T extends Permission> {
-    /** Cache of user permissions */
-    private ListMultimap<String,T> userCache = ArrayListMultimap.create();
-    /** Cache of group permissions */
-    private ListMultimap<String,T> groupCache = ArrayListMultimap.create();
-
-    public List<T> getUser(String user) {
-      return userCache.get(user);
-    }
-
-    public void putUser(String user, T perm) {
-      userCache.put(user, perm);
-    }
-
-    public List<T> replaceUser(String user, Iterable<? extends T> perms) {
-      return userCache.replaceValues(user, perms);
-    }
-
-    public List<T> getGroup(String group) {
-      return groupCache.get(group);
-    }
-
-    public void putGroup(String group, T perm) {
-      groupCache.put(group, perm);
-    }
-
-    public List<T> replaceGroup(String group, Iterable<? extends T> perms) {
-      return groupCache.replaceValues(group, perms);
-    }
-
-    /**
-     * Returns a combined map of user and group permissions, with group names
-     * distinguished according to {@link AuthUtil#isGroupPrincipal(String)}.
-     */
-    public ListMultimap<String,T> getAllPermissions() {
-      ListMultimap<String,T> tmp = ArrayListMultimap.create();
-      tmp.putAll(userCache);
-      for (String group : groupCache.keySet()) {
-        tmp.putAll(AuthUtil.toGroupEntry(group), groupCache.get(group));
-      }
-      return tmp;
-    }
-  }
-
-  private static final Logger LOG = LoggerFactory.getLogger(TableAuthManager.class);
-
-  /** Cache of global permissions */
-  private volatile PermissionCache<Permission> globalCache;
-
-  private ConcurrentSkipListMap<TableName, PermissionCache<TablePermission>> tableCache =
-      new ConcurrentSkipListMap<>();
-
-  private ConcurrentSkipListMap<String, PermissionCache<TablePermission>> nsCache =
-    new ConcurrentSkipListMap<>();
-
-  private Configuration conf;
-  private ZKPermissionWatcher zkperms;
-  private final AtomicLong mtime = new AtomicLong(0L);
-
-  private TableAuthManager(ZKWatcher watcher, Configuration conf)
-      throws IOException {
-    this.conf = conf;
-
-    // initialize global permissions based on configuration
-    globalCache = initGlobal(conf);
-
-    this.zkperms = new ZKPermissionWatcher(watcher, this, conf);
-    try {
-      this.zkperms.start();
-    } catch (KeeperException ke) {
-      LOG.error("ZooKeeper initialization failed", ke);
-    }
-  }
-
-  @Override
-  public void close() {
-    this.zkperms.close();
-  }
-
-  /**
-   * Returns a new {@code PermissionCache} initialized with permission assignments
-   * from the {@code hbase.superuser} configuration key.
-   */
-  private PermissionCache<Permission> initGlobal(Configuration conf) throws IOException {
-    UserProvider userProvider = UserProvider.instantiate(conf);
-    User user = userProvider.getCurrent();
-    if (user == null) {
-      throw new IOException("Unable to obtain the current user, " +
-          "authorization checks for internal operations will not work correctly!");
-    }
-    PermissionCache<Permission> newCache = new PermissionCache<>();
-    String currentUser = user.getShortName();
-
-    // the system user is always included
-    List<String> superusers = Lists.asList(currentUser, conf.getStrings(
-        Superusers.SUPERUSER_CONF_KEY, new String[0]));
-    if (superusers != null) {
-      for (String name : superusers) {
-        if (AuthUtil.isGroupPrincipal(name)) {
-          newCache.putGroup(AuthUtil.getGroupName(name),
-              new Permission(Permission.Action.values()));
-        } else {
-          newCache.putUser(name, new Permission(Permission.Action.values()));
-        }
-      }
-    }
-    return newCache;
-  }
-
-  public ZKPermissionWatcher getZKPermissionWatcher() {
-    return this.zkperms;
-  }
-
-  public void refreshTableCacheFromWritable(TableName table,
-                                       byte[] data) throws IOException {
-    if (data != null && data.length > 0) {
-      ListMultimap<String,TablePermission> perms;
-      try {
-        perms = AccessControlLists.readPermissions(data, conf);
-      } catch (DeserializationException e) {
-        throw new IOException(e);
-      }
-
-      if (perms != null) {
-        if (Bytes.equals(table.getName(), AccessControlLists.ACL_GLOBAL_NAME)) {
-          updateGlobalCache(perms);
-        } else {
-          updateTableCache(table, perms);
-        }
-      }
-    } else {
-      LOG.debug("Skipping permission cache refresh because writable data is empty");
-    }
-  }
-
-  public void refreshNamespaceCacheFromWritable(String namespace, byte[] data) throws IOException {
-    if (data != null && data.length > 0) {
-      ListMultimap<String,TablePermission> perms;
-      try {
-        perms = AccessControlLists.readPermissions(data, conf);
-      } catch (DeserializationException e) {
-        throw new IOException(e);
-      }
-      if (perms != null) {
-        updateNsCache(namespace, perms);
-      }
-    } else {
-      LOG.debug("Skipping permission cache refresh because writable data is empty");
-    }
-  }
-
-  /**
-   * Updates the internal global permissions cache
-   *
-   * @param userPerms
-   */
-  private void updateGlobalCache(ListMultimap<String,TablePermission> userPerms) {
-    PermissionCache<Permission> newCache = null;
-    try {
-      newCache = initGlobal(conf);
-      for (Map.Entry<String,TablePermission> entry : userPerms.entries()) {
-        if (AuthUtil.isGroupPrincipal(entry.getKey())) {
-          newCache.putGroup(AuthUtil.getGroupName(entry.getKey()),
-              new Permission(entry.getValue().getActions()));
-        } else {
-          newCache.putUser(entry.getKey(), new Permission(entry.getValue().getActions()));
-        }
-      }
-      globalCache = newCache;
-      mtime.incrementAndGet();
-    } catch (IOException e) {
-      // Never happens
-      LOG.error("Error occurred while updating the global cache", e);
-    }
-  }
-
-  /**
-   * Updates the internal permissions cache for a single table, splitting
-   * the permissions listed into separate caches for users and groups to optimize
-   * group lookups.
-   *
-   * @param table
-   * @param tablePerms
-   */
-  private void updateTableCache(TableName table,
-                                ListMultimap<String,TablePermission> tablePerms) {
-    PermissionCache<TablePermission> newTablePerms = new PermissionCache<>();
-
-    for (Map.Entry<String,TablePermission> entry : tablePerms.entries()) {
-      if (AuthUtil.isGroupPrincipal(entry.getKey())) {
-        newTablePerms.putGroup(AuthUtil.getGroupName(entry.getKey()), entry.getValue());
-      } else {
-        newTablePerms.putUser(entry.getKey(), entry.getValue());
-      }
-    }
-
-    tableCache.put(table, newTablePerms);
-    mtime.incrementAndGet();
-  }
-
-  /**
-   * Updates the internal permissions cache for a single table, splitting
-   * the permissions listed into separate caches for users and groups to optimize
-   * group lookups.
-   *
-   * @param namespace
-   * @param tablePerms
-   */
-  private void updateNsCache(String namespace,
-                             ListMultimap<String, TablePermission> tablePerms) {
-    PermissionCache<TablePermission> newTablePerms = new PermissionCache<>();
-
-    for (Map.Entry<String, TablePermission> entry : tablePerms.entries()) {
-      if (AuthUtil.isGroupPrincipal(entry.getKey())) {
-        newTablePerms.putGroup(AuthUtil.getGroupName(entry.getKey()), entry.getValue());
-      } else {
-        newTablePerms.putUser(entry.getKey(), entry.getValue());
-      }
-    }
-
-    nsCache.put(namespace, newTablePerms);
-    mtime.incrementAndGet();
-  }
-
-  private PermissionCache<TablePermission> getTablePermissions(TableName table) {
-    return computeIfAbsent(tableCache, table, PermissionCache::new);
-  }
-
-  private PermissionCache<TablePermission> getNamespacePermissions(String namespace) {
-    return computeIfAbsent(nsCache, namespace, PermissionCache::new);
-  }
-
-  /**
-   * Authorizes a global permission
-   * @param perms
-   * @param action
-   * @return true if authorized, false otherwise
-   */
-  private boolean authorize(List<Permission> perms, Permission.Action action) {
-    if (perms != null) {
-      for (Permission p : perms) {
-        if (p.implies(action)) {
-          return true;
-        }
-      }
-    } else if (LOG.isDebugEnabled()) {
-      LOG.debug("No permissions found for " + action);
-    }
-
-    return false;
-  }
-
-  /**
-   * Authorize a global permission based on ACLs for the given user and the
-   * user's groups.
-   * @param user
-   * @param action
-   * @return true if known and authorized, false otherwise
-   */
-  public boolean authorize(User user, Permission.Action action) {
-    if (user == null) {
-      return false;
-    }
-
-    if (authorize(globalCache.getUser(user.getShortName()), action)) {
-      return true;
-    }
-
-    String[] groups = user.getGroupNames();
-    if (groups != null) {
-      for (String group : groups) {
-        if (authorize(globalCache.getGroup(group), action)) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  private boolean authorize(List<TablePermission> perms,
-                            TableName table, byte[] family,
-                            byte[] qualifier, Permission.Action action) {
-    if (perms != null) {
-      for (TablePermission p : perms) {
-        if (p.implies(table, family, qualifier, action)) {
-          return true;
-        }
-      }
-    } else if (LOG.isDebugEnabled()) {
-      LOG.debug("No permissions found for table="+table);
-    }
-    return false;
-  }
-
-  private boolean hasAccess(List<TablePermission> perms,
-                            TableName table, Permission.Action action) {
-    if (perms != null) {
-      for (TablePermission p : perms) {
-        if (p.implies(action)) {
-          return true;
-        }
-      }
-    } else if (LOG.isDebugEnabled()) {
-      LOG.debug("No permissions found for table="+table);
-    }
-    return false;
-  }
-
-  /**
-   * Authorize a user for a given KV. This is called from AccessControlFilter.
-   */
-  public boolean authorize(User user, TableName table, Cell cell, Permission.Action action) {
-    try {
-      List<Permission> perms = AccessControlLists.getCellPermissionsForUser(user, cell);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Perms for user " + user.getShortName() + " in cell " + cell + ": " +
-          (perms != null ? perms : ""));
-      }
-      if (perms != null) {
-        for (Permission p: perms) {
-          if (p.implies(action)) {
-            return true;
-          }
-        }
-      }
-    } catch (IOException e) {
-      // We failed to parse the KV tag
-      LOG.error("Failed parse of ACL tag in cell " + cell);
-      // Fall through to check with the table and CF perms we were able
-      // to collect regardless
-    }
-    return false;
-  }
-
-  public boolean authorize(User user, String namespace, Permission.Action action) {
-    // Global authorizations supercede namespace level
-    if (authorize(user, action)) {
-      return true;
-    }
-    // Check namespace permissions
-    PermissionCache<TablePermission> tablePerms = nsCache.get(namespace);
-    if (tablePerms != null) {
-      List<TablePermission> userPerms = tablePerms.getUser(user.getShortName());
-      if (authorize(userPerms, namespace, action)) {
-        return true;
-      }
-      String[] groupNames = user.getGroupNames();
-      if (groupNames != null) {
-        for (String group : groupNames) {
-          List<TablePermission> groupPerms = tablePerms.getGroup(group);
-          if (authorize(groupPerms, namespace, action)) {
-            return true;
-          }
-        }
-      }
-    }
-    return false;
-  }
-
-  private boolean authorize(List<TablePermission> perms, String namespace,
-                            Permission.Action action) {
-    if (perms != null) {
-      for (TablePermission p : perms) {
-        if (p.implies(namespace, action)) {
-          return true;
-        }
-      }
-    } else if (LOG.isDebugEnabled()) {
-      LOG.debug("No permissions for authorize() check, table=" + namespace);
-    }
-
-    return false;
-  }
-
-  /**
-   * Checks authorization to a given table and column family for a user, based on the
-   * stored user permissions.
-   *
-   * @param user
-   * @param table
-   * @param family
-   * @param action
-   * @return true if known and authorized, false otherwise
-   */
-  public boolean authorizeUser(User user, TableName table, byte[] family,
-      Permission.Action action) {
-    return authorizeUser(user, table, family, null, action);
-  }
-
-  public boolean authorizeUser(User user, TableName table, byte[] family,
-      byte[] qualifier, Permission.Action action) {
-    if (table == null) table = AccessControlLists.ACL_TABLE_NAME;
-    // Global and namespace authorizations supercede table level
-    if (authorize(user, table.getNamespaceAsString(), action)) {
-      return true;
-    }
-    // Check table permissions
-    return authorize(getTablePermissions(table).getUser(user.getShortName()), table, family,
-        qualifier, action);
-  }
-
-  /**
-   * Checks if the user has access to the full table or at least a family/qualifier
-   * for the specified action.
-   *
-   * @param user
-   * @param table
-   * @param action
-   * @return true if the user has access to the table, false otherwise
-   */
-  public boolean userHasAccess(User user, TableName table, Permission.Action action) {
-    if (table == null) table = AccessControlLists.ACL_TABLE_NAME;
-    // Global and namespace authorizations supercede table level
-    if (authorize(user, table.getNamespaceAsString(), action)) {
-      return true;
-    }
-    // Check table permissions
-    return hasAccess(getTablePermissions(table).getUser(user.getShortName()), table, action);
-  }
-
-  /**
-   * Checks global authorization for a given action for a group, based on the stored
-   * permissions.
-   */
-  public boolean authorizeGroup(String groupName, Permission.Action action) {
-    List<Permission> perms = globalCache.getGroup(groupName);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("authorizing " + (perms != null && !perms.isEmpty() ? perms.get(0) : "") +
-        " for " + action);
-    }
-    return authorize(perms, action);
-  }
-
-  /**
-   * Checks authorization to a given table, column family and column for a group, based
-   * on the stored permissions.
-   * @param groupName
-   * @param table
-   * @param family
-   * @param qualifier
-   * @param action
-   * @return true if known and authorized, false otherwise
-   */
-  public boolean authorizeGroup(String groupName, TableName table, byte[] family,
-      byte[] qualifier, Permission.Action action) {
-    // Global authorization supercedes table level
-    if (authorizeGroup(groupName, action)) {
-      return true;
-    }
-    if (table == null) table = AccessControlLists.ACL_TABLE_NAME;
-    // Namespace authorization supercedes table level
-    String namespace = table.getNamespaceAsString();
-    if (authorize(getNamespacePermissions(namespace).getGroup(groupName), namespace, action)) {
-      return true;
-    }
-    // Check table level
-    List<TablePermission> tblPerms = getTablePermissions(table).getGroup(groupName);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("authorizing " + (tblPerms != null && !tblPerms.isEmpty() ? tblPerms.get(0) : "") +
-        " for " +groupName + " on " + table + "." + Bytes.toString(family) + "." +
-        Bytes.toString(qualifier) + " with " + action);
-    }
-    return authorize(tblPerms, table, family, qualifier, action);
-  }
-
-  /**
-   * Checks if the user has access to the full table or at least a family/qualifier
-   * for the specified action.
-   * @param groupName
-   * @param table
-   * @param action
-   * @return true if the group has access to the table, false otherwise
-   */
-  public boolean groupHasAccess(String groupName, TableName table, Permission.Action action) {
-    // Global authorization supercedes table level
-    if (authorizeGroup(groupName, action)) {
-      return true;
-    }
-    if (table == null) table = AccessControlLists.ACL_TABLE_NAME;
-    // Namespace authorization supercedes table level
-    if (hasAccess(getNamespacePermissions(table.getNamespaceAsString()).getGroup(groupName),
-        table, action)) {
-      return true;
-    }
-    // Check table level
-    return hasAccess(getTablePermissions(table).getGroup(groupName), table, action);
-  }
-
-  public boolean authorize(User user, TableName table, byte[] family,
-      byte[] qualifier, Permission.Action action) {
-    if (authorizeUser(user, table, family, qualifier, action)) {
-      return true;
-    }
-
-    String[] groups = user.getGroupNames();
-    if (groups != null) {
-      for (String group : groups) {
-        if (authorizeGroup(group, table, family, qualifier, action)) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  public boolean hasAccess(User user, TableName table, Permission.Action action) {
-    if (userHasAccess(user, table, action)) {
-      return true;
-    }
-
-    String[] groups = user.getGroupNames();
-    if (groups != null) {
-      for (String group : groups) {
-        if (groupHasAccess(group, table, action)) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  public boolean authorize(User user, TableName table, byte[] family,
-      Permission.Action action) {
-    return authorize(user, table, family, null, action);
-  }
-
-  /**
-   * Returns true if the given user has a {@link TablePermission} matching up
-   * to the column family portion of a permission.  Note that this permission
-   * may be scoped to a given column qualifier and does not guarantee that
-   * authorize() on the same column family would return true.
-   */
-  public boolean matchPermission(User user,
-      TableName table, byte[] family, Permission.Action action) {
-    PermissionCache<TablePermission> tablePerms = tableCache.get(table);
-    if (tablePerms != null) {
-      List<TablePermission> userPerms = tablePerms.getUser(user.getShortName());
-      if (userPerms != null) {
-        for (TablePermission p : userPerms) {
-          if (p.matchesFamily(table, family, action)) {
-            return true;
-          }
-        }
-      }
-
-      String[] groups = user.getGroupNames();
-      if (groups != null) {
-        for (String group : groups) {
-          List<TablePermission> groupPerms = tablePerms.getGroup(group);
-          if (groupPerms != null) {
-            for (TablePermission p : groupPerms) {
-              if (p.matchesFamily(table, family, action)) {
-                return true;
-              }
-            }
-          }
-        }
-      }
-    }
-
-    return false;
-  }
-
-  public boolean matchPermission(User user,
-      TableName table, byte[] family, byte[] qualifier,
-      Permission.Action action) {
-    PermissionCache<TablePermission> tablePerms = tableCache.get(table);
-    if (tablePerms != null) {
-      List<TablePermission> userPerms = tablePerms.getUser(user.getShortName());
-      if (userPerms != null) {
-        for (TablePermission p : userPerms) {
-          if (p.matchesFamilyQualifier(table, family, qualifier, action)) {
-            return true;
-          }
-        }
-      }
-
-      String[] groups = user.getGroupNames();
-      if (groups != null) {
-        for (String group : groups) {
-          List<TablePermission> groupPerms = tablePerms.getGroup(group);
-          if (groupPerms != null) {
-            for (TablePermission p : groupPerms) {
-              if (p.matchesFamilyQualifier(table, family, qualifier, action)) {
-                return true;
-              }
-            }
-          }
-        }
-      }
-    }
-    return false;
-  }
-
-  public void removeNamespace(byte[] ns) {
-    nsCache.remove(Bytes.toString(ns));
-  }
-
-  public void removeTable(TableName table) {
-    tableCache.remove(table);
-  }
-
-  /**
-   * Overwrites the existing permission set for a given user for a table, and
-   * triggers an update for zookeeper synchronization.
-   * @param username
-   * @param table
-   * @param perms
-   */
-  public void setTableUserPermissions(String username, TableName table,
-      List<TablePermission> perms) {
-    PermissionCache<TablePermission> tablePerms = getTablePermissions(table);
-    tablePerms.replaceUser(username, perms);
-    writeTableToZooKeeper(table, tablePerms);
-  }
-
-  /**
-   * Overwrites the existing permission set for a group and triggers an update
-   * for zookeeper synchronization.
-   * @param group
-   * @param table
-   * @param perms
-   */
-  public void setTableGroupPermissions(String group, TableName table,
-      List<TablePermission> perms) {
-    PermissionCache<TablePermission> tablePerms = getTablePermissions(table);
-    tablePerms.replaceGroup(group, perms);
-    writeTableToZooKeeper(table, tablePerms);
-  }
-
-  /**
-   * Overwrites the existing permission set for a given user for a table, and
-   * triggers an update for zookeeper synchronization.
-   * @param username
-   * @param namespace
-   * @param perms
-   */
-  public void setNamespaceUserPermissions(String username, String namespace,
-      List<TablePermission> perms) {
-    PermissionCache<TablePermission> tablePerms = getNamespacePermissions(namespace);
-    tablePerms.replaceUser(username, perms);
-    writeNamespaceToZooKeeper(namespace, tablePerms);
-  }
-
-  /**
-   * Overwrites the existing permission set for a group and triggers an update
-   * for zookeeper synchronization.
-   * @param group
-   * @param namespace
-   * @param perms
-   */
-  public void setNamespaceGroupPermissions(String group, String namespace,
-      List<TablePermission> perms) {
-    PermissionCache<TablePermission> tablePerms = getNamespacePermissions(namespace);
-    tablePerms.replaceGroup(group, perms);
-    writeNamespaceToZooKeeper(namespace, tablePerms);
-  }
-
-  public void writeTableToZooKeeper(TableName table,
-      PermissionCache<TablePermission> tablePerms) {
-    byte[] serialized = new byte[0];
-    if (tablePerms != null) {
-      serialized = AccessControlLists.writePermissionsAsBytes(tablePerms.getAllPermissions(), conf);
-    }
-    zkperms.writeToZookeeper(table.getName(), serialized);
-  }
-
-  public void writeNamespaceToZooKeeper(String namespace,
-      PermissionCache<TablePermission> tablePerms) {
-    byte[] serialized = new byte[0];
-    if (tablePerms != null) {
-      serialized = AccessControlLists.writePermissionsAsBytes(tablePerms.getAllPermissions(), conf);
-    }
-    zkperms.writeToZookeeper(Bytes.toBytes(AccessControlLists.toNamespaceEntry(namespace)),
-        serialized);
-  }
-
-  public long getMTime() {
-    return mtime.get();
-  }
-
-  private static Map<ZKWatcher,TableAuthManager> managerMap = new HashMap<>();
-
-  private static Map<TableAuthManager, Integer> refCount = new HashMap<>();
-
-  /** Returns a TableAuthManager from the cache. If not cached, constructs a new one. Returned
-   * instance should be released back by calling {@link #release(TableAuthManager)}. */
-  public synchronized static TableAuthManager getOrCreate(
-          ZKWatcher watcher, Configuration conf) throws IOException {
-    TableAuthManager instance = managerMap.get(watcher);
-    if (instance == null) {
-      instance = new TableAuthManager(watcher, conf);
-      managerMap.put(watcher, instance);
-    }
-    int ref = refCount.get(instance) == null ? 0 : refCount.get(instance).intValue();
-    refCount.put(instance, ref + 1);
-    return instance;
-  }
-
-  @VisibleForTesting
-  public static int getTotalRefCount() {
-    int total = 0;
-    for (int count : refCount.values()) {
-      total += count;
-    }
-    return total;
-  }
-
-  /**
-   * Releases the resources for the given TableAuthManager if the reference count is down to 0.
-   * @param instance TableAuthManager to be released
-   */
-  public synchronized static void release(TableAuthManager instance) {
-    if (refCount.get(instance) == null || refCount.get(instance) < 1) {
-      String msg = "Something wrong with the TableAuthManager reference counting: " + instance
-          + " whose count is " + refCount.get(instance);
-      LOG.error(HBaseMarkers.FATAL, msg);
-      instance.close();
-      managerMap.remove(instance.getZKPermissionWatcher().getWatcher());
-      instance.getZKPermissionWatcher().getWatcher().abort(msg, null);
-    } else {
-      int ref = refCount.get(instance);
-      refCount.put(instance, ref-1);
-      if (ref-1 == 0) {
-        instance.close();
-        managerMap.remove(instance.getZKPermissionWatcher().getWatcher());
-        refCount.remove(instance);
-      }
-    }
-  }
-}
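
For readers chasing call sites, a rough before/after mapping from the removed TableAuthManager entry points to their AuthManager replacements; the method names come from the two files in this commit, the arguments and class name are placeholders.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.AuthManager;
import org.apache.hadoop.hbase.security.access.Permission;

final class AuthManagerMigrationSketch {
  // was: tableAuthManager.authorize(user, table, family, qualifier, action)
  static boolean checkRead(AuthManager auth, User user, TableName table,
      byte[] family, byte[] qualifier) {
    return auth.authorizeUserTable(user, table, family, qualifier,
        Permission.Action.READ);
  }

  // was: tableAuthManager.hasAccess(user, table, action)
  static boolean checkAnyAccess(AuthManager auth, User user, TableName table) {
    return auth.accessUserTable(user, table, Permission.Action.READ);
  }

  // was: tableAuthManager.authorize(user, action)
  static boolean checkGlobal(AuthManager auth, User user) {
    return auth.authorizeUserGlobal(user, Permission.Action.ADMIN);
  }
}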

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
index 2266e86..fa3c30f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
@@ -49,21 +49,21 @@ import java.util.concurrent.RejectedExecutionException;
  * {@code /hbase/acl/tablename}, with the znode data containing a serialized
  * list of the permissions granted for the table.  The {@code AccessController}
  * instances on all other cluster hosts watch the znodes for updates, which
- * trigger updates in the {@link TableAuthManager} permission cache.
+ * trigger updates in the {@link AuthManager} permission cache.
  */
 @InterfaceAudience.Private
 public class ZKPermissionWatcher extends ZKListener implements Closeable {
   private static final Logger LOG = LoggerFactory.getLogger(ZKPermissionWatcher.class);
   // parent node for permissions lists
   static final String ACL_NODE = "acl";
-  private final TableAuthManager authManager;
+  private final AuthManager authManager;
   private final String aclZNode;
   private final CountDownLatch initialized = new CountDownLatch(1);
   private final ExecutorService executor;
   private Future<?> childrenChangedFuture;
 
   public ZKPermissionWatcher(ZKWatcher watcher,
-      TableAuthManager authManager, Configuration conf) {
+      AuthManager authManager, Configuration conf) {
     super(watcher);
     this.authManager = authManager;
     String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE);
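
A small sketch of the wiring this hunk implies. AuthManager's constructor performs the equivalent itself, so this standalone form exists only to illustrate the updated constructor signature; it is not how production code sets the watcher up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.security.access.AuthManager;
import org.apache.hadoop.hbase.security.access.ZKPermissionWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
import org.apache.zookeeper.KeeperException;

final class ZKPermissionWiringSketch {
  static ZKPermissionWatcher wire(ZKWatcher zk, AuthManager auth,
      Configuration conf) throws KeeperException {
    // The watcher now feeds ACL znode updates into an AuthManager cache
    // instead of a TableAuthManager.
    ZKPermissionWatcher zkperms = new ZKPermissionWatcher(zk, auth, conf);
    zkperms.start();
    return zkperms;
  }
}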

http://git-wip-us.apache.org/repos/asf/hbase/blob/130057f1/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 7edf734..6aa378c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.security.access.AccessControlClient;
+import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.ShadedAccessControlUtil;
 import org.apache.hadoop.hbase.security.access.TablePermission;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -858,15 +859,16 @@ public class RestoreSnapshotHelper {
       Configuration conf) throws IOException {
     if (snapshot.hasUsersAndPermissions() && snapshot.getUsersAndPermissions() != null) {
       LOG.info("Restore snapshot acl to table. snapshot: " + snapshot + ", table: " + newTableName);
-      ListMultimap<String, TablePermission> perms =
+      ListMultimap<String, Permission> perms =
           ShadedAccessControlUtil.toUserTablePermissions(snapshot.getUsersAndPermissions());
       try (Connection conn = ConnectionFactory.createConnection(conf)) {
-        for (Entry<String, TablePermission> e : perms.entries()) {
+        for (Entry<String, Permission> e : perms.entries()) {
           String user = e.getKey();
-          TablePermission perm = e.getValue();
-          perm.setTableName(newTableName);
-          AccessControlClient.grant(conn, perm.getTableName(), user, perm.getFamily(),
-            perm.getQualifier(), perm.getActions());
+          TablePermission tablePerm = (TablePermission) e.getValue();
+          TablePermission newPerm = new TablePermission(newTableName,
+            tablePerm.getFamily(), tablePerm.getQualifier(), tablePerm.getActions());
+          AccessControlClient.grant(conn, newPerm.getTableName(), user, newPerm.getFamily(),
+            newPerm.getQualifier(), newPerm.getActions());
         }
       } catch (Throwable e) {
         throw new IOException("Grant acl into newly creatd table failed. snapshot: " + snapshot


[04/51] [abbrv] hbase git commit: HBASE-21460 correct Document Configurable Bucket Sizes in bucketCache

Posted by el...@apache.org.
HBASE-21460 correct Document Configurable Bucket Sizes in bucketCache

Signed-off-by: tedyu <yu...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a8198759
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a8198759
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a8198759

Branch: refs/heads/HBASE-20952
Commit: a8198759390984bb06701b40d1bbdfb90e251e14
Parents: 130057f
Author: utf7 <ch...@gmail.com>
Authored: Thu Nov 15 23:31:42 2018 +0800
Committer: tedyu <yu...@gmail.com>
Committed: Thu Nov 15 07:52:42 2018 -0800

----------------------------------------------------------------------
 src/main/asciidoc/_chapters/architecture.adoc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a8198759/src/main/asciidoc/_chapters/architecture.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index e1905bc..17e9e13 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -950,14 +950,14 @@ In the above, we set the BucketCache to be 4G.
 We configured the on-heap LruBlockCache have 20% (0.2) of the RegionServer's heap size (0.2 * 5G = 1G). In other words, you configure the L1 LruBlockCache as you would normally (as if there were no L2 cache present).
 
 link:https://issues.apache.org/jira/browse/HBASE-10641[HBASE-10641] introduced the ability to configure multiple sizes for the buckets of the BucketCache, in HBase 0.98 and newer.
-To configurable multiple bucket sizes, configure the new property `hfile.block.cache.sizes` (instead of `hfile.block.cache.size`) to a comma-separated list of block sizes, ordered from smallest to largest, with no spaces.
+To configure multiple bucket sizes, set the new property `hbase.bucketcache.bucket.sizes` to a comma-separated list of block sizes, ordered from smallest to largest, with no spaces.
 The goal is to optimize the bucket sizes based on your data access patterns.
 The following example configures buckets of size 4096 and 8192.
 
 [source,xml]
 ----
 <property>
-  <name>hfile.block.cache.sizes</name>
+  <name>hbase.bucketcache.bucket.sizes</name>
   <value>4096,8192</value>
 </property>
 ----
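
For context, a hedged hbase-site.xml sketch combining the corrected property with the usual BucketCache switches discussed earlier in that chapter; the values are illustrative, not recommendations.

<!-- Illustrative values only -->
<property>
  <name>hbase.bucketcache.ioengine</name>
  <value>offheap</value>
</property>
<property>
  <name>hbase.bucketcache.size</name>
  <value>4096</value> <!-- MB; the 4G L2 cache from the example above -->
</property>
<property>
  <name>hbase.bucketcache.bucket.sizes</name>
  <value>4096,8192</value>
</property>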


[15/51] [abbrv] hbase git commit: HBASE-21387 Race condition surrounding in progress snapshot handling in snapshot cache leads to loss of snapshot files

Posted by el...@apache.org.
HBASE-21387 Race condition surrounding in progress snapshot handling in snapshot cache leads to loss of snapshot files


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6d0dc960
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6d0dc960
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6d0dc960

Branch: refs/heads/HBASE-20952
Commit: 6d0dc960e6a937b0c1ad1a1dfc0597382aa11221
Parents: 5cc845b
Author: huzheng <op...@gmail.com>
Authored: Tue Nov 20 00:16:12 2018 +0800
Committer: huzheng <op...@gmail.com>
Committed: Fri Nov 23 11:31:37 2018 +0800

----------------------------------------------------------------------
 .../master/snapshot/SnapshotFileCache.java      |  62 +++---
 .../hbase/master/snapshot/SnapshotManager.java  |  48 +++--
 .../master/snapshot/TakeSnapshotHandler.java    |   3 -
 .../snapshot/SnapshotDescriptionUtils.java      |  12 --
 .../master/snapshot/TestSnapshotFileCache.java  |   4 +-
 .../snapshot/TestSnapshotHFileCleaner.java      |   6 +-
 .../snapshot/TestSnapshotWhenChoreCleaning.java | 207 +++++++++++++++++++
 7 files changed, 280 insertions(+), 62 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/6d0dc960/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index 358b4ea..006ca2a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -27,7 +27,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.Timer;
 import java.util.TimerTask;
-import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.Lock;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
@@ -184,22 +184,39 @@ public class SnapshotFileCache implements Stoppable {
     List<FileStatus> unReferencedFiles = Lists.newArrayList();
     List<String> snapshotsInProgress = null;
     boolean refreshed = false;
-    for (FileStatus file : files) {
-      String fileName = file.getPath().getName();
-      if (!refreshed && !cache.contains(fileName)) {
-        refreshCache();
-        refreshed = true;
-      }
-      if (cache.contains(fileName)) {
-        continue;
-      }
-      if (snapshotsInProgress == null) {
-        snapshotsInProgress = getSnapshotsInProgress(snapshotManager);
-      }
-      if (snapshotsInProgress.contains(fileName)) {
-        continue;
+    Lock lock = null;
+    if (snapshotManager != null) {
+      lock = snapshotManager.getTakingSnapshotLock().writeLock();
+    }
+    if (lock == null || lock.tryLock()) {
+      try {
+        if (snapshotManager != null && snapshotManager.isTakingAnySnapshot()) {
+          LOG.warn("Not checking unreferenced files since a snapshot is running; "
+              + "HFile cleaning will be skipped this time");
+          return unReferencedFiles;
+        }
+        for (FileStatus file : files) {
+          String fileName = file.getPath().getName();
+          if (!refreshed && !cache.contains(fileName)) {
+            refreshCache();
+            refreshed = true;
+          }
+          if (cache.contains(fileName)) {
+            continue;
+          }
+          if (snapshotsInProgress == null) {
+            snapshotsInProgress = getSnapshotsInProgress();
+          }
+          if (snapshotsInProgress.contains(fileName)) {
+            continue;
+          }
+          unReferencedFiles.add(file);
+        }
+      } finally {
+        if (lock != null) {
+          lock.unlock();
+        }
       }
-      unReferencedFiles.add(file);
     }
     return unReferencedFiles;
   }
@@ -269,19 +286,14 @@ public class SnapshotFileCache implements Stoppable {
     this.snapshots.putAll(known);
   }
 
-  @VisibleForTesting List<String> getSnapshotsInProgress(
-    final SnapshotManager snapshotManager) throws IOException {
+  @VisibleForTesting
+  List<String> getSnapshotsInProgress() throws IOException {
     List<String> snapshotInProgress = Lists.newArrayList();
     // only add those files to the cache, but not to the known snapshots
     Path snapshotTmpDir = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
-    // only add those files to the cache, but not to the known snapshots
     FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
     if (running != null) {
       for (FileStatus run : running) {
-        ReentrantLock lock = null;
-        if (snapshotManager != null) {
-          lock = snapshotManager.getLocks().acquireLock(run.getPath().getName());
-        }
         try {
           snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
         } catch (CorruptedSnapshotException e) {
@@ -293,10 +305,6 @@ public class SnapshotFileCache implements Stoppable {
           } else {
             throw e;
           }
-        } finally {
-          if (lock != null) {
-            lock.unlock();
-          }
         }
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d0dc960/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 82acc7f..ae9b6fb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -28,6 +28,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -77,7 +79,6 @@ import org.apache.hadoop.hbase.snapshot.TablePartiallyOpenException;
 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
@@ -164,14 +165,12 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   private ExecutorService executorService;
 
   /**
-   *  Locks for snapshot operations
-   *  key is snapshot's filename in progress, value is the related lock
-   *    - create snapshot
-   *    - SnapshotCleaner
-   * */
-  private KeyLocker<String> locks = new KeyLocker<>();
-
-
+   * Read-write lock between snapshot taking and the snapshot HFile cleaner. The cleaner should
+   * skip checking the HFiles if any snapshot is in progress, otherwise it may delete an HFile
+   * which belongs to the snapshot being created. So the cleaner should grab the write lock first
+   * when it starts to work. (See HBASE-21387)
+   */
+  private ReentrantReadWriteLock takingSnapshotLock = new ReentrantReadWriteLock(true);
 
   public SnapshotManager() {}
 
@@ -547,14 +546,38 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     }
   }
 
+  public ReadWriteLock getTakingSnapshotLock() {
+    return this.takingSnapshotLock;
+  }
+
+  /**
+   * Snapshot processing works as follows: <br>
+   * 1. Create a snapshot handler, and do some initialization; <br>
+   * 2. Put the handler into snapshotHandlers. <br>
+   * So when deciding whether any snapshot is in progress, we should consider both the
+   * takingSnapshotLock and snapshotHandlers.
+   * @return true if there are any running snapshots.
+   */
+  public synchronized boolean isTakingAnySnapshot() {
+    return this.takingSnapshotLock.getReadHoldCount() > 0 || this.snapshotHandlers.size() > 0;
+  }
+
   /**
    * Take a snapshot based on the enabled/disabled state of the table.
-   *
    * @param snapshot
    * @throws HBaseSnapshotException when a snapshot specific exception occurs.
    * @throws IOException when some sort of generic IO exception occurs.
    */
   public void takeSnapshot(SnapshotDescription snapshot) throws IOException {
+    this.takingSnapshotLock.readLock().lock();
+    try {
+      takeSnapshotInternal(snapshot);
+    } finally {
+      this.takingSnapshotLock.readLock().unlock();
+    }
+  }
+
+  private void takeSnapshotInternal(SnapshotDescription snapshot) throws IOException {
     // check to see if we already completed the snapshot
     if (isSnapshotCompleted(snapshot)) {
       throw new SnapshotExistsException(
@@ -1189,9 +1212,4 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
     builder.setType(SnapshotDescription.Type.FLUSH);
     return builder.build();
   }
-
-  public KeyLocker<String> getLocks() {
-    return locks;
-  }
-
 }
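
Taken together with the SnapshotFileCache change earlier in this commit, the interlock reduces to a plain read-write lock protocol: snapshots share the read lock, while the HFile cleaner needs the exclusive write lock and skips its cycle rather than blocking. A minimal self-contained sketch (class and method names invented for illustration):

import java.util.concurrent.locks.ReentrantReadWriteLock;

final class SnapshotCleanerLockSketch {
  // Fair RW lock, as in SnapshotManager: snapshots share the read lock,
  // the HFile cleaner requires the exclusive write lock.
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true);

  void takeSnapshot(Runnable snapshotWork) {
    lock.readLock().lock(); // many snapshots may run concurrently
    try {
      snapshotWork.run();
    } finally {
      lock.readLock().unlock();
    }
  }

  void cleanHFiles(Runnable cleanerWork) {
    // tryLock: if any snapshot holds the read lock, skip this cleaner cycle
    // instead of blocking it, mirroring SnapshotFileCache.getUnreferencedFiles.
    if (lock.writeLock().tryLock()) {
      try {
        cleanerWork.run();
      } finally {
        lock.writeLock().unlock();
      }
    }
  }
}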

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d0dc960/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index d44312a..1dce79f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -24,7 +24,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.CancellationException;
-import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -174,7 +173,6 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
     String msg = "Running " + snapshot.getType() + " table snapshot " + snapshot.getName() + " "
         + eventType + " on table " + snapshotTable;
     LOG.info(msg);
-    ReentrantLock lock = snapshotManager.getLocks().acquireLock(snapshot.getName());
     MasterLock tableLockToRelease = this.tableLock;
     status.setStatus(msg);
     try {
@@ -251,7 +249,6 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
       } catch (IOException e) {
         LOG.error("Couldn't delete snapshot working directory:" + workingDir);
       }
-      lock.unlock();
       tableLockToRelease.release();
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d0dc960/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
index 0c1e761..39202c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotDescriptionUtils.java
@@ -120,8 +120,6 @@ public final class SnapshotDescriptionUtils {
    */
   public static final String SNAPSHOT_WORKING_DIR = "hbase.snapshot.working.dir";
 
-  /** This tag will be created in in-progess snapshots */
-  public static final String SNAPSHOT_IN_PROGRESS = ".inprogress";
   // snapshot operation values
   /** Default value if no start time is specified */
   public static final long NO_SNAPSHOT_START_TIME_SPECIFIED = 0;
@@ -355,16 +353,6 @@ public final class SnapshotDescriptionUtils {
   }
 
   /**
-   * Create in-progress tag under .tmp of in-progress snapshot
-   * */
-  public static void createInProgressTag(Path workingDir, FileSystem fs) throws IOException {
-    FsPermission perms = FSUtils.getFilePermissions(fs, fs.getConf(),
-      HConstants.DATA_FILE_UMASK_KEY);
-    Path snapshot_in_progress = new Path(workingDir, SnapshotDescriptionUtils.SNAPSHOT_IN_PROGRESS);
-    FSUtils.create(fs, snapshot_in_progress, perms, true);
-  }
-
-  /**
    * Read in the {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} stored for the snapshot in the passed directory
    * @param fs filesystem where the snapshot was taken
    * @param snapshotDir directory where the snapshot was stored

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d0dc960/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
index 22d2734..d74b906 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
@@ -147,9 +147,9 @@ public class TestSnapshotFileCache {
     SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
         "test-snapshot-file-cache-refresh", new SnapshotFiles()) {
       @Override
-      List<String> getSnapshotsInProgress(final SnapshotManager snapshotManager)
+      List<String> getSnapshotsInProgress()
               throws IOException {
-        List<String> result = super.getSnapshotsInProgress(snapshotManager);
+        List<String> result = super.getSnapshotsInProgress();
         count.incrementAndGet();
         return result;
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d0dc960/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
index 0a14f77..08a68be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
@@ -145,7 +145,7 @@ public class TestSnapshotHFileCleaner {
     SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
         "test-snapshot-file-cache-refresh", new SnapshotFiles());
     try {
-      cache.getSnapshotsInProgress(null);
+      cache.getSnapshotsInProgress();
     } catch (CorruptedSnapshotException cse) {
       LOG.info("Expected exception " + cse);
     } finally {
@@ -173,7 +173,7 @@ public class TestSnapshotHFileCleaner {
     SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
         "test-snapshot-file-cache-refresh", new SnapshotFiles());
     try {
-      cache.getSnapshotsInProgress(null);
+      cache.getSnapshotsInProgress();
     } catch (CorruptedSnapshotException cse) {
       LOG.info("Expected exception " + cse);
     } finally {
@@ -197,7 +197,7 @@ public class TestSnapshotHFileCleaner {
       long period = Long.MAX_VALUE;
     SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
         "test-snapshot-file-cache-refresh", new SnapshotFiles());
-    cache.getSnapshotsInProgress(null);
+    cache.getSnapshotsInProgress();
     assertFalse(fs.exists(builder.getSnapshotsDir()));
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d0dc960/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotWhenChoreCleaning.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotWhenChoreCleaning.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotWhenChoreCleaning.java
new file mode 100644
index 0000000..22591c7
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestSnapshotWhenChoreCleaning.java
@@ -0,0 +1,207 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TestTableName;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner;
+import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.FSVisitor;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
+/**
+ * Test Case for HBASE-21387
+ */
+@Category({ LargeTests.class })
+public class TestSnapshotWhenChoreCleaning {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestSnapshotWhenChoreCleaning.class);
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final Configuration CONF = TEST_UTIL.getConfiguration();
+  private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotWhenChoreCleaning.class);
+  private static final TableName TABLE_NAME = TableName.valueOf("testTable");
+  private static final int MAX_SPLIT_KEYS_NUM = 100;
+  private static final byte[] FAMILY = Bytes.toBytes("family");
+  private static final byte[] QUALIFIER = Bytes.toBytes("qualifier");
+  private static final byte[] VALUE = Bytes.toBytes("value");
+  private static Table TABLE;
+
+  @Rule
+  public TestTableName TEST_TABLE = new TestTableName();
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    // Set hbase.snapshot.thread.pool.max to 1.
+    CONF.setInt("hbase.snapshot.thread.pool.max", 1);
+    // Enable snapshot
+    CONF.setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
+    // Start MiniCluster.
+    TEST_UTIL.startMiniCluster(3);
+    // Create the table.
+    createTable();
+  }
+
+  private static byte[] integerToBytes(int i) {
+    return Bytes.toBytes(String.format("%06d", i));
+  }
+
+  private static void createTable() throws IOException {
+    byte[][] splitKeys = new byte[MAX_SPLIT_KEYS_NUM][];
+    for (int i = 0; i < splitKeys.length; i++) {
+      splitKeys[i] = integerToBytes(i);
+    }
+    TABLE = TEST_UTIL.createTable(TABLE_NAME, FAMILY, splitKeys);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  private static void loadDataAndFlush() throws IOException {
+    for (int i = 0; i < MAX_SPLIT_KEYS_NUM; i++) {
+      Put put = new Put(integerToBytes(i)).addColumn(FAMILY, QUALIFIER,
+        Bytes.add(VALUE, Bytes.toBytes(i)));
+      TABLE.put(put);
+    }
+    TEST_UTIL.flush(TABLE_NAME);
+  }
+
+  private static List<Path> listHFileNames(final FileSystem fs, final Path tableDir)
+      throws IOException {
+    final List<Path> hfiles = new ArrayList<>();
+    FSVisitor.visitTableStoreFiles(fs, tableDir, (region, family, hfileName) -> {
+      hfiles.add(new Path(new Path(new Path(tableDir, region), family), hfileName));
+    });
+    Collections.sort(hfiles);
+    return hfiles;
+  }
+
+  private static boolean isAnySnapshots(FileSystem fs) throws IOException {
+    Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(FSUtils.getRootDir(CONF));
+    FileStatus[] snapFiles = fs.listStatus(snapshotDir);
+    if (snapFiles.length == 0) {
+      return false;
+    }
+    Path firstPath = snapFiles[0].getPath();
+    LOG.info("firstPath in isAnySnapshots: " + firstPath);
+    if (snapFiles.length == 1 && firstPath.getName().equals(".tmp")) {
+      FileStatus[] tmpSnapFiles = fs.listStatus(firstPath);
+      return tmpSnapFiles != null && tmpSnapFiles.length > 0;
+    }
+    return true;
+  }
+
+  @Test
+  public void testSnapshotWhenSnapshotHFileCleanerRunning() throws Exception {
+    // Load data and flush to generate huge number of HFiles.
+    loadDataAndFlush();
+
+    SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
+    cleaner.init(ImmutableMap.of(HMaster.MASTER, TEST_UTIL.getHBaseCluster().getMaster()));
+    cleaner.setConf(CONF);
+
+    FileSystem fs = FSUtils.getCurrentFileSystem(CONF);
+    List<Path> fileNames =
+        listHFileNames(fs, FSUtils.getTableDir(FSUtils.getRootDir(CONF), TABLE_NAME));
+    List<FileStatus> files = new ArrayList<>();
+    for (Path fileName : fileNames) {
+      files.add(fs.getFileStatus(fileName));
+    }
+
+    TEST_UTIL.getAdmin().snapshot("snapshotName_prev", TABLE_NAME);
+    Assert.assertEquals(0, Lists.newArrayList(cleaner.getDeletableFiles(files)).size());
+    TEST_UTIL.getAdmin().deleteSnapshot("snapshotName_prev");
+    cleaner.getFileCacheForTesting().triggerCacheRefreshForTesting();
+    Assert.assertEquals(100, Lists.newArrayList(cleaner.getDeletableFiles(files)).size());
+
+    Runnable snapshotRunnable = () -> {
+      try {
+        // This thread will be busy taking snapshots.
+        for (int k = 0; k < 5; k++) {
+          TEST_UTIL.getAdmin().snapshot("snapshotName_" + k, TABLE_NAME);
+        }
+      } catch (Exception e) {
+        LOG.error("Snapshot failed: ", e);
+      }
+    };
+    final AtomicBoolean success = new AtomicBoolean(true);
+    Runnable cleanerRunnable = () -> {
+      try {
+        while (!isAnySnapshots(fs)) {
+          LOG.info("Not found any snapshot, sleep 100ms");
+          Thread.sleep(100);
+        }
+        for (int k = 0; k < 5; k++) {
+          cleaner.getFileCacheForTesting().triggerCacheRefreshForTesting();
+          Iterable<FileStatus> toDeleteFiles = cleaner.getDeletableFiles(files);
+          List<FileStatus> deletableFiles = Lists.newArrayList(toDeleteFiles);
+          LOG.info("Size of deletableFiles is: " + deletableFiles.size());
+          for (int i = 0; i < deletableFiles.size(); i++) {
+            LOG.debug("toDeleteFiles[{}] is: {}", i, deletableFiles.get(i));
+          }
+          if (deletableFiles.size() > 0) {
+            success.set(false);
+          }
+        }
+      } catch (Exception e) {
+        LOG.error("Chore cleaning failed: ", e);
+      }
+    };
+    Thread t1 = new Thread(snapshotRunnable);
+    t1.start();
+    Thread t2 = new Thread(cleanerRunnable);
+    t2.start();
+    t1.join();
+    t2.join();
+    Assert.assertTrue(success.get());
+  }
+}
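
The invariant this test races against is easy to state on its own. A minimal
sketch, reusing the cleaner and files set up in
testSnapshotWhenSnapshotHFileCleanerRunning above (the snapshot name
"sn_check" is made up for illustration):

    // While a snapshot of the table exists, none of its HFiles may be deletable.
    TEST_UTIL.getAdmin().snapshot("sn_check", TABLE_NAME);
    Assert.assertTrue(Lists.newArrayList(cleaner.getDeletableFiles(files)).isEmpty());
    // Once the snapshot is deleted and the cache refreshed, all become deletable.
    TEST_UTIL.getAdmin().deleteSnapshot("sn_check");
    cleaner.getFileCacheForTesting().triggerCacheRefreshForTesting();
    Assert.assertEquals(files.size(),
        Lists.newArrayList(cleaner.getDeletableFiles(files)).size());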


[42/51] [abbrv] hbase git commit: HBASE-21550 Add a new method preCreateTableRegionInfos for MasterObserver which allows CPs to modify the TableDescriptor

Posted by el...@apache.org.
HBASE-21550 Add a new method preCreateTableRegionInfos for MasterObserver which allows CPs to modify the TableDescriptor


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f49baf25
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f49baf25
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f49baf25

Branch: refs/heads/HBASE-20952
Commit: f49baf259ec6bc2c8634debd2dbfc592753245d3
Parents: 8bf966c
Author: Duo Zhang <zh...@apache.org>
Authored: Wed Dec 5 18:19:15 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Thu Dec 6 08:30:32 2018 +0800

----------------------------------------------------------------------
 .../hbase/coprocessor/MasterObserver.java       | 15 +++++
 .../org/apache/hadoop/hbase/master/HMaster.java | 68 ++++++++++----------
 .../hbase/master/MasterCoprocessorHost.java     | 14 ++++
 .../hbase/coprocessor/TestMasterObserver.java   | 14 +++-
 4 files changed, 75 insertions(+), 36 deletions(-)
----------------------------------------------------------------------
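
The new hook is easiest to see from the coprocessor side. Below is a minimal
sketch of an observer that tags every user table at creation time; the hook
signature matches the interface change in this commit, while the class name
and the "CREATED_BY" attribute key are made up for illustration:

    import java.io.IOException;
    import java.util.Optional;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    public class TableTaggingObserver implements MasterCoprocessor, MasterObserver {

      @Override
      public Optional<MasterObserver> getMasterObserver() {
        return Optional.of(this);
      }

      @Override
      public TableDescriptor preCreateTableRegionsInfos(
          ObserverContext<MasterCoprocessorEnvironment> ctx, TableDescriptor desc)
          throws IOException {
        if (desc.getTableName().isSystemTable()) {
          return desc; // leave system tables untouched
        }
        // The descriptor returned here is the one the master actually creates;
        // returning null instead would cancel the table creation.
        return TableDescriptorBuilder.newBuilder(desc)
            .setValue("CREATED_BY", TableTaggingObserver.class.getName())
            .build();
      }
    }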


http://git-wip-us.apache.org/repos/asf/hbase/blob/f49baf25/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 573ac7a..a0863e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -70,6 +70,21 @@ import org.apache.yetus.audience.InterfaceStability;
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
 @InterfaceStability.Evolving
 public interface MasterObserver {
+
+  /**
+   * Called before we create the region infos for this table, as part of the create table RPC
+   * call.
+   * @param ctx the environment to interact with the framework and master
+   * @param desc the TableDescriptor for the table
+   * @return the TableDescriptor used to create the table. Default is the one passed in. Returning
+   *         {@code null} cancels the creation.
+   */
+  default TableDescriptor preCreateTableRegionsInfos(
+      final ObserverContext<MasterCoprocessorEnvironment> ctx, TableDescriptor desc)
+      throws IOException {
+    return desc;
+  }
+
   /**
    * Called before a new table is created by
    * {@link org.apache.hadoop.hbase.master.HMaster}.  Called as part of create

http://git-wip-us.apache.org/repos/asf/hbase/blob/f49baf25/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 132e271..e96dc36 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2030,45 +2030,45 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
-  public long createTable(
-      final TableDescriptor tableDescriptor,
-      final byte [][] splitKeys,
-      final long nonceGroup,
-      final long nonce) throws IOException {
+  public long createTable(final TableDescriptor tableDescriptor, final byte[][] splitKeys,
+      final long nonceGroup, final long nonce) throws IOException {
     checkInitialized();
-
-    String namespace = tableDescriptor.getTableName().getNamespaceAsString();
+    TableDescriptor desc = getMasterCoprocessorHost().preCreateTableRegionsInfos(tableDescriptor);
+    if (desc == null) {
+      throw new IOException("Creation for " + tableDescriptor + " is canceled by CP");
+    }
+    String namespace = desc.getTableName().getNamespaceAsString();
     this.clusterSchemaService.getNamespace(namespace);
 
-    RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(tableDescriptor, splitKeys);
-    sanityCheckTableDescriptor(tableDescriptor);
-
-    return MasterProcedureUtil.submitProcedure(
-        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-      @Override
-      protected void run() throws IOException {
-        getMaster().getMasterCoprocessorHost().preCreateTable(tableDescriptor, newRegions);
-
-        LOG.info(getClientIdAuditPrefix() + " create " + tableDescriptor);
-
-        // TODO: We can handle/merge duplicate requests, and differentiate the case of
-        //       TableExistsException by saying if the schema is the same or not.
-        //
-        // We need to wait for the procedure to potentially fail due to "prepare" sanity
-        // checks. This will block only the beginning of the procedure. See HBASE-19953.
-        ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
-        submitProcedure(new CreateTableProcedure(
-            procedureExecutor.getEnvironment(), tableDescriptor, newRegions, latch));
-        latch.await();
+    RegionInfo[] newRegions = ModifyRegionUtils.createRegionInfos(desc, splitKeys);
+    sanityCheckTableDescriptor(desc);
 
-        getMaster().getMasterCoprocessorHost().postCreateTable(tableDescriptor, newRegions);
-      }
+    return MasterProcedureUtil
+      .submitProcedure(new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
+        @Override
+        protected void run() throws IOException {
+          getMaster().getMasterCoprocessorHost().preCreateTable(desc, newRegions);
+
+          LOG.info(getClientIdAuditPrefix() + " create " + desc);
+
+          // TODO: We can handle/merge duplicate requests, and differentiate the case of
+          // TableExistsException by saying if the schema is the same or not.
+          //
+          // We need to wait for the procedure to potentially fail due to "prepare" sanity
+          // checks. This will block only the beginning of the procedure. See HBASE-19953.
+          ProcedurePrepareLatch latch = ProcedurePrepareLatch.createBlockingLatch();
+          submitProcedure(
+            new CreateTableProcedure(procedureExecutor.getEnvironment(), desc, newRegions, latch));
+          latch.await();
+
+          getMaster().getMasterCoprocessorHost().postCreateTable(desc, newRegions);
+        }
 
-      @Override
-      protected String getDescription() {
-        return "CreateTableProcedure";
-      }
-    });
+        @Override
+        protected String getDescription() {
+          return "CreateTableProcedure";
+        }
+      });
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/f49baf25/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 019c64f..51e30c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -317,6 +317,20 @@ public class MasterCoprocessorHost
 
   /* Implementation of hooks for invoking MasterObservers */
 
+  public TableDescriptor preCreateTableRegionsInfos(TableDescriptor desc) throws IOException {
+    if (coprocEnvironments.isEmpty()) {
+      return desc;
+    }
+    return execOperationWithResult(
+      new ObserverOperationWithResult<MasterObserver, TableDescriptor>(masterObserverGetter, desc) {
+
+        @Override
+        protected TableDescriptor call(MasterObserver observer) throws IOException {
+          return observer.preCreateTableRegionsInfos(this, getResult());
+        }
+      });
+  }
+
   public void preCreateTable(final TableDescriptor htd, final RegionInfo[] regions)
       throws IOException {
     execOperation(coprocEnvironments.isEmpty() ? null : new MasterObserverOperation() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f49baf25/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index a606e27..d8a5b4c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -94,6 +94,7 @@ public class TestMasterObserver {
 
   public static class CPMasterObserver implements MasterCoprocessor, MasterObserver {
 
+    private boolean preCreateTableRegionInfosCalled;
     private boolean preCreateTableCalled;
     private boolean postCreateTableCalled;
     private boolean preDeleteTableCalled;
@@ -186,6 +187,7 @@ public class TestMasterObserver {
     private boolean postLockHeartbeatCalled;
 
     public void resetStates() {
+      preCreateTableRegionInfosCalled = false;
       preCreateTableCalled = false;
       postCreateTableCalled = false;
       preDeleteTableCalled = false;
@@ -298,6 +300,14 @@ public class TestMasterObserver {
     }
 
     @Override
+    public TableDescriptor preCreateTableRegionsInfos(
+        ObserverContext<MasterCoprocessorEnvironment> ctx, TableDescriptor desc)
+        throws IOException {
+      preCreateTableRegionInfosCalled = true;
+      return desc;
+    }
+
+    @Override
     public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> env,
         TableDescriptor desc, RegionInfo[] regions) throws IOException {
       preCreateTableCalled = true;
@@ -310,11 +320,11 @@ public class TestMasterObserver {
     }
 
     public boolean wasCreateTableCalled() {
-      return preCreateTableCalled && postCreateTableCalled;
+      return preCreateTableRegionInfosCalled && preCreateTableCalled && postCreateTableCalled;
     }
 
     public boolean preCreateTableCalledOnly() {
-      return preCreateTableCalled && !postCreateTableCalled;
+      return preCreateTableRegionInfosCalled && preCreateTableCalled && !postCreateTableCalled;
     }
 
     @Override


[08/51] [abbrv] hbase git commit: HBASE-21480 Taking snapshot when RS crashes prevent we bring the regions online

Posted by el...@apache.org.
HBASE-21480 Taking snapshot when RS crashes prevent we bring the regions online


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f555258e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f555258e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f555258e

Branch: refs/heads/HBASE-20952
Commit: f555258e7abab1337ee4d39aaa1dafff72be287b
Parents: f874232
Author: zhangduo <zh...@apache.org>
Authored: Sat Nov 17 14:40:15 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sun Nov 18 21:07:22 2018 +0800

----------------------------------------------------------------------
 .../assignment/AssignmentManagerUtil.java       |  1 -
 .../assignment/MergeTableRegionsProcedure.java  |  7 ++
 .../assignment/SplitTableRegionProcedure.java   |  7 ++
 .../snapshot/DisabledTableSnapshotHandler.java  |  7 ++
 .../snapshot/EnabledTableSnapshotHandler.java   |  8 ++
 .../hbase/master/snapshot/SnapshotManager.java  |  2 +-
 .../master/snapshot/TakeSnapshotHandler.java    | 23 ++++-
 .../snapshot/TestSnapshotWhileRSCrashes.java    | 98 ++++++++++++++++++++
 8 files changed, 149 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
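
The heart of the fix is a lock downgrade in TakeSnapshotHandler, shown in the
diff below. Condensed into one method (a sketch only; "master", "table" and
"sn" stand in for the handler's fields):

    void takeSnapshotWithDowngrade(MasterServices master, TableName table, String sn)
        throws Exception {
      // The exclusive lock guarantees no merge/split is in flight for the table.
      MasterLock exclusive = master.getLockManager().createMasterLock(
          table, LockType.EXCLUSIVE, "take snapshot " + sn);
      exclusive.acquire();
      // Downgrade: swap it for a shared lock so regions can still be assigned.
      MasterLock shared = master.getLockManager().createMasterLock(
          table, LockType.SHARED, "take snapshot " + sn);
      exclusive.release();
      shared.acquire(); // still fences off merge/split for the duration
      try {
        // ... long-running snapshot work; RS crashes can now be recovered from ...
      } finally {
        shared.release();
      }
      // The short window between release() and acquire() is covered by the new
      // SnapshotManager.isTakingSnapshot() checks in the merge/split procedures.
    }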


http://git-wip-us.apache.org/repos/asf/hbase/blob/f555258e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java
index 36ac6cf..97ae7ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManagerUtil.java
@@ -25,7 +25,6 @@ import java.util.ListIterator;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 import java.util.stream.Stream;
-
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.ServerName;

http://git-wip-us.apache.org/repos/asf/hbase/blob/f555258e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index d3b209e..7811d9b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -480,6 +480,13 @@ public class MergeTableRegionsProcedure
    * Prepare merge and do some check
    */
   private boolean prepareMergeRegion(final MasterProcedureEnv env) throws IOException {
+    // Fail if we are taking a snapshot of the given table
+    if (env.getMasterServices().getSnapshotManager()
+      .isTakingSnapshot(regionsToMerge[0].getTable())) {
+      throw new MergeRegionException(
+        "Skip merging regions " + RegionInfo.getShortNameToLog(regionsToMerge) +
+          ", because we are taking snapshot for the table " + regionsToMerge[0].getTable());
+    }
     // Note: the following logic assumes that we only have 2 regions to merge.  In the future,
     // if we want to extend to more than 2 regions, the code needs to be modified a little bit.
     CatalogJanitor catalogJanitor = env.getMasterServices().getCatalogJanitor();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f555258e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index bcee897..b66d91f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -454,6 +454,13 @@ public class SplitTableRegionProcedure
    */
   @VisibleForTesting
   public boolean prepareSplitRegion(final MasterProcedureEnv env) throws IOException {
+    // Fail if we are taking a snapshot of the given table
+    if (env.getMasterServices().getSnapshotManager()
+      .isTakingSnapshot(getParentRegion().getTable())) {
+      setFailure(new IOException("Skip splitting region " + getParentRegion().getShortNameToLog() +
+        ", because we are taking snapshot for the table " + getParentRegion().getTable()));
+      return false;
+    }
     // Check whether the region is splittable
     RegionStateNode node =
         env.getAssignmentManager().getRegionStates().getRegionStateNode(getParentRegion());

http://git-wip-us.apache.org/repos/asf/hbase/blob/f555258e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
index b1b6886..39b8425 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/DisabledTableSnapshotHandler.java
@@ -122,4 +122,11 @@ public class DisabledTableSnapshotHandler extends TakeSnapshotHandler {
           + " as finished.");
     }
   }
+
+  @Override
+  protected boolean downgradeToSharedTableLock() {
+    // For a snapshot of a disabled table it is OK to keep holding the exclusive lock, as we do
+    // not need to assign regions when a region server crashes.
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f555258e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
index 445685c..3a6a813 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/EnabledTableSnapshotHandler.java
@@ -132,4 +132,12 @@ public class EnabledTableSnapshotHandler extends TakeSnapshotHandler {
     monitor.rethrowException();
     status.setStatus("Completed referencing HFiles for the mob region of table: " + snapshotTable);
   }
+
+  @Override
+  protected boolean downgradeToSharedTableLock() {
+    // Return true here to downgrade from the exclusive lock to a shared lock, so we can still
+    // assign regions while taking snapshots. This is important because a region server can crash
+    // at any time; if we cannot assign regions, the affected regions cannot come back online.
+    return true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f555258e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index e496b44..82acc7f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -420,7 +420,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    * @param tableName name of the table being snapshotted.
    * @return <tt>true</tt> if there is a snapshot in progress on the specified table.
    */
-  synchronized boolean isTakingSnapshot(final TableName tableName) {
+  public synchronized boolean isTakingSnapshot(final TableName tableName) {
     SnapshotSentinel handler = this.snapshotHandlers.get(tableName);
     return handler != null && !handler.isFinished();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f555258e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index f6b48cb..d44312a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.MetricsSnapshot;
 import org.apache.hadoop.hbase.master.SnapshotSentinel;
 import org.apache.hadoop.hbase.master.locking.LockManager;
+import org.apache.hadoop.hbase.master.locking.LockManager.MasterLock;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.procedure2.LockType;
@@ -88,7 +89,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   protected final Path workingDir;
   private final MasterSnapshotVerifier verifier;
   protected final ForeignExceptionDispatcher monitor;
-  protected final LockManager.MasterLock tableLock;
+  private final LockManager.MasterLock tableLock;
   protected final MonitoredTask status;
   protected final TableName snapshotTable;
   protected final SnapshotManifest snapshotManifest;
@@ -174,8 +175,16 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
         + eventType + " on table " + snapshotTable;
     LOG.info(msg);
     ReentrantLock lock = snapshotManager.getLocks().acquireLock(snapshot.getName());
+    MasterLock tableLockToRelease = this.tableLock;
     status.setStatus(msg);
     try {
+      if (downgradeToSharedTableLock()) {
+        // release the exclusive lock and hold the shared lock instead
+        tableLockToRelease = master.getLockManager().createMasterLock(snapshotTable,
+          LockType.SHARED, this.getClass().getName() + ": take snapshot " + snapshot.getName());
+        tableLock.release();
+        tableLockToRelease.acquire();
+      }
       // If regions move after this meta scan, the region specific snapshot should fail, triggering
       // an external exception that gets captured here.
 
@@ -243,7 +252,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
         LOG.error("Couldn't delete snapshot working directory:" + workingDir);
       }
       lock.unlock();
-      tableLock.release();
+      tableLockToRelease.release();
     }
   }
 
@@ -283,6 +292,16 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   }
 
   /**
+   * When taking a snapshot, we must first acquire the exclusive table lock to confirm that there
+   * are no ongoing merge/split procedures. Afterwards we should release the exclusive lock as soon
+   * as possible, because holding it hurts availability: the shared table lock must be held while
+   * assigning regions.
+   * <p/>
+   * See HBASE-21480 for more details.
+   */
+  protected abstract boolean downgradeToSharedTableLock();
+
+  /**
    * Snapshot the specified regions
    */
   protected abstract void snapshotRegions(List<Pair<RegionInfo, ServerName>> regions)

http://git-wip-us.apache.org/repos/asf/hbase/blob/f555258e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotWhileRSCrashes.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotWhileRSCrashes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotWhileRSCrashes.java
new file mode 100644
index 0000000..46dc99f
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotWhileRSCrashes.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.snapshot;
+
+import static org.junit.Assert.assertNull;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.locking.LockManager.MasterLock;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.LockType;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestSnapshotWhileRSCrashes {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestSnapshotWhileRSCrashes.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static TableName NAME = TableName.valueOf("Cleanup");
+
+  private static byte[] CF = Bytes.toBytes("cf");
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL.startMiniCluster(3);
+    UTIL.createMultiRegionTable(NAME, CF);
+    UTIL.waitTableAvailable(NAME);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void test() throws InterruptedException, IOException {
+    String snName = "sn";
+    MasterLock lock = UTIL.getMiniHBaseCluster().getMaster().getLockManager().createMasterLock(NAME,
+      LockType.EXCLUSIVE, "for testing");
+    lock.acquire();
+    Thread t = new Thread(() -> {
+      try {
+        UTIL.getAdmin().snapshot(snName, NAME);
+      } catch (IOException e) {
+        throw new UncheckedIOException(e);
+      }
+    });
+    t.setDaemon(true);
+    t.start();
+    ProcedureExecutor<MasterProcedureEnv> procExec =
+      UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
+    UTIL.waitFor(10000,
+      () -> procExec.getProcedures().stream().filter(p -> !p.isFinished())
+        .filter(p -> p instanceof LockProcedure).map(p -> (LockProcedure) p)
+        .filter(p -> NAME.equals(p.getTableName())).anyMatch(p -> !p.isLocked()));
+    UTIL.getMiniHBaseCluster().stopRegionServer(0);
+    lock.release();
+    // the snapshot cannot complete properly when an RS crashes, so here we just want to make
+    // sure that the regions can come back online
+    try (Table table = UTIL.getConnection().getTable(NAME);
+        ResultScanner scanner = table.getScanner(CF)) {
+      assertNull(scanner.next());
+    }
+  }
+}


[18/51] [abbrv] hbase git commit: HBASE-21511 Remove in progress snapshot check in SnapshotFileCache#getUnreferencedFiles

Posted by el...@apache.org.
HBASE-21511 Remove in progress snapshot check in SnapshotFileCache#getUnreferencedFiles


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/27c0bf5c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/27c0bf5c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/27c0bf5c

Branch: refs/heads/HBASE-20952
Commit: 27c0bf5c6373e805db8552b9aae46a887f5cc0a9
Parents: 701526d
Author: Ted Yu <yu...@gmail.com>
Authored: Sun Nov 25 18:20:00 2018 -0800
Committer: Ted Yu <yu...@gmail.com>
Committed: Sun Nov 25 18:20:00 2018 -0800

----------------------------------------------------------------------
 .../master/snapshot/SnapshotFileCache.java      | 34 -------
 .../master/snapshot/TestSnapshotFileCache.java  | 94 +-------------------
 .../snapshot/TestSnapshotHFileCleaner.java      | 44 +--------
 3 files changed, 4 insertions(+), 168 deletions(-)
----------------------------------------------------------------------
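
With the in-progress probe gone, the surviving logic of
SnapshotFileCache#getUnreferencedFiles reduces to one cache lookup per
candidate file. Paraphrased (not a verbatim excerpt; "cache" is the internal
set of file names referenced by completed snapshots):

    List<FileStatus> unReferencedFiles = Lists.newArrayList();
    boolean refreshed = false;
    for (FileStatus file : files) {
      String fileName = file.getPath().getName();
      if (!refreshed && !cache.contains(fileName)) {
        refreshCache();              // at most one refresh per call, on first miss
        refreshed = true;
      }
      if (cache.contains(fileName)) {
        continue;                    // referenced by a completed snapshot
      }
      unReferencedFiles.add(file);   // no .tmp / in-progress probe any more
    }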


http://git-wip-us.apache.org/repos/asf/hbase/blob/27c0bf5c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index 522b1c9..1524ecd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -42,7 +41,6 @@ import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 
 /**
@@ -182,7 +180,6 @@ public class SnapshotFileCache implements Stoppable {
       final SnapshotManager snapshotManager)
       throws IOException {
     List<FileStatus> unReferencedFiles = Lists.newArrayList();
-    List<String> snapshotsInProgress = null;
     boolean refreshed = false;
     Lock lock = null;
     if (snapshotManager != null) {
@@ -204,12 +201,6 @@ public class SnapshotFileCache implements Stoppable {
           if (cache.contains(fileName)) {
             continue;
           }
-          if (snapshotsInProgress == null) {
-            snapshotsInProgress = getSnapshotsInProgress();
-          }
-          if (snapshotsInProgress.contains(fileName)) {
-            continue;
-          }
           unReferencedFiles.add(file);
         }
       } finally {
@@ -286,31 +277,6 @@ public class SnapshotFileCache implements Stoppable {
     this.snapshots.putAll(known);
   }
 
-  @VisibleForTesting
-  List<String> getSnapshotsInProgress() throws IOException {
-    List<String> snapshotInProgress = Lists.newArrayList();
-    // only add those files to the cache, but not to the known snapshots
-    Path snapshotTmpDir = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
-    FileStatus[] running = FSUtils.listStatus(fs, snapshotTmpDir);
-    if (running != null) {
-      for (FileStatus run : running) {
-        try {
-          snapshotInProgress.addAll(fileInspector.filesUnderSnapshot(run.getPath()));
-        } catch (CorruptedSnapshotException e) {
-          // See HBASE-16464
-          if (e.getCause() instanceof FileNotFoundException) {
-            // If the snapshot is corrupt, we will delete it
-            fs.delete(run.getPath(), true);
-            LOG.warn("delete the " + run.getPath() + " due to exception:", e.getCause());
-          } else {
-            throw e;
-          }
-        }
-      }
-    }
-    return snapshotInProgress;
-  }
-
   /**
    * Simple helper task that just periodically attempts to refresh the cache
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/27c0bf5c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
index d74b906..7ef5477 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotFileCache.java
@@ -17,11 +17,8 @@
  */
 package org.apache.hadoop.hbase.master.snapshot;
 
-import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -29,13 +26,11 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
@@ -51,11 +46,6 @@ import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
-import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
-
-import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
-
 /**
  * Test that we correctly reload the cache, filter directories, etc.
  */
@@ -98,10 +88,8 @@ public class TestSnapshotFileCache {
         "test-snapshot-file-cache-refresh", new SnapshotFiles());
 
     createAndTestSnapshotV1(cache, "snapshot1a", false, true);
-    createAndTestSnapshotV1(cache, "snapshot1b", true, true);
 
     createAndTestSnapshotV2(cache, "snapshot2a", false, true);
-    createAndTestSnapshotV2(cache, "snapshot2b", true, true);
   }
 
   @Test
@@ -130,78 +118,6 @@ public class TestSnapshotFileCache {
     // Add a new non-tmp snapshot
     createAndTestSnapshotV1(cache, "snapshot0v1", false, false);
     createAndTestSnapshotV1(cache, "snapshot0v2", false, false);
-
-    // Add a new tmp snapshot
-    createAndTestSnapshotV2(cache, "snapshot1", true, false);
-
-    // Add another tmp snapshot
-    createAndTestSnapshotV2(cache, "snapshot2", true, false);
-  }
-
-  @Test
-  public void testWeNeverCacheTmpDirAndLoadIt() throws Exception {
-
-    final AtomicInteger count = new AtomicInteger(0);
-    // don't refresh the cache unless we tell it to
-    long period = Long.MAX_VALUE;
-    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
-        "test-snapshot-file-cache-refresh", new SnapshotFiles()) {
-      @Override
-      List<String> getSnapshotsInProgress()
-              throws IOException {
-        List<String> result = super.getSnapshotsInProgress();
-        count.incrementAndGet();
-        return result;
-      }
-
-      @Override public void triggerCacheRefreshForTesting() {
-        super.triggerCacheRefreshForTesting();
-      }
-    };
-
-    SnapshotMock.SnapshotBuilder complete =
-        createAndTestSnapshotV1(cache, "snapshot", false, false);
-
-    int countBeforeCheck = count.get();
-
-    FSUtils.logFileSystemState(fs, rootDir, LOG);
-
-    List<FileStatus> allStoreFiles = getStoreFilesForSnapshot(complete);
-    Iterable<FileStatus> deletableFiles = cache.getUnreferencedFiles(allStoreFiles, null);
-    assertTrue(Iterables.isEmpty(deletableFiles));
-    // no need for tmp dir check as all files are accounted for.
-    assertEquals(0, count.get() - countBeforeCheck);
-
-
-    // add a random file to make sure we refresh
-    FileStatus randomFile = mockStoreFile(UTIL.getRandomUUID().toString());
-    allStoreFiles.add(randomFile);
-    deletableFiles = cache.getUnreferencedFiles(allStoreFiles, null);
-    assertEquals(randomFile, Iterables.getOnlyElement(deletableFiles));
-    assertEquals(1, count.get() - countBeforeCheck); // we check the tmp directory
-  }
-
-  private List<FileStatus> getStoreFilesForSnapshot(SnapshotMock.SnapshotBuilder builder)
-      throws IOException {
-    final List<FileStatus> allStoreFiles = Lists.newArrayList();
-    SnapshotReferenceUtil
-        .visitReferencedFiles(UTIL.getConfiguration(), fs, builder.getSnapshotsDir(),
-            new SnapshotReferenceUtil.SnapshotVisitor() {
-              @Override public void storeFile(RegionInfo regionInfo, String familyName,
-                  SnapshotProtos.SnapshotRegionManifest.StoreFile storeFile) throws IOException {
-                FileStatus status = mockStoreFile(storeFile.getName());
-                allStoreFiles.add(status);
-              }
-            });
-    return allStoreFiles;
-  }
-
-  private FileStatus mockStoreFile(String storeFileName) {
-    FileStatus status = mock(FileStatus.class);
-    Path path = mock(Path.class);
-    when(path.getName()).thenReturn(storeFileName);
-    when(status.getPath()).thenReturn(path);
-    return status;
   }
 
   class SnapshotFiles implements SnapshotFileCache.SnapshotFileInspector {
@@ -234,20 +150,12 @@ public class TestSnapshotFileCache {
     List<Path> files = new ArrayList<>();
     for (int i = 0; i < 3; ++i) {
       for (Path filePath: builder.addRegion()) {
-        if (tmp) {
-          // We should be able to find all the files while the snapshot creation is in-progress
-          FSUtils.logFileSystemState(fs, rootDir, LOG);
-          assertFalse("Cache didn't find " + filePath,
-            contains(getNonSnapshotFiles(cache, filePath), filePath));
-        }
         files.add(filePath);
       }
     }
 
     // Finalize the snapshot
-    if (!tmp) {
-      builder.commit();
-    }
+    builder.commit();
 
     // Make sure that all files are still present
     for (Path path: files) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/27c0bf5c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
index 08a68be..76c2a4b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestSnapshotHFileCleaner.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils;
@@ -141,17 +140,8 @@ public class TestSnapshotHFileCleaner {
     builder.addRegionV2();
     builder.corruptOneRegionManifest();
 
-    long period = Long.MAX_VALUE;
-    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
-        "test-snapshot-file-cache-refresh", new SnapshotFiles());
-    try {
-      cache.getSnapshotsInProgress();
-    } catch (CorruptedSnapshotException cse) {
-      LOG.info("Expected exception " + cse);
-    } finally {
-      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
-          TEST_UTIL.getConfiguration()), true);
-    }
+    fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir, TEST_UTIL.getConfiguration()),
+      true);
   }
 
   /**
@@ -169,35 +159,7 @@ public class TestSnapshotHFileCleaner {
     builder.consolidate();
     builder.corruptDataManifest();
 
-    long period = Long.MAX_VALUE;
-    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
-        "test-snapshot-file-cache-refresh", new SnapshotFiles());
-    try {
-      cache.getSnapshotsInProgress();
-    } catch (CorruptedSnapshotException cse) {
-      LOG.info("Expected exception " + cse);
-    } finally {
-      fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
+    fs.delete(SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir,
           TEST_UTIL.getConfiguration()), true);
-    }
-  }
-
-  /**
-  * HBASE-16464
-  */
-  @Test
-  public void testMissedTmpSnapshot() throws IOException {
-    SnapshotTestingUtils.SnapshotMock
-        snapshotMock = new SnapshotTestingUtils.SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
-    SnapshotTestingUtils.SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2(
-        SNAPSHOT_NAME_STR, TABLE_NAME_STR);
-    builder.addRegionV2();
-    builder.missOneRegionSnapshotFile();
-
-      long period = Long.MAX_VALUE;
-    SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
-        "test-snapshot-file-cache-refresh", new SnapshotFiles());
-    cache.getSnapshotsInProgress();
-    assertFalse(fs.exists(builder.getSnapshotsDir()));
   }
 }


[36/51] [abbrv] hbase git commit: HBASE-21479 Individual tests in TestHRegionReplayEvents class are failing

Posted by el...@apache.org.
HBASE-21479 Individual tests in TestHRegionReplayEvents class are failing

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5c209f2c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5c209f2c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5c209f2c

Branch: refs/heads/HBASE-20952
Commit: 5c209f2cec19d179702f104f6758c3bcb1512eb6
Parents: 766aa1b
Author: Peter Somogyi <ps...@apache.org>
Authored: Fri Nov 30 16:39:36 2018 +0100
Committer: Peter Somogyi <ps...@apache.org>
Committed: Sat Dec 1 09:36:10 2018 +0100

----------------------------------------------------------------------
 .../regionserver/TestHRegionReplayEvents.java   | 32 +++++++++++++-------
 1 file changed, 21 insertions(+), 11 deletions(-)
----------------------------------------------------------------------
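
The fix is a standard JUnit 4 lifecycle change: the mini DFS cluster now
starts once per class rather than once per test. The pattern, reduced to its
essentials (a sketch with an illustrative class name, not the full test):

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class ExampleClusterTest {
      private static HBaseTestingUtility TEST_UTIL;

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        TEST_UTIL = new HBaseTestingUtility();
        TEST_UTIL.startMiniDFSCluster(1);   // expensive: start once for the class
      }

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        TEST_UTIL.cleanupTestDir();
        TEST_UTIL.shutdownMiniDFSCluster(); // torn down exactly once
      }

      // @Before/@After keep only the cheap per-test state (tables, WALs, regions).
    }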


http://git-wip-us.apache.org/repos/asf/hbase/blob/5c209f2c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 308dc03..3b3b8c3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -66,7 +66,7 @@ import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl;
 import org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult;
 import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
@@ -80,7 +80,9 @@ import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
 import org.junit.Test;
@@ -108,7 +110,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescript
  * Tests of HRegion methods for replaying flush, compaction, region open, etc events for secondary
  * region replicas
  */
-@Category(MediumTests.class)
+@Category(LargeTests.class)
 public class TestHRegionReplayEvents {
 
   @ClassRule
@@ -120,7 +122,7 @@ public class TestHRegionReplayEvents {
 
   private static HBaseTestingUtility TEST_UTIL;
 
-  public static Configuration CONF ;
+  public static Configuration CONF;
   private String dir;
 
   private byte[][] families = new byte[][] {
@@ -136,17 +138,27 @@ public class TestHRegionReplayEvents {
   // per test fields
   private Path rootDir;
   private TableDescriptor htd;
-  private long time;
   private RegionServerServices rss;
   private RegionInfo primaryHri, secondaryHri;
   private HRegion primaryRegion, secondaryRegion;
-  private WALFactory wals;
   private WAL walPrimary, walSecondary;
   private WAL.Reader reader;
 
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL = new HBaseTestingUtility();
+    TEST_UTIL.startMiniDFSCluster(1);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir());
+    TEST_UTIL.cleanupTestDir();
+    TEST_UTIL.shutdownMiniDFSCluster();
+  }
+
   @Before
-  public void setup() throws IOException {
-    TEST_UTIL = HBaseTestingUtility.createLocalHTU();
+  public void setUp() throws Exception {
     CONF = TEST_UTIL.getConfiguration();
     dir = TEST_UTIL.getDataTestDir("TestHRegionReplayEvents").toString();
     method = name.getMethodName();
@@ -160,14 +172,14 @@ public class TestHRegionReplayEvents {
     }
     htd = builder.build();
 
-    time = System.currentTimeMillis();
+    long time = System.currentTimeMillis();
     ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
     primaryHri =
         RegionInfoBuilder.newBuilder(htd.getTableName()).setRegionId(time).setReplicaId(0).build();
     secondaryHri =
         RegionInfoBuilder.newBuilder(htd.getTableName()).setRegionId(time).setReplicaId(1).build();
 
-    wals = TestHRegion.createWALFactory(CONF, rootDir);
+    WALFactory wals = TestHRegion.createWALFactory(CONF, rootDir);
     walPrimary = wals.getWAL(primaryHri);
     walSecondary = wals.getWAL(secondaryHri);
 
@@ -207,8 +219,6 @@ public class TestHRegionReplayEvents {
     }
 
     EnvironmentEdgeManagerTestHelper.reset();
-    LOG.info("Cleaning test directory: " + TEST_UTIL.getDataTestDir());
-    TEST_UTIL.cleanupTestDir();
   }
 
   String getName() {

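As context for the diff above: the mini DFS cluster now follows the standard JUnit 4 class-level lifecycle, so it is started once per class rather than once per test. A minimal sketch of that pattern, using the same utility calls as the diff (the class name here is hypothetical):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniDfsLifecycleSketch {
  private static HBaseTestingUtility TEST_UTIL;

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Start the expensive mini DFS cluster once per class instead of per test.
    TEST_UTIL = new HBaseTestingUtility();
    TEST_UTIL.startMiniDFSCluster(1);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    // Tear the shared cluster down once, after the last test method.
    TEST_UTIL.cleanupTestDir();
    TEST_UTIL.shutdownMiniDFSCluster();
  }
}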

[29/51] [abbrv] hbase git commit: HBASE-21300 Fix the wrong reference file path when restoring snapshots for tables with MOB columns

Posted by el...@apache.org.
HBASE-21300 Fix the wrong reference file path when restoring snapshots for tables with MOB columns

Signed-off-by: Guanghao Zhang <zg...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f1f2b5a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f1f2b5a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f1f2b5a0

Branch: refs/heads/HBASE-20952
Commit: f1f2b5a038d730e68720503331951a3c7edb053d
Parents: d6e1d18
Author: meiyi <my...@gamil.com>
Authored: Tue Nov 27 14:32:40 2018 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Thu Nov 29 11:23:45 2018 +0800

----------------------------------------------------------------------
 .../procedure/CloneSnapshotProcedure.java       | 31 ++++++++-
 .../org/apache/hadoop/hbase/mob/MobUtils.java   | 38 ++++++++++-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |  2 +-
 .../snapshot/TestMobRestoreSnapshotHelper.java  | 16 ++++-
 .../snapshot/TestRestoreSnapshotHelper.java     | 69 +++++++++++++++++++-
 5 files changed, 147 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f2b5a0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index 9ba3485..d351d67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.master.MetricsSnapshot;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure.CreateHdfsRegions;
+import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
@@ -470,11 +471,39 @@ public class CloneSnapshotProcedure
 
     // 3. Move Table temp directory to the hbase root location
     CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);
-
+    // Move Table temp mob directory to the hbase root location
+    Path tempMobTableDir = MobUtils.getMobTableDir(tempdir, tableDescriptor.getTableName());
+    if (mfs.getFileSystem().exists(tempMobTableDir)) {
+      moveTempMobDirectoryToHBaseRoot(mfs, tableDescriptor, tempMobTableDir);
+    }
     return newRegions;
   }
 
   /**
+   * Move table temp mob directory to the hbase root location
+   * @param mfs The master file system
+   * @param tableDescriptor The table to operate on
+   * @param tempMobTableDir The temp mob directory of table
+   * @throws IOException If failed to move temp mob dir to hbase root dir
+   */
+  private void moveTempMobDirectoryToHBaseRoot(final MasterFileSystem mfs,
+      final TableDescriptor tableDescriptor, final Path tempMobTableDir) throws IOException {
+    FileSystem fs = mfs.getFileSystem();
+    final Path tableMobDir =
+        MobUtils.getMobTableDir(mfs.getRootDir(), tableDescriptor.getTableName());
+    if (!fs.delete(tableMobDir, true) && fs.exists(tableMobDir)) {
+      throw new IOException("Couldn't delete mob table " + tableMobDir);
+    }
+    if (!fs.exists(tableMobDir.getParent())) {
+      fs.mkdirs(tableMobDir.getParent());
+    }
+    if (!fs.rename(tempMobTableDir, tableMobDir)) {
+      throw new IOException("Unable to move mob table from temp=" + tempMobTableDir
+          + " to hbase root=" + tableMobDir);
+    }
+  }
+
+  /**
    * Add regions to hbase:meta table.
    * @param env MasterProcedureEnv
    * @throws IOException

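The moveTempMobDirectoryToHBaseRoot helper above uses the common delete-then-rename idiom on Hadoop's FileSystem API. A self-contained sketch of the same idiom, with illustrative names that are not part of the patch:

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class FsMoveSketch {
  /** Replace {@code target} with {@code source} via delete + rename. */
  static void moveIntoPlace(FileSystem fs, Path source, Path target) throws IOException {
    // Remove any stale target; delete returns false if nothing was deleted,
    // so also check existence before giving up.
    if (!fs.delete(target, true) && fs.exists(target)) {
      throw new IOException("Couldn't delete " + target);
    }
    // rename requires the destination's parent directory to exist.
    if (!fs.exists(target.getParent())) {
      fs.mkdirs(target.getParent());
    }
    if (!fs.rename(source, target)) {
      throw new IOException("Unable to move " + source + " to " + target);
    }
  }
}
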
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f2b5a0/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 9fa4e4c..304a62e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -367,7 +367,17 @@ public final class MobUtils {
    */
   public static Path getMobHome(Configuration conf) {
     Path hbaseDir = new Path(conf.get(HConstants.HBASE_DIR));
-    return new Path(hbaseDir, MobConstants.MOB_DIR_NAME);
+    return getMobHome(hbaseDir);
+  }
+
+  /**
+   * Gets the root dir of the mob files under the qualified HBase root dir.
+   * It's {rootDir}/mobdir.
+   * @param rootDir The qualified path of HBase root directory.
+   * @return The root dir of the mob file.
+   */
+  public static Path getMobHome(Path rootDir) {
+    return new Path(rootDir, MobConstants.MOB_DIR_NAME);
   }
 
   /**
@@ -384,14 +394,36 @@ public final class MobUtils {
   }
 
   /**
+   * Gets the table dir of the mob files under the qualified HBase root dir.
+   * It's {rootDir}/mobdir/data/{namespace}/{tableName}.
+   * @param rootDir The qualified path of HBase root directory.
+   * @param tableName The name of table.
+   * @return The table dir of the mob file.
+   */
+  public static Path getMobTableDir(Path rootDir, TableName tableName) {
+    return FSUtils.getTableDir(getMobHome(rootDir), tableName);
+  }
+
+  /**
    * Gets the region dir of the mob files.
-   * It's {HBASE_DIR}/mobdir/{namespace}/{tableName}/{regionEncodedName}.
+   * It's {HBASE_DIR}/mobdir/data/{namespace}/{tableName}/{regionEncodedName}.
    * @param conf The current configuration.
    * @param tableName The current table name.
    * @return The region dir of the mob files.
    */
   public static Path getMobRegionPath(Configuration conf, TableName tableName) {
-    Path tablePath = FSUtils.getTableDir(getMobHome(conf), tableName);
+    return getMobRegionPath(new Path(conf.get(HConstants.HBASE_DIR)), tableName);
+  }
+
+  /**
+   * Gets the region dir of the mob files under the specified root dir.
+   * It's {rootDir}/mobdir/data/{namespace}/{tableName}/{regionEncodedName}.
+   * @param rootDir The qualified path of HBase root directory.
+   * @param tableName The current table name.
+   * @return The region dir of the mob files.
+   */
+  public static Path getMobRegionPath(Path rootDir, TableName tableName) {
+    Path tablePath = FSUtils.getTableDir(getMobHome(rootDir), tableName);
     RegionInfo regionInfo = getMobRegionInfo(tableName);
     return new Path(tablePath, regionInfo.getEncodedName());
   }

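Taken together, the new rootDir-based overloads compose the mob directory layout described in the javadoc. A small sketch of how they might be exercised (the root dir URI and table name are made up for illustration):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.mob.MobUtils;

public class MobPathSketch {
  public static void main(String[] args) {
    Path rootDir = new Path("hdfs://nn:8020/hbase");
    TableName table = TableName.valueOf("ns", "tbl");
    // {rootDir}/mobdir
    System.out.println(MobUtils.getMobHome(rootDir));
    // {rootDir}/mobdir/data/ns/tbl
    System.out.println(MobUtils.getMobTableDir(rootDir, table));
    // {rootDir}/mobdir/data/ns/tbl/{encodedMobRegionName}
    System.out.println(MobUtils.getMobRegionPath(rootDir, table));
  }
}
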
http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f2b5a0/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index 6aa378c..0acfb1a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -622,7 +622,7 @@ public class RestoreSnapshotHelper {
   private void cloneHdfsMobRegion(final Map<String, SnapshotRegionManifest> regionManifests,
       final RegionInfo region) throws IOException {
     // clone region info (change embedded tableName with the new one)
-    Path clonedRegionPath = MobUtils.getMobRegionPath(conf, tableDesc.getTableName());
+    Path clonedRegionPath = MobUtils.getMobRegionPath(rootDir, tableDesc.getTableName());
     cloneRegion(clonedRegionPath, region, regionManifests.get(region.getEncodedName()));
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f2b5a0/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
index df8eb68..10c67ad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobRestoreSnapshotHelper.java
@@ -20,9 +20,12 @@ package org.apache.hadoop.hbase.snapshot;
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.snapshot.MobSnapshotTestingUtils.SnapshotMock;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
 import org.slf4j.Logger;
@@ -31,7 +34,7 @@ import org.slf4j.LoggerFactory;
 /**
  * Test the restore/clone operation from a file-system point of view.
  */
-@Category(SmallTests.class)
+@Category(MediumTests.class)
 public class TestMobRestoreSnapshotHelper extends TestRestoreSnapshotHelper {
 
   @ClassRule
@@ -49,4 +52,13 @@ public class TestMobRestoreSnapshotHelper extends TestRestoreSnapshotHelper {
   protected SnapshotMock createSnapshotMock() throws IOException {
     return new SnapshotMock(TEST_UTIL.getConfiguration(), fs, rootDir);
   }
+
+  @Override
+  protected void createTableAndSnapshot(TableName tableName, String snapshotName)
+      throws IOException {
+    byte[] column = Bytes.toBytes("A");
+    Table table = MobSnapshotTestingUtils.createMobTable(TEST_UTIL, tableName, column);
+    TEST_UTIL.loadTable(table, column);
+    TEST_UTIL.getAdmin().snapshot(snapshotName, tableName);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1f2b5a0/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
index 08c5088..c1ce040 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreSnapshotHelper.java
@@ -24,22 +24,32 @@ import java.io.IOException;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
 import org.apache.hadoop.hbase.io.HFileLink;
+import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.snapshot.SnapshotTestingUtils.SnapshotMock;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -52,7 +62,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.Snapshot
 /**
  * Test the restore/clone operation from a file-system point of view.
  */
-@Category({RegionServerTests.class, SmallTests.class})
+@Category({RegionServerTests.class, MediumTests.class})
 public class TestRestoreSnapshotHelper {
 
   @ClassRule
@@ -72,6 +82,16 @@ public class TestRestoreSnapshotHelper {
   protected void setupConf(Configuration conf) {
   }
 
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    TEST_UTIL.startMiniCluster();
+  }
+
+  @AfterClass
+  public static void tearDownCluster() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
   @Before
   public void setup() throws Exception {
     rootDir = TEST_UTIL.getDataTestDir("testRestore");
@@ -101,6 +121,51 @@ public class TestRestoreSnapshotHelper {
     restoreAndVerify("snapshot", "namespace1:testRestoreWithNamespace");
   }
 
+  @Test
+  public void testNoHFileLinkInRootDir() throws IOException {
+    rootDir = TEST_UTIL.getDefaultRootDirPath();
+    FSUtils.setRootDir(conf, rootDir);
+    fs = rootDir.getFileSystem(conf);
+
+    TableName tableName = TableName.valueOf("testNoHFileLinkInRootDir");
+    String snapshotName = tableName.getNameAsString() + "-snapshot";
+    createTableAndSnapshot(tableName, snapshotName);
+
+    Path restoreDir = new Path("/hbase/.tmp-restore");
+    RestoreSnapshotHelper.copySnapshotForScanner(conf, fs, rootDir, restoreDir, snapshotName);
+    checkNoHFileLinkInTableDir(tableName);
+  }
+
+  protected void createTableAndSnapshot(TableName tableName, String snapshotName)
+      throws IOException {
+    byte[] column = Bytes.toBytes("A");
+    Table table = TEST_UTIL.createTable(tableName, column, 2);
+    TEST_UTIL.loadTable(table, column);
+    TEST_UTIL.getAdmin().snapshot(snapshotName, tableName);
+  }
+
+  private void checkNoHFileLinkInTableDir(TableName tableName) throws IOException {
+    Path[] tableDirs = new Path[] { CommonFSUtils.getTableDir(rootDir, tableName),
+        CommonFSUtils.getTableDir(new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY), tableName),
+        CommonFSUtils.getTableDir(MobUtils.getMobHome(rootDir), tableName) };
+    for (Path tableDir : tableDirs) {
+      Assert.assertFalse(hasHFileLink(tableDir));
+    }
+  }
+
+  private boolean hasHFileLink(Path tableDir) throws IOException {
+    if (fs.exists(tableDir)) {
+      RemoteIterator<LocatedFileStatus> iterator = fs.listFiles(tableDir, true);
+      while (iterator.hasNext()) {
+        LocatedFileStatus fileStatus = iterator.next();
+        if (fileStatus.isFile() && HFileLink.isHFileLink(fileStatus.getPath())) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
   private void restoreAndVerify(final String snapshotName, final String tableName) throws IOException {
     // Test Rolling-Upgrade like Snapshot.
     // half machines writing using v1 and the others using v2 format.

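The hasHFileLink helper in the test above amounts to a recursive scan for link-named files. A standalone sketch of that check, reusable outside the test (class and method names are hypothetical):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hbase.io.HFileLink;

final class HFileLinkScanSketch {
  /** Recursively scan dir; true if any file name matches the HFileLink pattern. */
  static boolean containsHFileLink(FileSystem fs, Path dir) throws IOException {
    if (!fs.exists(dir)) {
      return false;
    }
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(dir, true);
    while (it.hasNext()) {
      LocatedFileStatus status = it.next();
      if (status.isFile() && HFileLink.isHFileLink(status.getPath())) {
        return true;
      }
    }
    return false;
  }
}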

[33/51] [abbrv] hbase git commit: HBASE-21504 If FIFOCompactionPolicy is enabled, a compaction may write an "empty" HFile whose maxTimeStamp is Long.MAX_VALUE. This kind of HFile will never be archived.

Posted by el...@apache.org.
HBASE-21504 If FIFOCompactionPolicy is enabled, a compaction may write an "empty" HFile whose maxTimeStamp is Long.MAX_VALUE. This kind of HFile will never be archived.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d42e0ade
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d42e0ade
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d42e0ade

Branch: refs/heads/HBASE-20952
Commit: d42e0ade1c034022f1b6cb4833ab1dd6292508e0
Parents: 3ad2a89
Author: huzheng <op...@gmail.com>
Authored: Thu Nov 29 18:00:18 2018 +0800
Committer: huzheng <op...@gmail.com>
Committed: Fri Nov 30 09:53:06 2018 +0800

----------------------------------------------------------------------
 .../compactions/FIFOCompactionPolicy.java       | 36 +++++++----
 .../compactions/TestFIFOCompactionPolicy.java   | 63 ++++++++++++++++++++
 2 files changed, 89 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d42e0ade/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
index 32b40e1..5c8626b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/FIFOCompactionPolicy.java
@@ -96,16 +96,29 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy {
     return hasExpiredStores(storeFiles);
   }
 
+  /**
+   * The FIFOCompactionPolicy only chooses TTL-expired HFiles as compaction candidates. So if all
+   * HFiles are TTL expired, the compaction will generate a new empty HFile whose max timestamp is
+   * Long.MAX_VALUE. If not handled separately, that HFile will never be archived because its TTL
+   * will never expire. So we check for empty store files separately. (See HBASE-21504)
+   */
+  private boolean isEmptyStoreFile(HStoreFile sf) {
+    return sf.getReader().getEntries() == 0;
+  }
+
   private boolean hasExpiredStores(Collection<HStoreFile> files) {
     long currentTime = EnvironmentEdgeManager.currentTime();
-    for(HStoreFile sf: files){
+    for (HStoreFile sf : files) {
+      if (isEmptyStoreFile(sf)) {
+        return true;
+      }
       // Check MIN_VERSIONS is in HStore removeUnneededFiles
       long maxTs = sf.getReader().getMaxTimestamp();
       long maxTtl = storeConfigInfo.getStoreFileTtl();
-      if (maxTtl == Long.MAX_VALUE
-          || (currentTime - maxTtl < maxTs)){
-        continue; 
-      } else{
+      if (maxTtl == Long.MAX_VALUE || (currentTime - maxTtl < maxTs)) {
+        continue;
+      } else {
         return true;
       }
     }
@@ -116,14 +129,17 @@ public class FIFOCompactionPolicy extends ExploringCompactionPolicy {
       Collection<HStoreFile> filesCompacting) {
     long currentTime = EnvironmentEdgeManager.currentTime();
     Collection<HStoreFile> expiredStores = new ArrayList<>();
-    for(HStoreFile sf: files){
+    for (HStoreFile sf : files) {
+      if (isEmptyStoreFile(sf)) {
+        expiredStores.add(sf);
+        continue;
+      }
       // Check MIN_VERSIONS is in HStore removeUnneededFiles
       long maxTs = sf.getReader().getMaxTimestamp();
       long maxTtl = storeConfigInfo.getStoreFileTtl();
-      if (maxTtl == Long.MAX_VALUE
-          || (currentTime - maxTtl < maxTs)){
-        continue; 
-      } else if(filesCompacting == null || !filesCompacting.contains(sf)){
+      if (maxTtl == Long.MAX_VALUE || (currentTime - maxTtl < maxTs)) {
+        continue;
+      } else if (filesCompacting == null || !filesCompacting.contains(sf)) {
         expiredStores.add(sf);
       }
     }

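Stripped of the HBase types, the selection logic above reduces to: treat a file as expired if it is empty or if its newest cell is older than the TTL. A minimal sketch under that reading, using a stand-in interface rather than the real HStoreFile API:

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

final class FifoSelectionSketch {
  /** Minimal stand-in for the reader-backed store file metadata. */
  interface StoreFileMeta {
    long entries();
    long maxTimestamp();
  }

  /** Select files that are empty or whose newest cell is past the TTL. */
  static List<StoreFileMeta> selectExpired(Collection<StoreFileMeta> files, long ttlMs, long now) {
    List<StoreFileMeta> expired = new ArrayList<>();
    for (StoreFileMeta f : files) {
      // Empty files carry a max timestamp of Long.MAX_VALUE, so test them first.
      if (f.entries() == 0 || (ttlMs != Long.MAX_VALUE && now - ttlMs >= f.maxTimestamp())) {
        expired.add(f);
      }
    }
    return expired;
  }
}
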
http://git-wip-us.apache.org/repos/asf/hbase/blob/d42e0ade/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
index e5a4f0c..3e95181 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestFIFOCompactionPolicy.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 import java.io.IOException;
 import java.util.List;
 import java.util.concurrent.ThreadLocalRandom;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -41,6 +42,7 @@ import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -49,6 +51,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.TimeOffsetEnvironmentEdge;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -191,4 +194,64 @@ public class TestFIFOCompactionPolicy {
         .build();
     TEST_UTIL.getAdmin().createTable(desc);
   }
+
+  /**
+   * Unit test for HBASE-21504
+   */
+  @Test
+  public void testFIFOCompactionPolicyExpiredEmptyHFiles() throws Exception {
+    TableName tableName = TableName.valueOf("testFIFOCompactionPolicyExpiredEmptyHFiles");
+    TableDescriptor desc = TableDescriptorBuilder.newBuilder(tableName)
+        .setValue(DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY,
+          FIFOCompactionPolicy.class.getName())
+        .setValue(HConstants.HBASE_REGION_SPLIT_POLICY_KEY,
+          DisabledRegionSplitPolicy.class.getName())
+        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family).setTimeToLive(1).build())
+        .build();
+    Table table = TEST_UTIL.createTable(desc, null);
+    long ts = System.currentTimeMillis() - 10 * 1000;
+    Put put =
+        new Put(Bytes.toBytes("row1")).addColumn(family, qualifier, ts, Bytes.toBytes("value0"));
+    table.put(put);
+    TEST_UTIL.getAdmin().flush(tableName); // HFile-0
+    put = new Put(Bytes.toBytes("row2")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
+    table.put(put);
+    TEST_UTIL.getAdmin().flush(tableName); // HFile-1
+
+    HStore store = getStoreWithName(tableName);
+    Assert.assertNotNull(store);
+    Assert.assertEquals(2, store.getStorefilesCount());
+
+    TEST_UTIL.getAdmin().majorCompact(tableName);
+    for (int i = 0; i < 100; i++) {
+      if (store.getStorefilesCount() > 1) {
+        Thread.sleep(100);
+      } else {
+        break;
+      }
+    }
+    Assert.assertEquals(1, store.getStorefilesCount());
+    HStoreFile sf = store.getStorefiles().iterator().next();
+    Assert.assertNotNull(sf);
+    Assert.assertEquals(0, sf.getReader().getEntries());
+
+    put = new Put(Bytes.toBytes("row3")).addColumn(family, qualifier, ts, Bytes.toBytes("value1"));
+    table.put(put);
+    TEST_UTIL.getAdmin().flush(tableName); // HFile-2
+    Assert.assertEquals(2, store.getStorefilesCount());
+
+    TEST_UTIL.getAdmin().majorCompact(tableName);
+    for (int i = 0; i < 100; i++) {
+      if (store.getStorefilesCount() > 1) {
+        Thread.sleep(100);
+      } else {
+        break;
+      }
+    }
+
+    Assert.assertEquals(1, store.getStorefilesCount());
+    sf = store.getStorefiles().iterator().next();
+    Assert.assertNotNull(sf);
+    Assert.assertEquals(0, sf.getReader().getEntries());
+  }
 }

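The sleep-and-check loops above poll for the compaction to settle; the same wait can be expressed with the testing utility's waitFor helper, as other tests in this digest do (e.g. TestLoadProcedureError). A hedged fragment, assuming the tableName and store variables from the test above:

TEST_UTIL.getAdmin().majorCompact(tableName);
// Wait up to 30s for the major compaction to leave a single store file,
// instead of hand-rolling a sleep loop.
TEST_UTIL.waitFor(30000, () -> store.getStorefilesCount() == 1);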

[31/51] [abbrv] hbase git commit: HBASE-21523 Avoid extra logging when the backup system table already exists

Posted by el...@apache.org.
HBASE-21523 Avoid extra logging when the backup system table already exists

Signed-off-by: Peter Somogyi <ps...@cloudera.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fdddc47e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fdddc47e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fdddc47e

Branch: refs/heads/HBASE-20952
Commit: fdddc47e77d57ce1a95d9c194655f0a86d589124
Parents: 8a68f0d
Author: Josh Elser <el...@apache.org>
Authored: Wed Nov 28 21:56:10 2018 -0500
Committer: Josh Elser <el...@apache.org>
Committed: Thu Nov 29 11:40:28 2018 -0500

----------------------------------------------------------------------
 .../apache/hadoop/hbase/backup/impl/BackupSystemTable.java   | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fdddc47e/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index d177384..94ccfe5 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -229,8 +229,14 @@ public final class BackupSystemTable implements Closeable {
   }
 
   private void waitForSystemTable(Admin admin, TableName tableName) throws IOException {
+    // Return fast if the table is available and avoid a log message
+    if (admin.tableExists(tableName) && admin.isTableAvailable(tableName)) {
+      return;
+    }
     long TIMEOUT = 60000;
     long startTime = EnvironmentEdgeManager.currentTime();
+    LOG.debug("Backup table {} is not present and available, waiting for it to become so",
+        tableName);
     while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) {
       try {
         Thread.sleep(100);
@@ -241,7 +247,7 @@ public final class BackupSystemTable implements Closeable {
           "Failed to create backup system table " + tableName + " after " + TIMEOUT + "ms");
       }
     }
-    LOG.debug("Backup table " + tableName + " exists and available");
+    LOG.debug("Backup table {} exists and available", tableName);
   }
 
   @Override

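The patch above adds a fast path before the polling loop. A self-contained sketch of the resulting check-then-poll shape (names and the timeout handling are illustrative, not the exact BackupSystemTable code):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

final class WaitForTableSketch {
  /** Poll until the table exists and is available, with a fast path that avoids logging. */
  static void waitForTable(Admin admin, TableName tableName, long timeoutMs) throws IOException {
    if (admin.tableExists(tableName) && admin.isTableAvailable(tableName)) {
      return; // common case: nothing to wait for, nothing to log
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.tableExists(tableName) || !admin.isTableAvailable(tableName)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Table " + tableName + " not available after " + timeoutMs + "ms");
      }
      try {
        Thread.sleep(100);
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException(e);
      }
    }
  }
}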

[27/51] [abbrv] hbase git commit: HBASE-21492 CellCodec Written To WAL Before It's Verified

Posted by el...@apache.org.
HBASE-21492 CellCodec Written To WAL Before It's Verified


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7877e09b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7877e09b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7877e09b

Branch: refs/heads/HBASE-20952
Commit: 7877e09b6023c80e8bacd25fb8e0b9273ed7d258
Parents: 64cd30f
Author: BELUGA BEHR <da...@gmail.com>
Authored: Tue Nov 27 08:57:06 2018 -0800
Committer: stack <st...@apache.org>
Committed: Tue Nov 27 08:57:06 2018 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/mapreduce/WALPlayer.java     |  2 +-
 .../regionserver/wal/AbstractProtobufLogWriter.java      |  3 ++-
 .../hadoop/hbase/regionserver/wal/WALCellCodec.java      |  9 ++++-----
 .../hbase/regionserver/wal/TestCustomWALCellCodec.java   | 11 +++++++++++
 4 files changed, 18 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7877e09b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
index aa61316..8606fe5 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java
@@ -324,7 +324,7 @@ public class WALPlayer extends Configured implements Tool {
       // No reducers.
       job.setNumReduceTasks(0);
     }
-    String codecCls = WALCellCodec.getWALCellCodecClass(conf);
+    String codecCls = WALCellCodec.getWALCellCodecClass(conf).getName();
     try {
       TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
         Class.forName(codecCls));

http://git-wip-us.apache.org/repos/asf/hbase/blob/7877e09b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
index ae084a4..ff2864d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufLogWriter.java
@@ -80,7 +80,8 @@ public abstract class AbstractProtobufLogWriter {
       builder.setWriterClsName(getWriterClassName());
     }
     if (!builder.hasCellCodecClsName()) {
-      builder.setCellCodecClsName(WALCellCodec.getWALCellCodecClass(conf));
+      builder.setCellCodecClsName(
+          WALCellCodec.getWALCellCodecClass(conf).getName());
     }
     return builder.build();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7877e09b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
index 34d83f7..5aa943f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
@@ -24,7 +24,6 @@ import java.io.OutputStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.KeyValue;
@@ -82,8 +81,8 @@ public class WALCellCodec implements Codec {
     this.compression = compression;
   }
 
-  public static String getWALCellCodecClass(Configuration conf) {
-    return conf.get(WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class.getName());
+  public static Class<?> getWALCellCodecClass(Configuration conf) {
+    return conf.getClass(WAL_CELL_CODEC_CLASS_KEY, WALCellCodec.class);
   }
 
   /**
@@ -102,7 +101,7 @@ public class WALCellCodec implements Codec {
   public static WALCellCodec create(Configuration conf, String cellCodecClsName,
       CompressionContext compression) throws UnsupportedOperationException {
     if (cellCodecClsName == null) {
-      cellCodecClsName = getWALCellCodecClass(conf);
+      cellCodecClsName = getWALCellCodecClass(conf).getName();
     }
     return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
         { Configuration.class, CompressionContext.class }, new Object[] { conf, compression });
@@ -121,7 +120,7 @@ public class WALCellCodec implements Codec {
    */
   public static WALCellCodec create(Configuration conf,
       CompressionContext compression) throws UnsupportedOperationException {
-    String cellCodecClsName = getWALCellCodecClass(conf);
+    String cellCodecClsName = getWALCellCodecClass(conf).getName();
     return ReflectionUtils.instantiateWithCustomCtor(cellCodecClsName, new Class[]
         { Configuration.class, CompressionContext.class }, new Object[] { conf, compression });
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7877e09b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCustomWALCellCodec.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCustomWALCellCodec.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCustomWALCellCodec.java
index 9391a85..6add84f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCustomWALCellCodec.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestCustomWALCellCodec.java
@@ -64,4 +64,15 @@ public class TestCustomWALCellCodec {
     assertEquals("Custom codec didn't get initialized with the right compression context!", null,
       codec.context);
   }
+
+  /**
+   * Test that creating a custom {@link WALCellCodec} will fail if an invalid
+   * codec class is configured.
+   */
+  @Test(expected = RuntimeException.class)
+  public void testCreatePreparesCodecInvalidClass() throws Exception {
+    Configuration conf = new Configuration(false);
+    conf.setStrings(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, "org.apache.hbase.wal.NoSuchClass");
+    WALCellCodec.create(conf, null, null);
+  }
 }

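The net effect of returning Class<?> is that the configured codec name is resolved eagerly, so a bad class name fails before anything is written to the WAL. A small sketch of that failure mode, reusing the bogus class name from the test above (the driver class itself is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;

final class CodecClassCheckSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set(WALCellCodec.WAL_CELL_CODEC_CLASS_KEY, "org.apache.hbase.wal.NoSuchClass");
    try {
      // getWALCellCodecClass now resolves the class eagerly via conf.getClass,
      // so a typo fails here rather than after the name is written to the WAL.
      WALCellCodec.getWALCellCodecClass(conf);
    } catch (RuntimeException e) {
      System.out.println("Rejected invalid codec class: " + e);
    }
  }
}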

[38/51] [abbrv] hbase git commit: HBASE-21534 Addendum: output uncaught exceptions, and fix the way we add new region servers to avoid recreating a dead server

Posted by el...@apache.org.
HBASE-21534 Addendum: output uncaught exceptions, and fix the way we add new region servers to avoid recreating a dead server


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dec49135
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dec49135
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dec49135

Branch: refs/heads/HBASE-20952
Commit: dec49135a36c5dfb4154eb0cc98a2a44e143bb30
Parents: d525ec6
Author: zhangduo <zh...@apache.org>
Authored: Sun Dec 2 19:52:23 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Mon Dec 3 08:19:21 2018 +0800

----------------------------------------------------------------------
 .../assignment/TestAssignmentManagerBase.java      | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/dec49135/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
index 7b5c550..f666ab8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
@@ -67,6 +67,8 @@ import org.junit.rules.TestName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
@@ -113,9 +115,11 @@ public abstract class TestAssignmentManagerBase {
   protected long unassignSubmittedCount = 0;
   protected long unassignFailedCount = 0;
 
+  protected int newRsAdded;
+
   protected int getAssignMaxAttempts() {
     // Have many so we succeed eventually.
-    return 100;
+    return 1000;
   }
 
   protected void setupConfiguration(Configuration conf) throws Exception {
@@ -130,11 +134,13 @@ public abstract class TestAssignmentManagerBase {
   @Before
   public void setUp() throws Exception {
     util = new HBaseTestingUtility();
-    this.executor = Executors.newSingleThreadScheduledExecutor();
+    this.executor = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder()
+      .setUncaughtExceptionHandler((t, e) -> LOG.warn("Uncaught: ", e)).build());
     setupConfiguration(util.getConfiguration());
     master = new MockMasterServices(util.getConfiguration(), this.regionsToRegionServers);
     rsDispatcher = new MockRSProcedureDispatcher(master);
     master.start(NSERVERS, rsDispatcher);
+    newRsAdded = 0;
     am = master.getAssignmentManager();
     assignProcMetrics = am.getAssignmentManagerMetrics().getAssignProcMetrics();
     unassignProcMetrics = am.getAssignmentManagerMetrics().getUnassignProcMetrics();
@@ -189,7 +195,7 @@ public abstract class TestAssignmentManagerBase {
 
   protected byte[] waitOnFuture(final Future<byte[]> future) throws Exception {
     try {
-      return future.get(60, TimeUnit.SECONDS);
+      return future.get(3, TimeUnit.MINUTES);
     } catch (ExecutionException e) {
       LOG.info("ExecutionException", e);
       Exception ee = (Exception) e.getCause();
@@ -277,9 +283,8 @@ public abstract class TestAssignmentManagerBase {
     this.master.getServerManager().moveFromOnlineToDeadServers(serverName);
     this.am.submitServerCrash(serverName, false/* No WALs here */);
     // add a new server to avoid killing all the region servers which may hang the UTs
-    int maxPort = this.master.getServerManager().getOnlineServersList().stream()
-      .mapToInt(ServerName::getPort).max().getAsInt();
-    ServerName newSn = ServerName.valueOf("localhost", 100 + maxPort + 1, 1);
+    ServerName newSn = ServerName.valueOf("localhost", 10000 + newRsAdded, 1);
+    newRsAdded++;
     try {
       this.master.getServerManager().regionServerReport(newSn, ServerMetricsBuilder.of(newSn));
     } catch (YouAreDeadException e) {

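The ThreadFactoryBuilder change above attaches an uncaught-exception handler to the executor's threads. A minimal sketch of the pattern in isolation (the logging target is illustrative):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

final class LoggingExecutorSketch {
  static ScheduledExecutorService newLoggingExecutor() {
    // Any throwable that reaches the top of a thread created by this factory
    // is reported to the handler instead of being dropped silently.
    return Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder()
        .setUncaughtExceptionHandler((t, e) -> System.err.println("Uncaught in " + t + ": " + e))
        .build());
  }
}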

[32/51] [abbrv] hbase git commit: HBASE-18735 Provide an option to kill a MiniHBaseCluster without waiting on shutdown

Posted by el...@apache.org.
HBASE-18735 Provide an option to kill a MiniHBaseCluster without waiting on shutdown

Signed-off-by: Josh Elser <el...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3ad2a89f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3ad2a89f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3ad2a89f

Branch: refs/heads/HBASE-20952
Commit: 3ad2a89fdfb1e330e8e743ddb46f61e346cf15ba
Parents: fdddc47
Author: Artem Ervits <ar...@gmail.com>
Authored: Thu Nov 29 16:37:50 2018 -0500
Committer: Josh Elser <el...@apache.org>
Committed: Thu Nov 29 16:37:50 2018 -0500

----------------------------------------------------------------------
 .../hadoop/hbase/HBaseTestingUtility.java       | 44 +++++++++++++++-----
 .../hadoop/hbase/TestHBaseTestingUtility.java   | 35 ++++++++++++++--
 2 files changed, 64 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3ad2a89f/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 655bbdb..31a7cad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1237,9 +1237,41 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   }
 
   /**
-   * Shutdown HBase mini cluster.  Does not shutdown zk or dfs if running.
+   * Shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
+   * @throws java.io.IOException in case the command is unsuccessful
    */
   public void shutdownMiniHBaseCluster() throws IOException {
+    cleanup();
+    if (this.hbaseCluster != null) {
+      this.hbaseCluster.shutdown();
+      // Wait till hbase is down before going on to shutdown zk.
+      this.hbaseCluster.waitUntilShutDown();
+      this.hbaseCluster = null;
+    }
+    if (zooKeeperWatcher != null) {
+      zooKeeperWatcher.close();
+      zooKeeperWatcher = null;
+    }
+  }
+
+  /**
+   * Abruptly shutdown HBase mini cluster. Does not shutdown zk or dfs if running.
+   * @throws java.io.IOException if the command is unsuccessful
+   */
+  public void killMiniHBaseCluster() throws IOException {
+    cleanup();
+    if (this.hbaseCluster != null) {
+      getMiniHBaseCluster().killAll();
+      this.hbaseCluster = null;
+    }
+    if (zooKeeperWatcher != null) {
+      zooKeeperWatcher.close();
+      zooKeeperWatcher = null;
+    }
+  }
+
+  // close hbase admin, close current connection and reset MIN MAX configs for RS.
+  private void cleanup() throws IOException {
     if (hbaseAdmin != null) {
       hbaseAdmin.close();
       hbaseAdmin = null;
@@ -1251,16 +1283,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
     // unset the configuration for MIN and MAX RS to start
     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, -1);
     conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MAXTOSTART, -1);
-    if (this.hbaseCluster != null) {
-      this.hbaseCluster.shutdown();
-      // Wait till hbase is down before going on to shutdown zk.
-      this.hbaseCluster.waitUntilShutDown();
-      this.hbaseCluster = null;
-    }
-    if (zooKeeperWatcher != null) {
-      zooKeeperWatcher.close();
-      zooKeeperWatcher = null;
-    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ad2a89f/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java
index 97de8a9..0ec97ef 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHBaseTestingUtility.java
@@ -26,11 +26,7 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
-import java.io.IOException;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
 import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -62,6 +58,9 @@ import org.slf4j.LoggerFactory;
  */
 @Category({MiscTests.class, LargeTests.class})
 public class TestHBaseTestingUtility {
+  private static final int NUMTABLES = 1;
+  private static final int NUMROWS = 100;
+  private static final int NUMREGIONS = 10;
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
@@ -471,4 +470,32 @@ public class TestHBaseTestingUtility {
       htu.shutdownMiniCluster();
     }
   }
+
+  // This test demonstrates how long killMiniHBaseCluster takes vs. shutdownMiniHBaseCluster;
+  // for realistic results, adjust NUMROWS and NUMTABLES to much larger numbers.
+  @Test
+  public void testKillMiniHBaseCluster() throws Exception {
+
+    HBaseTestingUtility htu = new HBaseTestingUtility();
+    htu.startMiniZKCluster();
+
+    try {
+      htu.startMiniHBaseCluster();
+
+      TableName tableName;
+      byte[] FAM_NAME;
+
+      for(int i = 0; i < NUMTABLES; i++) {
+        tableName = TableName.valueOf(name.getMethodName() + i);
+        FAM_NAME = Bytes.toBytes("fam" + i);
+
+        try (Table table = htu.createMultiRegionTable(tableName, FAM_NAME, NUMREGIONS)) {
+          htu.loadRandomRows(table, FAM_NAME, 100, NUMROWS);
+        }
+      }
+    } finally {
+      htu.killMiniHBaseCluster();
+      htu.shutdownMiniZKCluster();
+    }
+  }
 }

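For callers, the difference between the two teardown paths is just whether the call blocks on shutdown. A hedged usage sketch (the wrapper class is hypothetical):

import org.apache.hadoop.hbase.HBaseTestingUtility;

final class ClusterStopSketch {
  /** Illustrative teardown: pick abrupt kill vs graceful shutdown. */
  static void stop(HBaseTestingUtility htu, boolean abrupt) throws Exception {
    if (abrupt) {
      htu.killMiniHBaseCluster();      // returns without waiting on shutdown
    } else {
      htu.shutdownMiniHBaseCluster();  // blocks until HBase is fully down
    }
    htu.shutdownMiniZKCluster();
  }
}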

[12/51] [abbrv] hbase git commit: HBASE-21490 WALProcedureStore may remove proc wal files that still contain active procedures

Posted by el...@apache.org.
HBASE-21490 WALProcedureStore may remove proc wal files that still contain active procedures

Signed-off-by: Allan Yang <al...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/405bf5e6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/405bf5e6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/405bf5e6

Branch: refs/heads/HBASE-20952
Commit: 405bf5e6383a09f435baadbac6c389e9f6c43ac6
Parents: 83dc38a
Author: Duo Zhang <zh...@apache.org>
Authored: Mon Nov 19 11:03:52 2018 +0800
Committer: stack <st...@apache.org>
Committed: Mon Nov 19 08:21:28 2018 -0800

----------------------------------------------------------------------
 .../procedure2/store/ProcedureStoreTracker.java |   3 +-
 .../store/wal/ProcedureWALFormat.java           |  33 ++--
 .../procedure2/store/wal/WALProcedureStore.java |  40 +++--
 .../hbase/master/TestLoadProcedureError.java    | 150 +++++++++++++++++++
 4 files changed, 192 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/405bf5e6/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 25c9427..7d430d6 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -274,7 +274,8 @@ public class ProcedureStoreTracker {
     this.keepDeletes = false;
     this.partial = false;
     this.map.clear();
-    resetModified();
+    minModifiedProcId = Long.MAX_VALUE;
+    maxModifiedProcId = Long.MIN_VALUE;
   }
 
   public boolean isModified(long procId) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/405bf5e6/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java
index 179c740..9686593 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java
@@ -88,27 +88,24 @@ public final class ProcedureWALFormat {
       Loader loader) throws IOException {
     ProcedureWALFormatReader reader = new ProcedureWALFormatReader(tracker, loader);
     tracker.setKeepDeletes(true);
-    try {
-      // Ignore the last log which is current active log.
-      while (logs.hasNext()) {
-        ProcedureWALFile log = logs.next();
-        log.open();
-        try {
-          reader.read(log);
-        } finally {
-          log.close();
-        }
+    // Ignore the last log which is current active log.
+    while (logs.hasNext()) {
+      ProcedureWALFile log = logs.next();
+      log.open();
+      try {
+        reader.read(log);
+      } finally {
+        log.close();
       }
-      reader.finish();
+    }
+    reader.finish();
 
-      // The tracker is now updated with all the procedures read from the logs
-      if (tracker.isPartial()) {
-        tracker.setPartialFlag(false);
-      }
-      tracker.resetModified();
-    } finally {
-      tracker.setKeepDeletes(false);
+    // The tracker is now updated with all the procedures read from the logs
+    if (tracker.isPartial()) {
+      tracker.setPartialFlag(false);
     }
+    tracker.resetModified();
+    tracker.setKeepDeletes(false);
   }
 
   public static void writeHeader(OutputStream stream, ProcedureWALHeader header)

http://git-wip-us.apache.org/repos/asf/hbase/blob/405bf5e6/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index dbab6b7..82dc9df 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -448,13 +448,14 @@ public class WALProcedureStore extends ProcedureStoreBase {
     lock.lock();
     try {
       if (logs.isEmpty()) {
-        throw new RuntimeException("recoverLease() must be called before loading data");
+        throw new IllegalStateException("recoverLease() must be called before loading data");
       }
 
       // Nothing to do, If we have only the current log.
       if (logs.size() == 1) {
         LOG.debug("No state logs to replay.");
         loader.setMaxProcId(0);
+        loading.set(false);
         return;
       }
 
@@ -488,15 +489,20 @@ public class WALProcedureStore extends ProcedureStoreBase {
           // TODO: sideline corrupted log
         }
       });
+      // If we fail while loading, we should prevent persisting the storeTracker later in the stop
+      // method. It may happen that we have finished constructing the modified and deleted bits but
+      // fail before we call resetModified; if we then persist the storeTracker, on restart we will
+      // consider that all procedures have been included in this file and delete all the previous
+      // files. Obviously this is not correct. So here we only set loading to false once we have
+      // successfully loaded all the procedures, and when closing we skip persisting the store
+      // tracker. This also prevents the sync thread from doing periodicRoll, where we may also
+      // clean old logs.
+      loading.set(false);
+      // try to cleanup inactive wals and complete the operation
+      buildHoldingCleanupTracker();
+      tryCleanupLogsOnLoad();
     } finally {
-      try {
-        // try to cleanup inactive wals and complete the operation
-        buildHoldingCleanupTracker();
-        tryCleanupLogsOnLoad();
-        loading.set(false);
-      } finally {
-        lock.unlock();
-      }
+      lock.unlock();
     }
   }
 
@@ -1133,11 +1139,15 @@ public class WALProcedureStore extends ProcedureStoreBase {
 
     try {
       ProcedureWALFile log = logs.getLast();
-      log.setProcIds(storeTracker.getModifiedMinProcId(), storeTracker.getModifiedMaxProcId());
-      log.updateLocalTracker(storeTracker);
-      if (!abort) {
-        long trailerSize = ProcedureWALFormat.writeTrailer(stream, storeTracker);
-        log.addToSize(trailerSize);
+      // If the loading flag is true, it usually means that we failed while loading procedures, so
+      // we should not persist the store tracker, as its state may not be correct.
+      if (!loading.get()) {
+        log.setProcIds(storeTracker.getModifiedMinProcId(), storeTracker.getModifiedMaxProcId());
+        log.updateLocalTracker(storeTracker);
+        if (!abort) {
+          long trailerSize = ProcedureWALFormat.writeTrailer(stream, storeTracker);
+          log.addToSize(trailerSize);
+        }
       }
     } catch (IOException e) {
       LOG.warn("Unable to write the trailer", e);
@@ -1193,7 +1203,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
       if (holdingCleanupTracker.isEmpty()) {
         break;
       }
-      iter.next();
+      tracker = iter.next().getTracker();
     }
   }
 
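Abstracted away from WALProcedureStore, the loading flag implements a simple guard: state built during load is only trusted once the load completes. A generic sketch of that pattern with illustrative names:

import java.util.concurrent.atomic.AtomicBoolean;

final class LoadGuardSketch {
  private final AtomicBoolean loading = new AtomicBoolean(true);

  void load() {
    // ... replay logs and rebuild tracker state ...
    // Only clear the flag once the state is known to be complete,
    // so a failed load can never be mistaken for a clean one.
    loading.set(false);
  }

  void persistTrackerOnClose() {
    if (loading.get()) {
      return; // tracker may be half-built; persisting it could delete live logs
    }
    // ... write the trailer / tracker ...
  }
}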

http://git-wip-us.apache.org/repos/asf/hbase/blob/405bf5e6/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestLoadProcedureError.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestLoadProcedureError.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestLoadProcedureError.java
new file mode 100644
index 0000000..0a57dba
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestLoadProcedureError.java
@@ -0,0 +1,150 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertFalse;
+
+import java.util.concurrent.CountDownLatch;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.NoopProcedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
+
+/**
+ * Testcase for HBASE-21490.
+ */
+@Category({ MasterTests.class, MediumTests.class })
+public class TestLoadProcedureError {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestLoadProcedureError.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static TableName NAME = TableName.valueOf("Load");
+
+  private static volatile CountDownLatch ARRIVE;
+
+  private static volatile boolean FINISH_PROC;
+
+  private static volatile boolean FAIL_LOAD;
+
+  public static final class TestProcedure extends NoopProcedure<MasterProcedureEnv>
+      implements TableProcedureInterface {
+
+    @Override
+    protected Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env)
+        throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
+      if (ARRIVE != null) {
+        ARRIVE.countDown();
+        ARRIVE = null;
+      }
+      if (FINISH_PROC) {
+        return null;
+      }
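+      // suspend through the timeout mechanism; setTimeoutFailure below turns the
+      // timeout into a reschedule, so the procedure stays unfinished until FINISH_PROC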
+      setTimeout(1000);
+      setState(ProcedureState.WAITING_TIMEOUT);
+      throw new ProcedureSuspendedException();
+    }
+
+    @Override
+    protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) {
+      setState(ProcedureState.RUNNABLE);
+      env.getProcedureScheduler().addBack(this);
+      return false;
+    }
+
+    @Override
+    protected void afterReplay(MasterProcedureEnv env) {
+      if (FAIL_LOAD) {
+        throw new RuntimeException("Inject error");
+      }
+    }
+
+    @Override
+    public TableName getTableName() {
+      return NAME;
+    }
+
+    @Override
+    public TableOperationType getTableOperationType() {
+      return TableOperationType.READ;
+    }
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  private void waitNoMaster() {
+    UTIL.waitFor(30000, () -> UTIL.getMiniHBaseCluster().getLiveMasterThreads().isEmpty());
+  }
+
+  @Test
+  public void testLoadError() throws Exception {
+    ProcedureExecutor<MasterProcedureEnv> procExec =
+      UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
+    ARRIVE = new CountDownLatch(1);
+    long procId = procExec.submitProcedure(new TestProcedure());
+    ARRIVE.await();
+    FAIL_LOAD = true;
+    // do not persist the store tracker
+    UTIL.getMiniHBaseCluster().getMaster().getWalProcedureStore().stop(true);
+    UTIL.getMiniHBaseCluster().getMaster().abort("for testing");
+    waitNoMaster();
+    // Restart twice, and it should fail twice, as afterReplay above throws an exception.
+    // To reproduce the problem in HBASE-21490 stably, wait until a master is fully done
+    // before starting the new one, otherwise the new master may start too early, call
+    // recoverLease on the proc wal files, and cause us to fail to persist the store
+    // tracker when shutting down.
+    UTIL.getMiniHBaseCluster().startMaster();
+    waitNoMaster();
+    UTIL.getMiniHBaseCluster().startMaster();
+    waitNoMaster();
+    FAIL_LOAD = false;
+    HMaster master = UTIL.getMiniHBaseCluster().startMaster().getMaster();
+    UTIL.waitFor(30000, () -> master.isActiveMaster() && master.isInitialized());
+    // assert the procedure is still there and not finished yet
+    TestProcedure proc = (TestProcedure) master.getMasterProcedureExecutor().getProcedure(procId);
+    assertFalse(proc.isFinished());
+    FINISH_PROC = true;
+    UTIL.waitFor(30000, () -> proc.isFinished());
+  }
+}


[26/51] [abbrv] hbase git commit: HBASE-21493 update downloads page for HBase 1.2.9 release.

Posted by el...@apache.org.
HBASE-21493 update downloads page for HBase 1.2.9 release.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/64cd30f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/64cd30f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/64cd30f5

Branch: refs/heads/HBASE-20952
Commit: 64cd30f59128bc61dfe6d3d71de75a664e63e473
Parents: 6f15cec
Author: Sean Busbey <bu...@apache.org>
Authored: Tue Nov 27 09:07:00 2018 -0600
Committer: Sean Busbey <bu...@apache.org>
Committed: Tue Nov 27 09:07:00 2018 -0600

----------------------------------------------------------------------
 src/site/xdoc/downloads.xml | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/64cd30f5/src/site/xdoc/downloads.xml
----------------------------------------------------------------------
diff --git a/src/site/xdoc/downloads.xml b/src/site/xdoc/downloads.xml
index c4f386a..e526f9c 100644
--- a/src/site/xdoc/downloads.xml
+++ b/src/site/xdoc/downloads.xml
@@ -108,23 +108,23 @@ under the License.
     </tr>
     <tr>
       <td style="test-align: left">
-        1.2.8
+        1.2.9
       </td>
       <td style="test-align: left">
-        2018/10/20
+        2018/11/27
       </td>
       <td style="test-align: left">
-        <a href="https://apache.org/dist/hbase/hbase-1.2.8/compat-check-report.html">1.2.7 vs 1.2.8</a>
+        <a href="https://apache.org/dist/hbase/hbase-1.2.9/compat-check-report.html">1.2.8 vs 1.2.9</a>
       </td>
       <td style="test-align: left">
-        <a href="https://github.com/apache/hbase/blob/rel/1.2.8/CHANGES.txt">Changes</a>
+        <a href="https://github.com/apache/hbase/blob/rel/1.2.9/CHANGES.txt">Changes</a>
       </td>
       <td style="test-align: left">
-        <a href="https://s.apache.org/hbase-1.2.8-jira-release-notes">Release Notes</a>
+        <a href="https://s.apache.org/hbase-1.2.9-jira-release-notes">Release Notes</a>
       </td>
       <td style="test-align: left">
-        <a href="https://www.apache.org/dyn/closer.lua/hbase/hbase-1.2.8/hbase-1.2.8-src.tar.gz">src</a> (<a href="https://apache.org/dist/hbase/hbase-1.2.8/hbase-1.2.8-src.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/hbase-1.2.8/hbase-1.2.8-src.tar.gz.asc">asc</a>) <br />
-        <a href="https://www.apache.org/dyn/closer.lua/hbase/hbase-1.2.8/hbase-1.2.8-bin.tar.gz">bin</a> (<a href="https://apache.org/dist/hbase/hbase-1.2.8/hbase-1.2.8-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/hbase-1.2.8/hbase-1.2.8-bin.tar.gz.asc">asc</a>)
+        <a href="https://www.apache.org/dyn/closer.lua/hbase/hbase-1.2.9/hbase-1.2.9-src.tar.gz">src</a> (<a href="https://apache.org/dist/hbase/hbase-1.2.9/hbase-1.2.9-src.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/hbase-1.2.9/hbase-1.2.9-src.tar.gz.asc">asc</a>) <br />
+        <a href="https://www.apache.org/dyn/closer.lua/hbase/hbase-1.2.9/hbase-1.2.9-bin.tar.gz">bin</a> (<a href="https://apache.org/dist/hbase/hbase-1.2.9/hbase-1.2.9-bin.tar.gz.sha512">sha512</a> <a href="https://apache.org/dist/hbase/hbase-1.2.9/hbase-1.2.9-bin.tar.gz.asc">asc</a>)
       </td>
     </tr>
   </table>


[23/51] [abbrv] hbase git commit: HBASE-21507 Compaction failed when execute AbstractMultiFileWriter.beforeShipped() method

Posted by el...@apache.org.
HBASE-21507 Compaction failed when execute AbstractMultiFileWriter.beforeShipped() method

Signed-off-by: zhangduo <zh...@apache.org>
Signed-off-by: Anoop Sam John <an...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3133d438
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3133d438
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3133d438

Branch: refs/heads/HBASE-20952
Commit: 3133d4385989ccda3be3aa55c7d8afa798180dd0
Parents: 34e1d74
Author: lixiaobao <ao...@gmail.com>
Authored: Tue Nov 27 20:30:23 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Tue Nov 27 20:31:07 2018 +0800

----------------------------------------------------------------------
 .../hbase/regionserver/AbstractMultiFileWriter.java      | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3133d438/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
index 2fdab81..43d0ad8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
@@ -21,12 +21,10 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
-
 import org.apache.hadoop.fs.Path;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.regionserver.CellSink;
 
 /**
  * Base class for cell sink that separates the provided cells into multiple files.
@@ -119,9 +117,12 @@ public abstract class AbstractMultiFileWriter implements CellSink, ShipperListen
 
   @Override
   public void beforeShipped() throws IOException {
-    if (this.writers() != null) {
-      for (StoreFileWriter writer : writers()) {
-        writer.beforeShipped();
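+    // snapshot the collection once and skip writers that have not been created yet,
+    // instead of calling writers() twice and assuming every entry is non-null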
+    Collection<StoreFileWriter> writers = writers();
+    if (writers != null) {
+      for (StoreFileWriter writer : writers) {
+        if (writer != null) {
+          writer.beforeShipped();
+        }
       }
     }
   }


[48/51] [abbrv] hbase git commit: HBASE-21549 Add shell command for serial replication peer

Posted by el...@apache.org.
HBASE-21549 Add shell command for serial replication peer


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e65bd5c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e65bd5c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e65bd5c

Branch: refs/heads/HBASE-20952
Commit: 1e65bd5cf96cf5affd446596ef10b1034e2e0a88
Parents: dfb9ae8
Author: Guanghao Zhang <zg...@apache.org>
Authored: Wed Dec 5 18:05:03 2018 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Fri Dec 7 10:10:13 2018 +0800

----------------------------------------------------------------------
 .../src/main/ruby/hbase/replication_admin.rb    |  5 +++++
 hbase-shell/src/main/ruby/hbase_constants.rb    |  1 +
 .../src/main/ruby/shell/commands/add_peer.rb    |  4 ++++
 .../main/ruby/shell/commands/set_peer_serial.rb |  4 ++--
 .../test/ruby/hbase/replication_admin_test.rb   | 22 +++++++++++++++++++
 src/main/asciidoc/_chapters/ops_mgt.adoc        | 23 +++++++++++++++++++-
 6 files changed, 56 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e65bd5c/hbase-shell/src/main/ruby/hbase/replication_admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index 5f86365..c01b6ea 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -66,6 +66,7 @@ module Hbase
         namespaces = args.fetch(NAMESPACES, nil)
         peer_state = args.fetch(STATE, nil)
         remote_wal_dir = args.fetch(REMOTE_WAL_DIR, nil)
+        serial = args.fetch(SERIAL, nil)
 
         # Create and populate a ReplicationPeerConfig
         builder = ReplicationPeerConfig.newBuilder()
@@ -79,6 +80,10 @@ module Hbase
           builder.setRemoteWALDir(remote_wal_dir)
         end
 
+        unless serial.nil?
+          builder.setSerial(serial)
+        end
+
         unless config.nil?
           builder.putAllConfiguration(config)
         end

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e65bd5c/hbase-shell/src/main/ruby/hbase_constants.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb
index 2870dfb..4c1ad22 100644
--- a/hbase-shell/src/main/ruby/hbase_constants.rb
+++ b/hbase-shell/src/main/ruby/hbase_constants.rb
@@ -78,6 +78,7 @@ module HBaseConstants
   ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME'.freeze
   CLUSTER_KEY = 'CLUSTER_KEY'.freeze
   REMOTE_WAL_DIR = 'REMOTE_WAL_DIR'.freeze
+  SERIAL = 'SERIAL'.freeze
   TABLE_CFS = 'TABLE_CFS'.freeze
   NAMESPACES = 'NAMESPACES'.freeze
   STATE = 'STATE'.freeze

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e65bd5c/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
index 4b6f294..9be42ac 100644
--- a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
@@ -34,6 +34,8 @@ An optional parameter for namespaces identifies which namespace's tables will be
 to the peer cluster.
 An optional parameter for table column families identifies which tables and/or column families
 will be replicated to the peer cluster.
+An optional parameter for the serial flag identifies whether or not the replication peer is a
+serial replication peer. The default serial flag is false.
 
 Note: Set a namespace in the peer config means that all tables in this namespace
 will be replicated to the peer cluster. So if you already have set a namespace in peer config,
@@ -50,6 +52,8 @@ Examples:
     NAMESPACES => ["ns1", "ns2", "ns3"]
   hbase> add_peer '2', CLUSTER_KEY => "zk1,zk2,zk3:2182:/hbase-prod",
     NAMESPACES => ["ns1", "ns2"], TABLE_CFS => { "ns3:table1" => [], "ns3:table2" => ["cf1"] }
+  hbase> add_peer '3', CLUSTER_KEY => "zk1,zk2,zk3:2182:/hbase-prod",
+    NAMESPACES => ["ns1", "ns2", "ns3"], SERIAL => true
 
 For a custom replication endpoint, the ENDPOINT_CLASSNAME can be provided. Two optional arguments
 are DATA and CONFIG which can be specified to set different either the peer_data or configuration

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e65bd5c/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb b/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb
index d556077..a6484cd 100644
--- a/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/set_peer_serial.rb
@@ -41,8 +41,8 @@ module Shell
   EOF
       end
 
-      def command(id, peer_serial)
-        replication_admin.set_peer_serial(id, peer_serial)
+      def command(id, serial)
+        replication_admin.set_peer_serial(id, serial)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e65bd5c/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index f44fd8c..f4c771e 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -100,6 +100,27 @@ module Hbase
       command(:remove_peer, @peer_id)
     end
 
+    define_test "add_peer: serial" do
+      cluster_key = "server1.cie.com:2181:/hbase"
+      remote_wal_dir = "hdfs://srv1:9999/hbase"
+      table_cfs = { "ns3:table1" => [], "ns3:table2" => [],
+        "ns3:table3" => [] }
+      # add a new replication peer whose serial flag is true
+      args = { CLUSTER_KEY => cluster_key, SERIAL => true,
+        TABLE_CFS => table_cfs}
+      command(:add_peer, @peer_id, args)
+
+      assert_equal(1, command(:list_peers).length)
+      peer = command(:list_peers).get(0)
+      assert_equal(@peer_id, peer.getPeerId)
+      assert_equal(cluster_key, peer.getPeerConfig.getClusterKey)
+      assert_equal(true, peer.getPeerConfig.isSerial)
+      assert_tablecfs_equal(table_cfs, peer.getPeerConfig.getTableCFsMap())
+
+      # cleanup for future tests
+      command(:remove_peer, @peer_id)
+    end
+
     define_test "add_peer: remote wal dir" do
       cluster_key = "server1.cie.com:2181:/hbase"
       remote_wal_dir = "hdfs://srv1:9999/hbase"
@@ -490,6 +511,7 @@ module Hbase
 
       assert_equal(1, command(:list_peers).length)
       peer_config = command(:list_peers).get(0).getPeerConfig
+      # the default serial flag is false
       assert_equal(false, peer_config.isSerial)
 
       command(:set_peer_serial, @peer_id, true)

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e65bd5c/src/main/asciidoc/_chapters/ops_mgt.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc
index f2ee1cc..db85b45 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -1898,7 +1898,28 @@ This treatment can possibly lead to data inconsistency between source and destin
 
 .Serial replication configuration
 
-. Set the serial flag to true for a repliation peer. You can either set it to true when creating a replication peer, or change it to true later.
+Set the serial flag to true for a replication peer. The default serial flag is false.
+
+* Add a new replication peer whose serial flag is true
+
+[source,ruby]
+----
+hbase> add_peer '1', CLUSTER_KEY => "server1.cie.com:2181:/hbase", SERIAL => true
+----
+
+* Set a replication peer's serial flag to false
+
+[source,ruby]
+----
+hbase> set_peer_serial '1', false
+----
+
+* Set a replication peer's serial flag to true
+
+[source,ruby]
+----
+hbase> set_peer_serial '1', true
+----
 
 The serial replication feature had been done firstly in link:https://issues.apache.org/jira/browse/HBASE-9465[HBASE-9465] and then reverted and redone in link:https://issues.apache.org/jira/browse/HBASE-20046[HBASE-20046]. You can find more details in these issues.
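
For reference, the same flag can be set from a Java client through the ReplicationPeerConfig
builder that the Ruby wrapper in this commit delegates to. A minimal sketch, assuming the
standard Admin#addReplicationPeer client API; the peer id and cluster key below are
placeholders:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

  public class AddSerialPeerExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
          Admin admin = conn.getAdmin()) {
        // the same flag the shell's SERIAL option sets via builder.setSerial(serial)
        ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
            .setClusterKey("zk1,zk2,zk3:2182:/hbase-prod") // placeholder cluster key
            .setSerial(true)
            .build();
        admin.addReplicationPeer("1", peerConfig); // placeholder peer id
      }
    }
  }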
 


[21/51] [abbrv] hbase git commit: HBASE-21154 Remove hbase:namespace table; fold it into hbase:meta

Posted by el...@apache.org.
HBASE-21154 Remove hbase:namespace table; fold it into hbase:meta


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1acbd36c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1acbd36c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1acbd36c

Branch: refs/heads/HBASE-20952
Commit: 1acbd36c903b048141866b143507bfce124a5c5f
Parents: a0e3cb6
Author: zhangduo <zh...@apache.org>
Authored: Fri Nov 23 19:56:41 2018 +0800
Committer: Duo Zhang <zh...@apache.org>
Committed: Mon Nov 26 14:51:56 2018 +0800

----------------------------------------------------------------------
 .../hbase/client/TableDescriptorBuilder.java    |  38 +-
 .../org/apache/hadoop/hbase/HConstants.java     |   8 +-
 .../hadoop/hbase/NamespaceDescriptor.java       |   2 +-
 .../java/org/apache/hadoop/hbase/TableName.java |   9 +-
 .../src/main/protobuf/MasterProcedure.proto     |   9 +-
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |  31 +-
 .../hbase/rsgroup/TestRSGroupsBasics.java       |   4 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon    |   2 -
 .../apache/hadoop/hbase/ZKNamespaceManager.java | 215 -----------
 .../hbase/master/ClusterSchemaServiceImpl.java  |  30 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  29 +-
 .../hbase/master/TableNamespaceManager.java     | 372 +++++--------------
 .../hbase/master/locking/LockProcedure.java     |   2 +-
 .../AbstractStateMachineNamespaceProcedure.java |  46 ++-
 .../procedure/CreateNamespaceProcedure.java     | 156 ++------
 .../procedure/DeleteNamespaceProcedure.java     | 217 +++--------
 .../master/procedure/InitMetaProcedure.java     |  42 +++
 .../procedure/MasterProcedureScheduler.java     |   9 +-
 .../procedure/ModifyNamespaceProcedure.java     | 109 ++----
 .../procedure/TableProcedureInterface.java      |   7 +
 .../hbase/master/procedure/TableQueue.java      |   4 +-
 .../hadoop/hbase/regionserver/HRegion.java      |   6 +-
 .../hbase/regionserver/HRegionServer.java       |  21 +-
 .../hbase/security/access/AccessController.java |   2 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   |  11 +
 .../hadoop/hbase/HBaseTestingUtility.java       |   1 -
 .../hadoop/hbase/TestGlobalMemStoreSize.java    |   4 +-
 .../org/apache/hadoop/hbase/TestNamespace.java  |  19 +-
 .../client/TestAsyncNamespaceAdminApi.java      |  20 -
 .../hadoop/hbase/client/TestFromClientSide.java |   2 +-
 .../hadoop/hbase/master/AbstractTestDLS.java    |   8 +-
 .../hadoop/hbase/master/TestMasterMetrics.java  |  71 ++--
 .../TestMasterOperationsForRegionReplicas.java  |  10 +-
 .../TestMasterRestartAfterDisablingTable.java   |  15 +-
 .../hbase/master/TestMigrateNamespaceTable.java | 108 ++++++
 .../hadoop/hbase/master/TestRestartCluster.java |   7 +-
 .../hadoop/hbase/master/TestRollingRestart.java |  10 +-
 .../assignment/TestRegionMoveAndAbandon.java    |  13 +-
 .../TestFavoredStochasticLoadBalancer.java      |   1 -
 .../procedure/TestMasterProcedureScheduler.java |   4 +-
 .../procedure/TestModifyNamespaceProcedure.java |   6 +-
 .../master/procedure/TestProcedurePriority.java |   2 -
 .../hbase/regionserver/TestLogRoller.java       |   5 +
 .../regionserver/TestPerColumnFamilyFlush.java  |   7 -
 .../TestRegionReplicasWithRestartScenarios.java |   2 +-
 .../regionserver/TestRegionServerMetrics.java   |   2 +-
 .../wal/AbstractTestLogRolling.java             |   3 -
 .../TestReplicationWALEntryFilters.java         |   7 -
 .../hadoop/hbase/util/TestHBaseFsckMOB.java     |   1 -
 49 files changed, 602 insertions(+), 1107 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index d1c3f78..22c71c1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -220,11 +220,28 @@ public class TableDescriptorBuilder {
     RESERVED_KEYWORDS.add(IS_META_KEY);
   }
 
+  /**
+   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
+   *             any more.
+   */
   @InterfaceAudience.Private
+  @Deprecated
   public final static String NAMESPACE_FAMILY_INFO = "info";
+
+  /**
+   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
+   *             any more.
+   */
   @InterfaceAudience.Private
+  @Deprecated
   public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = Bytes.toBytes(NAMESPACE_FAMILY_INFO);
+
+  /**
+   * @deprecated namespace table has been folded into the ns family in meta table, do not use this
+   *             any more.
+   */
   @InterfaceAudience.Private
+  @Deprecated
   public final static byte[] NAMESPACE_COL_DESC_BYTES = Bytes.toBytes("d");
 
   /**
@@ -245,22 +262,21 @@ public class TableDescriptorBuilder {
       CP_HTD_ATTR_VALUE_PARAM_VALUE_PATTERN + "),?");
   private static final Pattern CP_HTD_ATTR_KEY_PATTERN =
     Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
+
   /**
    * Table descriptor for namespace table
+   * @deprecated We have folded the data in namespace table into meta table, so do not use it any
+   *             more.
    */
-  // TODO We used to set CacheDataInL1 for NS table. When we have BucketCache in file mode, now the
-  // NS data goes to File mode BC only. Test how that affect the system. If too much, we have to
-  // rethink about adding back the setCacheDataInL1 for NS table.
-  public static final TableDescriptor NAMESPACE_TABLEDESC
-    = TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
+  @Deprecated
+  public static final TableDescriptor NAMESPACE_TABLEDESC =
+    TableDescriptorBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME)
       .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(NAMESPACE_FAMILY_INFO_BYTES)
-        // Ten is arbitrary number.  Keep versions to help debugging.
-        .setMaxVersions(10)
-        .setInMemory(true)
-        .setBlocksize(8 * 1024)
-        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
-        .build())
+        // Ten is arbitrary number. Keep versions to help debugging.
+        .setMaxVersions(10).setInMemory(true).setBlocksize(8 * 1024)
+        .setScope(HConstants.REPLICATION_SCOPE_LOCAL).build())
       .build();
+
   private final ModifyableTableDescriptor desc;
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index fbfab4b..fdc3d82 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -26,7 +26,6 @@ import java.util.Collections;
 import java.util.List;
 import java.util.UUID;
 import java.util.regex.Pattern;
-
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -508,6 +507,13 @@ public final class HConstants {
   public static final byte[] REPLICATION_BARRIER_FAMILY =
       Bytes.toBytes(REPLICATION_BARRIER_FAMILY_STR);
 
+  /** The namespace family as a string */
+  public static final String NAMESPACE_FAMILY_STR = "ns";
+
+  /** The namespace family */
+  public static final byte[] NAMESPACE_FAMILY = Bytes.toBytes(NAMESPACE_FAMILY_STR);
+
+  public static final byte[] NAMESPACE_COL_DESC_QUALIFIER = Bytes.toBytes("d");
   /**
    * The meta table version column qualifier.
    * We keep current version of the meta table in this column in <code>-ROOT-</code>

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java
index d1b69ba..07386b5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/NamespaceDescriptor.java
@@ -166,7 +166,7 @@ public class NamespaceDescriptor {
 
     private Builder(NamespaceDescriptor ns) {
       this.bName = ns.name;
-      this.bConfiguration = ns.configuration;
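+      // copy the entries rather than aliasing the descriptor's map, so mutating the
+      // builder cannot change the NamespaceDescriptor it was created from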
+      this.bConfiguration.putAll(ns.configuration);
     }
 
     private Builder(String name) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
index e6cabbc..093bf20 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TableName.java
@@ -80,9 +80,14 @@ public final class TableName implements Comparable<TableName> {
   public static final TableName META_TABLE_NAME =
       valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "meta");
 
-  /** The Namespace table's name. */
+  /**
+   * The Namespace table's name.
+   * @deprecated We have folded the data in namespace table into meta table, so do not use it any
+   *             more.
+   */
+  @Deprecated
   public static final TableName NAMESPACE_TABLE_NAME =
-      valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace");
+    valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "namespace");
 
   public static final String OLD_META_STR = ".META.";
   public static final String OLD_ROOT_STR = "-ROOT-";

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 8bb69cf..44ac952 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -117,7 +117,7 @@ enum CreateNamespaceState {
   CREATE_NAMESPACE_PREPARE = 1;
   CREATE_NAMESPACE_CREATE_DIRECTORY = 2;
   CREATE_NAMESPACE_INSERT_INTO_NS_TABLE = 3;
-  CREATE_NAMESPACE_UPDATE_ZK = 4;
+  CREATE_NAMESPACE_UPDATE_ZK = 4[deprecated=true];
   CREATE_NAMESPACE_SET_NAMESPACE_QUOTA = 5;
 }
 
@@ -128,7 +128,7 @@ message CreateNamespaceStateData {
 enum ModifyNamespaceState {
   MODIFY_NAMESPACE_PREPARE = 1;
   MODIFY_NAMESPACE_UPDATE_NS_TABLE = 2;
-  MODIFY_NAMESPACE_UPDATE_ZK = 3;
+  MODIFY_NAMESPACE_UPDATE_ZK = 3[deprecated=true];
 }
 
 message ModifyNamespaceStateData {
@@ -139,7 +139,7 @@ message ModifyNamespaceStateData {
 enum DeleteNamespaceState {
   DELETE_NAMESPACE_PREPARE = 1;
   DELETE_NAMESPACE_DELETE_FROM_NS_TABLE = 2;
-  DELETE_NAMESPACE_REMOVE_FROM_ZK = 3;
+  DELETE_NAMESPACE_REMOVE_FROM_ZK = 3[deprecated=true];
   DELETE_NAMESPACE_DELETE_DIRECTORIES = 4;
   DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA = 5;
 }
@@ -475,7 +475,8 @@ message ReopenTableRegionsStateData {
 }
 
 enum InitMetaState {
-   INIT_META_ASSIGN_META = 1;
+  INIT_META_ASSIGN_META = 1;
+  INIT_META_CREATE_NAMESPACES = 2;
 }
 
 message InitMetaStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 4a881d3..cdd68d1 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hbase.rsgroup;
 
 import com.google.protobuf.ServiceException;
-
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -34,7 +33,6 @@ import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -85,6 +83,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
 import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 
@@ -757,12 +756,9 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
         assignedRegions.clear();
         found.set(true);
         try {
-          conn.getTable(TableName.NAMESPACE_TABLE_NAME);
-          conn.getTable(RSGROUP_TABLE_NAME);
           boolean rootMetaFound =
               masterServices.getMetaTableLocator().verifyMetaRegionLocation(
                   conn, masterServices.getZooKeeper(), 1);
-          final AtomicBoolean nsFound = new AtomicBoolean(false);
           if (rootMetaFound) {
             MetaTableAccessor.Visitor visitor = new DefaultVisitorBase() {
               @Override
@@ -791,36 +787,13 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
                     }
                     foundRegions.add(info);
                   }
-                  if (TableName.NAMESPACE_TABLE_NAME.equals(info.getTable())) {
-                    Cell cell = row.getColumnLatestCell(HConstants.CATALOG_FAMILY,
-                        HConstants.SERVER_QUALIFIER);
-                    ServerName sn = null;
-                    if(cell != null) {
-                      sn = ServerName.parseVersionedServerName(CellUtil.cloneValue(cell));
-                    }
-                    if (sn == null) {
-                      nsFound.set(false);
-                    } else if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME,
-                        TableState.State.ENABLED)) {
-                      try {
-                        ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
-                        ClientProtos.GetRequest request =
-                            RequestConverter.buildGetRequest(info.getRegionName(),
-                                new Get(ROW_KEY));
-                        rs.get(null, request);
-                        nsFound.set(true);
-                      } catch(Exception ex) {
-                        LOG.debug("Caught exception while verifying group region", ex);
-                      }
-                    }
-                  }
                 }
                 return true;
               }
             };
             MetaTableAccessor.fullScanRegions(conn, visitor);
             // if no regions in meta then we have to create the table
-            if (foundRegions.size() < 1 && rootMetaFound && !createSent && nsFound.get()) {
+            if (foundRegions.size() < 1 && rootMetaFound && !createSent) {
               createRSGroupTable();
               createSent = true;
             }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java
index 7415ab5..3860c59 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBasics.java
@@ -93,8 +93,8 @@ public class TestRSGroupsBasics extends TestRSGroupsBase {
     assertEquals(4, defaultInfo.getServers().size());
     // Assignment of root and meta regions.
     int count = master.getAssignmentManager().getRegionStates().getRegionAssignments().size();
-    //3 meta,namespace, group
-    assertEquals(3, count);
+    // 2 meta, group
+    assertEquals(2, count);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 03056e1..7bb6c40 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -447,8 +447,6 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
             description = "The hbase:acl table holds information about acl.";
         } else if (tableName.equals(VisibilityConstants.LABELS_TABLE_NAME)){
             description = "The hbase:labels table holds information about visibility labels.";
-        } else if (tableName.equals(TableName.NAMESPACE_TABLE_NAME)){
-            description = "The hbase:namespace table holds information about namespaces.";
         } else if (tableName.equals(QuotaUtil.QUOTA_TABLE_NAME)){
             description = "The hbase:quota table holds quota information about number" +
             " or size of requests in a given time frame.";

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
deleted file mode 100644
index 101c40e..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
+++ /dev/null
@@ -1,215 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.NavigableMap;
-import java.util.NavigableSet;
-import java.util.concurrent.ConcurrentSkipListMap;
-
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.ZKListener;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
-
-/**
- * Class servers two purposes:
- *
- * 1. Broadcast NamespaceDescriptor information via ZK
- * (Done by the Master)
- * 2. Consume broadcasted NamespaceDescriptor changes
- * (Done by the RegionServers)
- *
- */
-@InterfaceAudience.Private
-public class ZKNamespaceManager extends ZKListener {
-  private static final Logger LOG = LoggerFactory.getLogger(ZKNamespaceManager.class);
-  private final String nsZNode;
-  private final NavigableMap<String,NamespaceDescriptor> cache;
-
-  public ZKNamespaceManager(ZKWatcher zkw) throws IOException {
-    super(zkw);
-    nsZNode = zkw.getZNodePaths().namespaceZNode;
-    cache = new ConcurrentSkipListMap<>();
-  }
-
-  public void start() throws IOException {
-    watcher.registerListener(this);
-    try {
-      if (ZKUtil.watchAndCheckExists(watcher, nsZNode)) {
-        List<ZKUtil.NodeAndData> existing =
-            ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode);
-        if (existing != null) {
-          refreshNodes(existing);
-        }
-      } else {
-        ZKUtil.createWithParents(watcher, nsZNode);
-      }
-    } catch (KeeperException e) {
-      throw new IOException("Failed to initialize ZKNamespaceManager", e);
-    }
-  }
-
-  public void stop() throws IOException {
-    this.watcher.unregisterListener(this);
-  }
-
-  public NamespaceDescriptor get(String name) {
-    return cache.get(name);
-  }
-
-  public void update(NamespaceDescriptor ns) throws IOException {
-    writeNamespace(ns);
-    cache.put(ns.getName(), ns);
-  }
-
-  public void remove(String name) throws IOException {
-    deleteNamespace(name);
-    cache.remove(name);
-  }
-
-  public NavigableSet<NamespaceDescriptor> list() throws IOException {
-    NavigableSet<NamespaceDescriptor> ret =
-        Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
-    for(NamespaceDescriptor ns: cache.values()) {
-      ret.add(ns);
-    }
-    return ret;
-  }
-
-  @Override
-  public void nodeCreated(String path) {
-    if (nsZNode.equals(path)) {
-      try {
-        List<ZKUtil.NodeAndData> nodes =
-            ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode);
-        refreshNodes(nodes);
-      } catch (KeeperException ke) {
-        String msg = "Error reading data from zookeeper";
-        LOG.error(msg, ke);
-        watcher.abort(msg, ke);
-      } catch (IOException e) {
-        String msg = "Error parsing data from zookeeper";
-        LOG.error(msg, e);
-        watcher.abort(msg, e);
-      }
-    }
-  }
-
-  @Override
-  public void nodeDeleted(String path) {
-    if (nsZNode.equals(ZKUtil.getParent(path))) {
-      String nsName = ZKUtil.getNodeName(path);
-      cache.remove(nsName);
-    }
-  }
-
-  @Override
-  public void nodeDataChanged(String path) {
-    if (nsZNode.equals(ZKUtil.getParent(path))) {
-      try {
-        byte[] data = ZKUtil.getDataAndWatch(watcher, path);
-        NamespaceDescriptor ns =
-            ProtobufUtil.toNamespaceDescriptor(
-                HBaseProtos.NamespaceDescriptor.parseFrom(data));
-        cache.put(ns.getName(), ns);
-      } catch (KeeperException ke) {
-        String msg = "Error reading data from zookeeper for node "+path;
-        LOG.error(msg, ke);
-        // only option is to abort
-        watcher.abort(msg, ke);
-      } catch (IOException ioe) {
-        String msg = "Error deserializing namespace: "+path;
-        LOG.error(msg, ioe);
-        watcher.abort(msg, ioe);
-      }
-    }
-  }
-
-  @Override
-  public void nodeChildrenChanged(String path) {
-    if (nsZNode.equals(path)) {
-      try {
-        List<ZKUtil.NodeAndData> nodes =
-            ZKUtil.getChildDataAndWatchForNewChildren(watcher, nsZNode);
-        refreshNodes(nodes);
-      } catch (KeeperException ke) {
-        LOG.error("Error reading data from zookeeper for path "+path, ke);
-        watcher.abort("ZooKeeper error get node children for path "+path, ke);
-      } catch (IOException e) {
-        LOG.error("Error deserializing namespace child from: "+path, e);
-        watcher.abort("Error deserializing namespace child from: " + path, e);
-      }
-    }
-  }
-
-  private void deleteNamespace(String name) throws IOException {
-    String zNode = ZNodePaths.joinZNode(nsZNode, name);
-    try {
-      ZKUtil.deleteNode(watcher, zNode);
-    } catch (KeeperException e) {
-      if (e instanceof KeeperException.NoNodeException) {
-        // If the node does not exist, it could be already deleted. Continue without fail.
-        LOG.warn("The ZNode " + zNode + " for namespace " + name + " does not exist.");
-      } else {
-        LOG.error("Failed updating permissions for namespace " + name, e);
-        throw new IOException("Failed updating permissions for namespace " + name, e);
-      }
-    }
-  }
-
-  private void writeNamespace(NamespaceDescriptor ns) throws IOException {
-    String zNode = ZNodePaths.joinZNode(nsZNode, ns.getName());
-    try {
-      ZKUtil.createWithParents(watcher, zNode);
-      ZKUtil.updateExistingNodeData(watcher, zNode,
-          ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray(), -1);
-    } catch (KeeperException e) {
-      LOG.error("Failed updating permissions for namespace "+ns.getName(), e);
-      throw new IOException("Failed updating permissions for namespace "+ns.getName(), e);
-    }
-  }
-
-  private void refreshNodes(List<ZKUtil.NodeAndData> nodes) throws IOException {
-    for (ZKUtil.NodeAndData n : nodes) {
-      if (n.isEmpty()) continue;
-      String path = n.getNode();
-      String namespace = ZKUtil.getNodeName(path);
-      byte[] nodeData = n.getData();
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Updating namespace cache from node " + namespace + " with data: " +
-            Bytes.toStringBinary(nodeData));
-      }
-      NamespaceDescriptor ns =
-          ProtobufUtil.toNamespaceDescriptor(
-              HBaseProtos.NamespaceDescriptor.parseFrom(nodeData));
-      cache.put(ns.getName(), ns);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
index 5af7614..1e631b0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterSchemaServiceImpl.java
@@ -18,15 +18,10 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
-import java.util.Set;
-
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.ServiceNotRunningException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.master.procedure.CreateNamespaceProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteNamespaceProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -34,15 +29,17 @@ import org.apache.hadoop.hbase.master.procedure.ModifyNamespaceProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService;
 import org.apache.hadoop.hbase.util.NonceKey;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.AbstractService;
 
 @InterfaceAudience.Private
 class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaService {
+
   private final TableNamespaceManager tableNamespaceManager;
   private final MasterServices masterServices;
-  private final static List<NamespaceDescriptor> EMPTY_NAMESPACE_LIST =
-    Collections.unmodifiableList(new ArrayList<NamespaceDescriptor>(0));
 
   ClusterSchemaServiceImpl(final MasterServices masterServices) {
     this.masterServices = masterServices;
@@ -51,9 +48,10 @@ class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaS
 
   // All below are synchronized so consistent view on whether running or not.
 
-
   private synchronized void checkIsRunning() throws ServiceNotRunningException {
-    if (!isRunning()) throw new ServiceNotRunningException();
+    if (!isRunning()) {
+      throw new ServiceNotRunningException();
+    }
   }
 
   @Override
@@ -70,10 +68,6 @@ class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaS
   protected void doStop() {
     // This is no stop for the table manager.
     notifyStopped();
-    TableNamespaceManager tnsm = getTableNamespaceManager();
-    if (tnsm != null) {
-      tnsm.stop("Stopping");
-    }
   }
 
   @Override
@@ -123,10 +117,8 @@ class ClusterSchemaServiceImpl extends AbstractService implements ClusterSchemaS
   @Override
   public List<NamespaceDescriptor> getNamespaces() throws IOException {
     checkIsRunning();
-    Set<NamespaceDescriptor> set = getTableNamespaceManager().list();
-    if (set == null || set.isEmpty()) return EMPTY_NAMESPACE_LIST;
-    List<NamespaceDescriptor> list = new ArrayList<>(set.size());
-    list.addAll(set);
-    return Collections.unmodifiableList(list);
+    return getTableNamespaceManager().list().stream()
+      .sorted(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR)
+      .collect(ImmutableList.toImmutableList());
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ae04283..428030d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1112,10 +1112,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     getChoreService().scheduleChore(catalogJanitorChore);
     this.serverManager.startChore();
 
-    // NAMESPACE READ!!!!
-    // Here we expect hbase:namespace to be online. See inside initClusterSchemaService.
-    // TODO: Fix this. Namespace is a pain being a sort-of system table. Fold it in to hbase:meta.
-    // isNamespace does like isMeta and waits until namespace is onlined before allowing progress.
+    // Only for rolling upgrades, where we need to migrate the namespace table data to the meta table.
     if (!waitForNamespaceOnline()) {
       return;
     }
@@ -1243,20 +1240,28 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   /**
    * Check hbase:namespace table is assigned. If not, startup will hang looking for the ns table
-   * (TODO: Fix this! NS should not hold-up startup).
+   * <p/>
+   * This is for rolling upgrades: later we will migrate the data in the ns table to the ns family
+   * of the meta table. If this is a new cluster, this method will return immediately as there will
+   * be no namespace table/region.
    * @return True if namespace table is up/online.
    */
-  @VisibleForTesting
-  public boolean waitForNamespaceOnline() throws InterruptedException {
-    List<RegionInfo> ris = this.assignmentManager.getRegionStates().
-        getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME);
+  private boolean waitForNamespaceOnline() throws InterruptedException, IOException {
+    TableState nsTableState =
+      MetaTableAccessor.getTableState(getClusterConnection(), TableName.NAMESPACE_TABLE_NAME);
+    if (nsTableState == null || nsTableState.isDisabled()) {
+      // this means we have already migrated the data and disabled or deleted the namespace table,
+      // or this is a new deploy which does not have a namespace table from the beginning.
+      return true;
+    }
+    List<RegionInfo> ris =
+      this.assignmentManager.getRegionStates().getRegionsOfTable(TableName.NAMESPACE_TABLE_NAME);
     if (ris.isEmpty()) {
-      // If empty, means we've not assigned the namespace table yet... Just return true so startup
-      // continues and the namespace table gets created.
+      // this may not happen any more, but there is no harm in keeping the check here...
       return true;
     }
     // Else there are namespace regions up in meta. Ensure they are assigned before we go on.
-    for (RegionInfo ri: ris) {
+    for (RegionInfo ri : ris) {
       isRegionOnline(ri);
     }
     return true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index aefeebe..21178e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -19,324 +19,159 @@
 package org.apache.hadoop.hbase.master;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.NavigableSet;
-
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.stream.Collectors;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilderFactory;
-import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ZKNamespaceManager;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
-import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
-import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
-import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Threads;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 
 /**
- * This is a helper class used internally to manage the namespace metadata that is stored in
- * TableName.NAMESPACE_TABLE_NAME. It also mirrors updates to the ZK store by forwarding updates to
- * {@link org.apache.hadoop.hbase.ZKNamespaceManager}.
- *
- * WARNING: Do not use. Go via the higher-level {@link ClusterSchema} API instead. This manager
- * is likely to go aways anyways.
+ * This is a helper class used internally to manage the namespace metadata that is stored in the ns
+ * family in meta table.
  */
 @InterfaceAudience.Private
-@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
-  justification="TODO: synchronize access on nsTable but it is done in tiers above and this " +
-    "class is going away/shrinking")
-public class TableNamespaceManager implements Stoppable {
-  private static final Logger LOG = LoggerFactory.getLogger(TableNamespaceManager.class);
-  private volatile boolean stopped = false;
-
-  private Configuration conf;
-  private MasterServices masterServices;
-  private Table nsTable = null; // FindBugs: IS2_INCONSISTENT_SYNC TODO: Access is not synchronized
-  private ZKNamespaceManager zkNamespaceManager;
-  private boolean initialized;
+public class TableNamespaceManager {
 
   public static final String KEY_MAX_REGIONS = "hbase.namespace.quota.maxregions";
   public static final String KEY_MAX_TABLES = "hbase.namespace.quota.maxtables";
   static final String NS_INIT_TIMEOUT = "hbase.master.namespace.init.timeout";
   static final int DEFAULT_NS_INIT_TIMEOUT = 300000;
 
+  private final ConcurrentMap<String, NamespaceDescriptor> cache = new ConcurrentHashMap<>();
+
+  private final MasterServices masterServices;
+
   TableNamespaceManager(MasterServices masterServices) {
     this.masterServices = masterServices;
-    this.conf = masterServices.getConfiguration();
   }
 
-  public void start() throws IOException {
-    if (!MetaTableAccessor.tableExists(masterServices.getConnection(),
-        TableName.NAMESPACE_TABLE_NAME)) {
-      LOG.info("Namespace table not found. Creating...");
-      createNamespaceTable(masterServices);
+  private void migrateNamespaceTable() throws IOException {
+    try (Table nsTable = masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
+      ResultScanner scanner = nsTable.getScanner(
+        new Scan().addFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES).readAllVersions());
+      BufferedMutator mutator =
+        masterServices.getConnection().getBufferedMutator(TableName.META_TABLE_NAME)) {
+      for (Result result;;) {
+        result = scanner.next();
+        if (result == null) {
+          break;
+        }
+        Put put = new Put(result.getRow());
+        result
+          .getColumnCells(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES,
+            TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES)
+          .forEach(c -> put.addColumn(HConstants.NAMESPACE_FAMILY,
+            HConstants.NAMESPACE_COL_DESC_QUALIFIER, c.getTimestamp(), CellUtil.cloneValue(c)));
+        mutator.mutate(put);
+      }
     }
+    // Schedule a disable procedure instead of blocking and waiting here: disabling a table waits
+    // until the master is initialized, but we are part of that initialization...
+    masterServices.getMasterProcedureExecutor().submitProcedure(
+      new DisableTableProcedure(masterServices.getMasterProcedureExecutor().getEnvironment(),
+        TableName.NAMESPACE_TABLE_NAME, false));
+  }
 
-    try {
-      // Wait for the namespace table to be initialized.
-      long startTime = EnvironmentEdgeManager.currentTime();
-      int timeout = conf.getInt(NS_INIT_TIMEOUT, DEFAULT_NS_INIT_TIMEOUT);
-      while (!isTableAvailableAndInitialized()) {
-        if (EnvironmentEdgeManager.currentTime() - startTime + 100 > timeout) {
-          // We can't do anything if ns is not online.
-          throw new IOException("Timedout " + timeout + "ms waiting for namespace table to "
-              + "be assigned and enabled: " + getTableState());
+  private void loadNamespaceIntoCache() throws IOException {
+    try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME);
+      ResultScanner scanner = table.getScanner(HConstants.NAMESPACE_FAMILY)) {
+      for (Result result;;) {
+        result = scanner.next();
+        if (result == null) {
+          break;
         }
-        Thread.sleep(100);
+        Cell cell = result.getColumnLatestCell(HConstants.NAMESPACE_FAMILY,
+          HConstants.NAMESPACE_COL_DESC_QUALIFIER);
+        NamespaceDescriptor ns = ProtobufUtil
+          .toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(CodedInputStream
+            .newInstance(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())));
+        cache.put(ns.getName(), ns);
       }
-    } catch (InterruptedException e) {
-      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
     }
   }
 
-  private synchronized Table getNamespaceTable() throws IOException {
-    if (!isTableNamespaceManagerInitialized()) {
-      throw new IOException(this.getClass().getName() + " isn't ready to serve");
+  public void start() throws IOException {
+    TableState nsTableState = MetaTableAccessor.getTableState(masterServices.getConnection(),
+      TableName.NAMESPACE_TABLE_NAME);
+    if (nsTableState != null && nsTableState.isEnabled()) {
+      migrateNamespaceTable();
     }
-    return nsTable;
+    loadNamespaceIntoCache();
   }
 
-  /*
+  /**
    * Check whether a namespace already exists.
    */
-  public boolean doesNamespaceExist(final String namespaceName) throws IOException {
-    if (nsTable == null) {
-      throw new IOException(this.getClass().getName() + " isn't ready to serve");
-    }
-    return (get(nsTable, namespaceName) != null);
+  public boolean doesNamespaceExist(String namespaceName) throws IOException {
+    return cache.containsKey(namespaceName);
   }
 
-  public synchronized NamespaceDescriptor get(String name) throws IOException {
-    if (!isTableNamespaceManagerInitialized()) {
-      return null;
-    }
-    return zkNamespaceManager.get(name);
+  public NamespaceDescriptor get(String name) throws IOException {
+    return cache.get(name);
   }
 
-  private NamespaceDescriptor get(Table table, String name) throws IOException {
-    Result res = table.get(new Get(Bytes.toBytes(name)));
-    if (res.isEmpty()) {
-      return null;
-    }
-    byte[] val = CellUtil.cloneValue(res.getColumnLatestCell(
-        HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES, HTableDescriptor.NAMESPACE_COL_DESC_BYTES));
-    return
-        ProtobufUtil.toNamespaceDescriptor(
-            HBaseProtos.NamespaceDescriptor.parseFrom(val));
+  public void addOrUpdateNamespace(NamespaceDescriptor ns) throws IOException {
+    insertNamespaceToMeta(masterServices.getConnection(), ns);
+    cache.put(ns.getName(), ns);
   }
 
-  public void insertIntoNSTable(final NamespaceDescriptor ns) throws IOException {
-    if (nsTable == null) {
-      throw new IOException(this.getClass().getName() + " isn't ready to serve");
-    }
+  public static void insertNamespaceToMeta(Connection conn, NamespaceDescriptor ns)
+      throws IOException {
     byte[] row = Bytes.toBytes(ns.getName());
-    Put p = new Put(row, true);
-    p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
-          .setRow(row)
-          .setFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES)
-          .setQualifier(TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES)
-          .setTimestamp(p.getTimestamp())
-          .setType(Cell.Type.Put)
-          .setValue(ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray())
-          .build());
-    nsTable.put(p);
-  }
-
-  public void updateZKNamespaceManager(final NamespaceDescriptor ns) throws IOException {
-    try {
-      zkNamespaceManager.update(ns);
-    } catch (IOException ex) {
-      String msg = "Failed to update namespace information in ZK.";
-      LOG.error(msg, ex);
-      throw new IOException(msg, ex);
+    Put put = new Put(row, true).addColumn(HConstants.NAMESPACE_FAMILY,
+      HConstants.NAMESPACE_COL_DESC_QUALIFIER,
+      ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray());
+    try (Table table = conn.getTable(TableName.META_TABLE_NAME)) {
+      table.put(put);
     }
   }
 
-  public void removeFromNSTable(final String namespaceName) throws IOException {
-    if (nsTable == null) {
-      throw new IOException(this.getClass().getName() + " isn't ready to serve");
-    }
+  public void deleteNamespace(String namespaceName) throws IOException {
     Delete d = new Delete(Bytes.toBytes(namespaceName));
-    nsTable.delete(d);
-  }
-
-  public void removeFromZKNamespaceManager(final String namespaceName) throws IOException {
-    zkNamespaceManager.remove(namespaceName);
-  }
-
-  public synchronized NavigableSet<NamespaceDescriptor> list() throws IOException {
-    NavigableSet<NamespaceDescriptor> ret =
-        Sets.newTreeSet(NamespaceDescriptor.NAMESPACE_DESCRIPTOR_COMPARATOR);
-    ResultScanner scanner =
-        getNamespaceTable().getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
-    try {
-      for(Result r : scanner) {
-        byte[] val = CellUtil.cloneValue(r.getColumnLatestCell(
-          HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
-          HTableDescriptor.NAMESPACE_COL_DESC_BYTES));
-        ret.add(ProtobufUtil.toNamespaceDescriptor(
-            HBaseProtos.NamespaceDescriptor.parseFrom(val)));
-      }
-    } finally {
-      scanner.close();
-    }
-    return ret;
-  }
-
-  private void createNamespaceTable(MasterServices masterServices) throws IOException {
-    masterServices.createSystemTable(HTableDescriptor.NAMESPACE_TABLEDESC);
-  }
-
-  @SuppressWarnings("deprecation")
-  private boolean isTableNamespaceManagerInitialized() throws IOException {
-    if (initialized) {
-      this.nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Create Namespace in a blocking manner. Keeps trying until
-   * {@link ClusterSchema#HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY} expires.
-   * Note, by-passes notifying coprocessors and name checks. Use for system namespaces only.
-   */
-  private void blockingCreateNamespace(final NamespaceDescriptor namespaceDescriptor)
-      throws IOException {
-    ClusterSchema clusterSchema = this.masterServices.getClusterSchema();
-    long procId = clusterSchema.createNamespace(namespaceDescriptor, null, ProcedurePrepareLatch.getNoopLatch());
-    block(this.masterServices, procId);
-  }
-
-
-  /**
-   * An ugly utility to be removed when refactor TableNamespaceManager.
-   * @throws TimeoutIOException
-   */
-  private static void block(final MasterServices services, final long procId)
-  throws TimeoutIOException {
-    int timeoutInMillis = services.getConfiguration().
-        getInt(ClusterSchema.HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT_KEY,
-            ClusterSchema.DEFAULT_HBASE_MASTER_CLUSTER_SCHEMA_OPERATION_TIMEOUT);
-    long deadlineTs = EnvironmentEdgeManager.currentTime() + timeoutInMillis;
-    ProcedureExecutor<MasterProcedureEnv> procedureExecutor =
-        services.getMasterProcedureExecutor();
-    while(EnvironmentEdgeManager.currentTime() < deadlineTs) {
-      if (procedureExecutor.isFinished(procId)) return;
-      // Sleep some
-      Threads.sleep(10);
-    }
-    throw new TimeoutIOException("Procedure pid=" + procId + " is still running");
-  }
-
-  /**
-   * This method checks if the namespace table is assigned and then
-   * tries to create its Table reference. If it was already created before, it also makes
-   * sure that the connection isn't closed.
-   * @return true if the namespace table manager is ready to serve, false otherwise
-   */
-  @SuppressWarnings("deprecation")
-  public synchronized boolean isTableAvailableAndInitialized()
-  throws IOException {
-    // Did we already get a table? If so, still make sure it's available
-    if (isTableNamespaceManagerInitialized()) {
-      return true;
-    }
-
-    // Now check if the table is assigned, if not then fail fast
-    if (isTableAssigned() && isTableEnabled()) {
-      try {
-        boolean initGoodSofar = true;
-        nsTable = this.masterServices.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME);
-        zkNamespaceManager = new ZKNamespaceManager(masterServices.getZooKeeper());
-        zkNamespaceManager.start();
-
-        if (get(nsTable, NamespaceDescriptor.DEFAULT_NAMESPACE.getName()) == null) {
-          blockingCreateNamespace(NamespaceDescriptor.DEFAULT_NAMESPACE);
-        }
-        if (get(nsTable, NamespaceDescriptor.SYSTEM_NAMESPACE.getName()) == null) {
-          blockingCreateNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE);
-        }
-
-        if (!initGoodSofar) {
-          // some required namespace is created asynchronized. We should complete init later.
-          return false;
-        }
-
-        ResultScanner scanner = nsTable.getScanner(HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES);
-        try {
-          for (Result result : scanner) {
-            byte[] val =  CellUtil.cloneValue(result.getColumnLatestCell(
-                HTableDescriptor.NAMESPACE_FAMILY_INFO_BYTES,
-                HTableDescriptor.NAMESPACE_COL_DESC_BYTES));
-            NamespaceDescriptor ns =
-                ProtobufUtil.toNamespaceDescriptor(
-                    HBaseProtos.NamespaceDescriptor.parseFrom(val));
-            zkNamespaceManager.update(ns);
-          }
-        } finally {
-          scanner.close();
-        }
-        initialized = true;
-        return true;
-      } catch (IOException ie) {
-        LOG.warn("Caught exception in initializing namespace table manager", ie);
-        if (nsTable != null) {
-          nsTable.close();
-        }
-        throw ie;
-      }
+    try (Table table = masterServices.getConnection().getTable(TableName.META_TABLE_NAME)) {
+      table.delete(d);
     }
-    return false;
-  }
-
-  private TableState getTableState() throws IOException {
-    return masterServices.getTableStateManager().getTableState(TableName.NAMESPACE_TABLE_NAME);
-  }
-
-  private boolean isTableEnabled() throws IOException {
-    return getTableState().isEnabled();
+    cache.remove(namespaceName);
   }
 
-  private boolean isTableAssigned() {
-    // TODO: we have a better way now (wait on event)
-    return masterServices.getAssignmentManager()
-        .getRegionStates().hasTableRegionStates(TableName.NAMESPACE_TABLE_NAME);
+  public List<NamespaceDescriptor> list() throws IOException {
+    return cache.values().stream().collect(Collectors.toList());
   }
 
   public void validateTableAndRegionCount(NamespaceDescriptor desc) throws IOException {
     if (getMaxRegions(desc) <= 0) {
-      throw new ConstraintException("The max region quota for " + desc.getName()
-          + " is less than or equal to zero.");
+      throw new ConstraintException(
+        "The max region quota for " + desc.getName() + " is less than or equal to zero.");
     }
     if (getMaxTables(desc) <= 0) {
-      throw new ConstraintException("The max tables quota for " + desc.getName()
-          + " is less than or equal to zero.");
+      throw new ConstraintException(
+        "The max tables quota for " + desc.getName() + " is less than or equal to zero.");
     }
   }
 
@@ -371,31 +206,4 @@ public class TableNamespaceManager implements Stoppable {
     }
     return maxRegions;
   }
-
-  @Override
-  public boolean isStopped() {
-    return this.stopped;
-  }
-
-  @Override
-  public void stop(String why) {
-    if (this.stopped) {
-      return;
-    }
-    try {
-      if (this.zkNamespaceManager != null) {
-        this.zkNamespaceManager.stop();
-      }
-    } catch (IOException ioe) {
-      LOG.warn("Failed NamespaceManager close", ioe);
-    }
-    try {
-      if (this.nsTable != null) {
-        this.nsTable.close();
-      }
-    } catch (IOException ioe) {
-      LOG.warn("Failed Namespace Table close", ioe);
-    }
-    this.stopped = true;
-  }
 }
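
The refactor above drops the ZK-mirrored namespace table in favor of a write-through cache over the ns family of hbase:meta: one scan populates the map in start(), all reads are served from memory, and every mutation persists before touching the map. A minimal standalone sketch of that pattern in plain Java, with a hypothetical NamespaceStore interface standing in for the meta table (all names here are illustrative, not HBase's):

    import java.io.IOException;
    import java.util.Collection;
    import java.util.List;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Hypothetical stand-in for the ns family of hbase:meta.
    interface NamespaceStore {
      List<String> loadAll() throws IOException;
      void upsert(String name) throws IOException;
      void delete(String name) throws IOException;
    }

    final class CachedNamespaceManager {
      private final ConcurrentMap<String, String> cache = new ConcurrentHashMap<>();
      private final NamespaceStore store;

      CachedNamespaceManager(NamespaceStore store) {
        this.store = store;
      }

      // Mirrors start()/loadNamespaceIntoCache(): one full load populates the map.
      void start() throws IOException {
        for (String ns : store.loadAll()) {
          cache.put(ns, ns);
        }
      }

      // Reads never touch the store again.
      boolean exists(String name) {
        return cache.containsKey(name);
      }

      // Writes persist first, then update the cache, so readers never see a
      // namespace that is not durable yet.
      void addOrUpdate(String name) throws IOException {
        store.upsert(name);
        cache.put(name, name);
      }

      void delete(String name) throws IOException {
        store.delete(name);
        cache.remove(name);
      }

      Collection<String> list() {
        return cache.values();
      }
    }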

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
index 3a87bbc..4fba2dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
@@ -366,7 +366,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
   }
 
   private LockInterface setupNamespaceLock() throws IllegalArgumentException {
-    this.tableName = TableName.NAMESPACE_TABLE_NAME;
+    this.tableName = TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME;
     switch (type) {
       case EXCLUSIVE:
         this.opType = TableOperationType.EDIT;

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
index 341d116..e751034 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
@@ -18,18 +18,25 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
+import java.io.IOException;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.TableNamespaceManager;
 import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
- * Base class for all the Namespace procedures that want to use a StateMachineProcedure.
- * It provide some basic helpers like basic locking and basic toStringClassDetails().
+ * Base class for all the Namespace procedures that want to use a StateMachineProcedure. It
+ * provides some basic helpers like basic locking and basic toStringClassDetails().
  */
 @InterfaceAudience.Private
 public abstract class AbstractStateMachineNamespaceProcedure<TState>
-    extends StateMachineProcedure<MasterProcedureEnv, TState>
-    implements TableProcedureInterface {
+    extends StateMachineProcedure<MasterProcedureEnv, TState> implements TableProcedureInterface {
 
   private final ProcedurePrepareLatch syncLatch;
 
@@ -52,7 +59,7 @@ public abstract class AbstractStateMachineNamespaceProcedure<TState>
 
   @Override
   public TableName getTableName() {
-    return TableName.NAMESPACE_TABLE_NAME;
+    return DUMMY_NAMESPACE_TABLE_NAME;
   }
 
   @Override
@@ -83,6 +90,35 @@ public abstract class AbstractStateMachineNamespaceProcedure<TState>
     env.getProcedureScheduler().wakeNamespaceExclusiveLock(this, getNamespaceName());
   }
 
+  /**
+   * Insert/update the row in the ns family of the meta table.
+   * @param env MasterProcedureEnv
+   */
+  protected static void addOrUpdateNamespace(MasterProcedureEnv env, NamespaceDescriptor ns)
+      throws IOException {
+    getTableNamespaceManager(env).addOrUpdateNamespace(ns);
+  }
+
+  protected static TableNamespaceManager getTableNamespaceManager(MasterProcedureEnv env) {
+    return env.getMasterServices().getClusterSchema().getTableNamespaceManager();
+  }
+
+  /**
+   * Create the namespace directory
+   * @param env MasterProcedureEnv
+   * @param nsDescriptor NamespaceDescriptor
+   */
+  protected static void createDirectory(MasterProcedureEnv env, NamespaceDescriptor nsDescriptor)
+      throws IOException {
+    createDirectory(env.getMasterServices().getMasterFileSystem(), nsDescriptor);
+  }
+
+  @VisibleForTesting
+  public static void createDirectory(MasterFileSystem mfs, NamespaceDescriptor nsDescriptor)
+      throws IOException {
+    mfs.getFileSystem().mkdirs(FSUtils.getNamespaceDir(mfs.getRootDir(), nsDescriptor.getName()));
+  }
+
   protected void releaseSyncLatch() {
     ProcedurePrepareLatch.releaseLatch(syncLatch, this);
   }
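
The createDirectory() helper hoisted into this base class ultimately performs an idempotent mkdirs of the namespace directory under the HBase root (the real path comes from FSUtils.getNamespaceDir). A hedged sketch using Hadoop's FileSystem API; the /hbase root and the data/my_ns layout are placeholders for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class NamespaceDirSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = new Path("/hbase");            // placeholder for hbase.rootdir
        Path nsDir = new Path(rootDir, "data/my_ns"); // placeholder namespace dir
        // mkdirs still returns true when the directory already exists, which is
        // what keeps this procedure step safe to re-execute after a crash.
        System.out.println("created " + nsDir + ": " + fs.mkdirs(nsDir));
      }
    }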

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
index 2f56e83..28f7585 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
@@ -15,23 +15,19 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
-
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceExistException;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.hadoop.hbase.master.TableNamespaceManager;
-import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.CreateNamespaceState;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * The procedure to create a new namespace.
@@ -42,10 +38,8 @@ public class CreateNamespaceProcedure
   private static final Logger LOG = LoggerFactory.getLogger(CreateNamespaceProcedure.class);
 
   private NamespaceDescriptor nsDescriptor;
-  private Boolean traceEnabled;
 
   public CreateNamespaceProcedure() {
-    this.traceEnabled = null;
   }
 
   public CreateNamespaceProcedure(final MasterProcedureEnv env,
@@ -57,43 +51,40 @@ public class CreateNamespaceProcedure
       final NamespaceDescriptor nsDescriptor, ProcedurePrepareLatch latch) {
     super(env, latch);
     this.nsDescriptor = nsDescriptor;
-    this.traceEnabled = null;
   }
 
   @Override
   protected Flow executeFromState(final MasterProcedureEnv env, final CreateNamespaceState state)
       throws InterruptedException {
-    if (isTraceEnabled()) {
-      LOG.trace(this + " execute state=" + state);
-    }
+    LOG.trace("{} execute state={}", this, state);
     try {
       switch (state) {
-      case CREATE_NAMESPACE_PREPARE:
-        boolean success = prepareCreate(env);
-        releaseSyncLatch();
-        if (!success) {
-          assert isFailed() : "createNamespace should have an exception here";
+        case CREATE_NAMESPACE_PREPARE:
+          boolean success = prepareCreate(env);
+          releaseSyncLatch();
+          if (!success) {
+            assert isFailed() : "createNamespace should have an exception here";
+            return Flow.NO_MORE_STATE;
+          }
+          setNextState(CreateNamespaceState.CREATE_NAMESPACE_CREATE_DIRECTORY);
+          break;
+        case CREATE_NAMESPACE_CREATE_DIRECTORY:
+          createDirectory(env, nsDescriptor);
+          setNextState(CreateNamespaceState.CREATE_NAMESPACE_INSERT_INTO_NS_TABLE);
+          break;
+        case CREATE_NAMESPACE_INSERT_INTO_NS_TABLE:
+          addOrUpdateNamespace(env, nsDescriptor);
+          setNextState(CreateNamespaceState.CREATE_NAMESPACE_SET_NAMESPACE_QUOTA);
+          break;
+        case CREATE_NAMESPACE_UPDATE_ZK:
+          // not used any more
+          setNextState(CreateNamespaceState.CREATE_NAMESPACE_SET_NAMESPACE_QUOTA);
+          break;
+        case CREATE_NAMESPACE_SET_NAMESPACE_QUOTA:
+          setNamespaceQuota(env, nsDescriptor);
           return Flow.NO_MORE_STATE;
-        }
-        setNextState(CreateNamespaceState.CREATE_NAMESPACE_CREATE_DIRECTORY);
-        break;
-      case CREATE_NAMESPACE_CREATE_DIRECTORY:
-        createDirectory(env, nsDescriptor);
-        setNextState(CreateNamespaceState.CREATE_NAMESPACE_INSERT_INTO_NS_TABLE);
-        break;
-      case CREATE_NAMESPACE_INSERT_INTO_NS_TABLE:
-        insertIntoNSTable(env, nsDescriptor);
-        setNextState(CreateNamespaceState.CREATE_NAMESPACE_UPDATE_ZK);
-        break;
-      case CREATE_NAMESPACE_UPDATE_ZK:
-        updateZKNamespaceManager(env, nsDescriptor);
-        setNextState(CreateNamespaceState.CREATE_NAMESPACE_SET_NAMESPACE_QUOTA);
-        break;
-      case CREATE_NAMESPACE_SET_NAMESPACE_QUOTA:
-        setNamespaceQuota(env, nsDescriptor);
-        return Flow.NO_MORE_STATE;
-      default:
-        throw new UnsupportedOperationException(this + " unhandled state=" + state);
+        default:
+          throw new UnsupportedOperationException(this + " unhandled state=" + state);
       }
     } catch (IOException e) {
       if (isRollbackSupported(state)) {
@@ -145,39 +136,26 @@ public class CreateNamespaceProcedure
   }
 
   @Override
-  protected void serializeStateData(ProcedureStateSerializer serializer)
-      throws IOException {
+  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
     super.serializeStateData(serializer);
 
     MasterProcedureProtos.CreateNamespaceStateData.Builder createNamespaceMsg =
-        MasterProcedureProtos.CreateNamespaceStateData.newBuilder().setNamespaceDescriptor(
-          ProtobufUtil.toProtoNamespaceDescriptor(this.nsDescriptor));
+      MasterProcedureProtos.CreateNamespaceStateData.newBuilder()
+        .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(this.nsDescriptor));
     serializer.serialize(createNamespaceMsg.build());
   }
 
   @Override
-  protected void deserializeStateData(ProcedureStateSerializer serializer)
-      throws IOException {
+  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
     super.deserializeStateData(serializer);
 
     MasterProcedureProtos.CreateNamespaceStateData createNamespaceMsg =
-        serializer.deserialize(MasterProcedureProtos.CreateNamespaceStateData.class);
+      serializer.deserialize(MasterProcedureProtos.CreateNamespaceStateData.class);
     nsDescriptor = ProtobufUtil.toNamespaceDescriptor(createNamespaceMsg.getNamespaceDescriptor());
   }
 
-  private boolean isBootstrapNamespace() {
-    return nsDescriptor.equals(NamespaceDescriptor.DEFAULT_NAMESPACE) ||
-        nsDescriptor.equals(NamespaceDescriptor.SYSTEM_NAMESPACE);
-  }
-
   @Override
   protected boolean waitInitialized(MasterProcedureEnv env) {
-    // Namespace manager might not be ready if master is not fully initialized,
-    // return false to reject user namespace creation; return true for default
-    // and system namespace creation (this is part of master initialization).
-    if (isBootstrapNamespace()) {
-      return false;
-    }
     return env.waitInitialized(this);
   }
 
@@ -202,12 +180,11 @@ public class CreateNamespaceProcedure
   /**
    * Action before any real action of creating namespace.
    * @param env MasterProcedureEnv
-   * @throws IOException
    */
   private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
     if (getTableNamespaceManager(env).doesNamespaceExist(nsDescriptor.getName())) {
       setFailure("master-create-namespace",
-          new NamespaceExistException("Namespace " + nsDescriptor.getName() + " already exists"));
+        new NamespaceExistException("Namespace " + nsDescriptor.getName() + " already exists"));
       return false;
     }
     getTableNamespaceManager(env).validateTableAndRegionCount(nsDescriptor);
@@ -215,77 +192,14 @@ public class CreateNamespaceProcedure
   }
 
   /**
-   * Create the namespace directory
-   * @param env MasterProcedureEnv
-   * @param nsDescriptor NamespaceDescriptor
-   * @throws IOException
-   */
-  protected static void createDirectory(
-      final MasterProcedureEnv env,
-      final NamespaceDescriptor nsDescriptor) throws IOException {
-    MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
-    mfs.getFileSystem().mkdirs(
-      FSUtils.getNamespaceDir(mfs.getRootDir(), nsDescriptor.getName()));
-  }
-
-  /**
-   * Insert the row into ns table
-   * @param env MasterProcedureEnv
-   * @param nsDescriptor NamespaceDescriptor
-   * @throws IOException
-   */
-  protected static void insertIntoNSTable(
-      final MasterProcedureEnv env,
-      final NamespaceDescriptor nsDescriptor) throws IOException {
-    getTableNamespaceManager(env).insertIntoNSTable(nsDescriptor);
-  }
-
-  /**
-   * Update ZooKeeper.
-   * @param env MasterProcedureEnv
-   * @param nsDescriptor NamespaceDescriptor
-   * @throws IOException
-   */
-  protected static void updateZKNamespaceManager(
-      final MasterProcedureEnv env,
-      final NamespaceDescriptor nsDescriptor) throws IOException {
-    getTableNamespaceManager(env).updateZKNamespaceManager(nsDescriptor);
-  }
-
-  /**
    * Set quota for the namespace
    * @param env MasterProcedureEnv
    * @param nsDescriptor NamespaceDescriptor
-   * @throws IOException
    **/
-  protected static void setNamespaceQuota(
-      final MasterProcedureEnv env,
+  private static void setNamespaceQuota(final MasterProcedureEnv env,
       final NamespaceDescriptor nsDescriptor) throws IOException {
     if (env.getMasterServices().isInitialized()) {
       env.getMasterServices().getMasterQuotaManager().setNamespaceQuota(nsDescriptor);
     }
   }
-
-  private static TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) {
-    return env.getMasterServices().getClusterSchema().getTableNamespaceManager();
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @return traceEnabled
-   */
-  private Boolean isTraceEnabled() {
-    if (traceEnabled == null) {
-      traceEnabled = LOG.isTraceEnabled();
-    }
-    return traceEnabled;
-  }
-
-  @Override
-  protected boolean shouldWaitClientAck(MasterProcedureEnv env) {
-    // hbase and default namespaces are created on bootstrap internally by the system
-    // the client does not know about this procedures.
-    return !isBootstrapNamespace();
-  }
 }
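
Note how CREATE_NAMESPACE_UPDATE_ZK survives in the switch above as a "not used any more" branch: a procedure serialized by an older master may resume in that state, so the enum value must stay routable even though ZK is no longer updated. A plain-Java sketch of that retired-state pattern, with abbreviated illustrative state names rather than the real protobuf enum:

    // Retired states remain in the enum so old serialized procedures still
    // deserialize; executing one simply forwards to the next live state.
    enum CreateNsState { PREPARE, CREATE_DIR, INSERT_NS, UPDATE_ZK, SET_QUOTA, DONE }

    final class CreateNsFlow {
      static CreateNsState next(CreateNsState state) {
        switch (state) {
          case PREPARE:    return CreateNsState.CREATE_DIR;
          case CREATE_DIR: return CreateNsState.INSERT_NS;
          case INSERT_NS:  return CreateNsState.SET_QUOTA; // new flow skips ZK
          case UPDATE_ZK:  return CreateNsState.SET_QUOTA; // legacy resume point only
          case SET_QUOTA:  return CreateNsState.DONE;
          default:         return CreateNsState.DONE;
        }
      }
    }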

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
index 8369a19..d3749a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DeleteNamespaceProcedure.java
@@ -15,29 +15,27 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
-import org.apache.hadoop.hbase.master.TableNamespaceManager;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DeleteNamespaceState;
-import org.apache.hadoop.hbase.util.FSUtils;
 
 /**
  * The procedure to remove a namespace.
@@ -49,64 +47,59 @@ public class DeleteNamespaceProcedure
 
   private NamespaceDescriptor nsDescriptor;
   private String namespaceName;
-  private Boolean traceEnabled;
 
   public DeleteNamespaceProcedure() {
-    this.nsDescriptor = null;
-    this.traceEnabled = null;
   }
 
-  public DeleteNamespaceProcedure(final MasterProcedureEnv env, final String namespaceName) {
+  public DeleteNamespaceProcedure(MasterProcedureEnv env, String namespaceName) {
     this(env, namespaceName, null);
   }
 
-  public DeleteNamespaceProcedure(final MasterProcedureEnv env, final String namespaceName,
+  public DeleteNamespaceProcedure(MasterProcedureEnv env, String namespaceName,
       final ProcedurePrepareLatch latch) {
     super(env, latch);
     this.namespaceName = namespaceName;
-    this.nsDescriptor = null;
-    this.traceEnabled = null;
   }
 
   @Override
-  protected Flow executeFromState(final MasterProcedureEnv env, final DeleteNamespaceState state)
+  protected Flow executeFromState(MasterProcedureEnv env, DeleteNamespaceState state)
       throws InterruptedException {
     LOG.info(this.toString());
     try {
       switch (state) {
-      case DELETE_NAMESPACE_PREPARE:
-        boolean present = prepareDelete(env);
-        releaseSyncLatch();
-        if (!present) {
-          assert isFailed() : "Delete namespace should have an exception here";
+        case DELETE_NAMESPACE_PREPARE:
+          boolean present = prepareDelete(env);
+          releaseSyncLatch();
+          if (!present) {
+            assert isFailed() : "Delete namespace should have an exception here";
+            return Flow.NO_MORE_STATE;
+          }
+          setNextState(DeleteNamespaceState.DELETE_NAMESPACE_DELETE_FROM_NS_TABLE);
+          break;
+        case DELETE_NAMESPACE_DELETE_FROM_NS_TABLE:
+          deleteNamespace(env, namespaceName);
+          setNextState(DeleteNamespaceState.DELETE_NAMESPACE_DELETE_DIRECTORIES);
+          break;
+        case DELETE_NAMESPACE_REMOVE_FROM_ZK:
+          // not used any more
+          setNextState(DeleteNamespaceState.DELETE_NAMESPACE_DELETE_DIRECTORIES);
+          break;
+        case DELETE_NAMESPACE_DELETE_DIRECTORIES:
+          deleteDirectory(env, namespaceName);
+          setNextState(DeleteNamespaceState.DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA);
+          break;
+        case DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA:
+          removeNamespaceQuota(env, namespaceName);
           return Flow.NO_MORE_STATE;
-        }
-        setNextState(DeleteNamespaceState.DELETE_NAMESPACE_DELETE_FROM_NS_TABLE);
-        break;
-      case DELETE_NAMESPACE_DELETE_FROM_NS_TABLE:
-        deleteFromNSTable(env, namespaceName);
-        setNextState(DeleteNamespaceState.DELETE_NAMESPACE_REMOVE_FROM_ZK);
-        break;
-      case DELETE_NAMESPACE_REMOVE_FROM_ZK:
-        removeFromZKNamespaceManager(env, namespaceName);
-        setNextState(DeleteNamespaceState.DELETE_NAMESPACE_DELETE_DIRECTORIES);
-        break;
-      case DELETE_NAMESPACE_DELETE_DIRECTORIES:
-        deleteDirectory(env, namespaceName);
-        setNextState(DeleteNamespaceState.DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA);
-        break;
-      case DELETE_NAMESPACE_REMOVE_NAMESPACE_QUOTA:
-        removeNamespaceQuota(env, namespaceName);
-        return Flow.NO_MORE_STATE;
-      default:
-        throw new UnsupportedOperationException(this + " unhandled state=" + state);
+        default:
+          throw new UnsupportedOperationException(this + " unhandled state=" + state);
       }
     } catch (IOException e) {
       if (isRollbackSupported(state)) {
         setFailure("master-delete-namespace", e);
       } else {
-        LOG.warn("Retriable error trying to delete namespace " + namespaceName +
-          " (in state=" + state + ")", e);
+        LOG.warn("Retriable error trying to delete namespace " + namespaceName + " (in state=" +
+          state + ")", e);
       }
     }
     return Flow.HAS_MORE_STATE;
@@ -139,7 +132,7 @@ public class DeleteNamespaceProcedure
 
   @Override
   protected DeleteNamespaceState getState(final int stateId) {
-    return DeleteNamespaceState.valueOf(stateId);
+    return DeleteNamespaceState.forNumber(stateId);
   }
 
   @Override
@@ -153,30 +146,28 @@ public class DeleteNamespaceProcedure
   }
 
   @Override
-  protected void serializeStateData(ProcedureStateSerializer serializer)
-      throws IOException {
+  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
     super.serializeStateData(serializer);
 
     MasterProcedureProtos.DeleteNamespaceStateData.Builder deleteNamespaceMsg =
-        MasterProcedureProtos.DeleteNamespaceStateData.newBuilder().setNamespaceName(namespaceName);
+      MasterProcedureProtos.DeleteNamespaceStateData.newBuilder().setNamespaceName(namespaceName);
     if (this.nsDescriptor != null) {
-      deleteNamespaceMsg.setNamespaceDescriptor(
-        ProtobufUtil.toProtoNamespaceDescriptor(this.nsDescriptor));
+      deleteNamespaceMsg
+        .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(this.nsDescriptor));
     }
     serializer.serialize(deleteNamespaceMsg.build());
   }
 
   @Override
-  protected void deserializeStateData(ProcedureStateSerializer serializer)
-      throws IOException {
+  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
     super.deserializeStateData(serializer);
 
     MasterProcedureProtos.DeleteNamespaceStateData deleteNamespaceMsg =
-        serializer.deserialize(MasterProcedureProtos.DeleteNamespaceStateData.class);
+      serializer.deserialize(MasterProcedureProtos.DeleteNamespaceStateData.class);
     namespaceName = deleteNamespaceMsg.getNamespaceName();
     if (deleteNamespaceMsg.hasNamespaceDescriptor()) {
       nsDescriptor =
-          ProtobufUtil.toNamespaceDescriptor(deleteNamespaceMsg.getNamespaceDescriptor());
+        ProtobufUtil.toNamespaceDescriptor(deleteNamespaceMsg.getNamespaceDescriptor());
     }
   }
 
@@ -193,7 +184,6 @@ public class DeleteNamespaceProcedure
   /**
    * Action before any real action of deleting namespace.
    * @param env MasterProcedureEnv
-   * @throws IOException
    */
   private boolean prepareDelete(final MasterProcedureEnv env) throws IOException {
     if (getTableNamespaceManager(env).doesNamespaceExist(namespaceName) == false) {
@@ -201,8 +191,8 @@ public class DeleteNamespaceProcedure
       return false;
     }
     if (NamespaceDescriptor.RESERVED_NAMESPACES.contains(namespaceName)) {
-      setFailure("master-delete-namespace", new ConstraintException(
-          "Reserved namespace "+ namespaceName +" cannot be removed."));
+      setFailure("master-delete-namespace",
+        new ConstraintException("Reserved namespace " + namespaceName + " cannot be removed."));
       return false;
     }
 
@@ -214,9 +204,9 @@ public class DeleteNamespaceProcedure
       return false;
     }
     if (tableCount > 0) {
-      setFailure("master-delete-namespace", new ConstraintException(
-          "Only empty namespaces can be removed. Namespace "+ namespaceName + " has "
-          + tableCount +" tables"));
+      setFailure("master-delete-namespace",
+        new ConstraintException("Only empty namespaces can be removed. Namespace " + namespaceName +
+          " has " + tableCount + " tables"));
       return false;
     }
 
@@ -226,76 +216,28 @@ public class DeleteNamespaceProcedure
   }
 
   /**
-   * delete the row from namespace table
+   * Delete the row from the ns family of the meta table.
    * @param env MasterProcedureEnv
    * @param namespaceName name of the namespace in string format
-   * @throws IOException
    */
-  protected static void deleteFromNSTable(
-      final MasterProcedureEnv env,
-      final String namespaceName) throws IOException {
-    getTableNamespaceManager(env).removeFromNSTable(namespaceName);
-  }
-
-  /**
-   * undo the delete
-   * @param env MasterProcedureEnv
-   * @throws IOException
-   */
-  private void undoDeleteFromNSTable(final MasterProcedureEnv env) {
-    try {
-      if (nsDescriptor != null) {
-        CreateNamespaceProcedure.insertIntoNSTable(env, nsDescriptor);
-      }
-    } catch (Exception e) {
-      // Ignore
-      LOG.debug("Rollback of deleteFromNSTable throws exception: " + e);
-    }
-  }
-
-  /**
-   * remove from ZooKeeper.
-   * @param env MasterProcedureEnv
-   * @param namespaceName name of the namespace in string format
-   * @throws IOException
-   */
-  protected static void removeFromZKNamespaceManager(
-      final MasterProcedureEnv env,
-      final String namespaceName) throws IOException {
-    getTableNamespaceManager(env).removeFromZKNamespaceManager(namespaceName);
-  }
-
-  /**
-   * undo the remove from ZooKeeper
-   * @param env MasterProcedureEnv
-   * @throws IOException
-   */
-  private void undoRemoveFromZKNamespaceManager(final MasterProcedureEnv env) {
-    try {
-      if (nsDescriptor != null) {
-        CreateNamespaceProcedure.updateZKNamespaceManager(env, nsDescriptor);
-      }
-    } catch (Exception e) {
-      // Ignore
-      LOG.debug("Rollback of removeFromZKNamespaceManager throws exception: " + e);
-    }
+  private static void deleteNamespace(MasterProcedureEnv env, String namespaceName)
+      throws IOException {
+    getTableNamespaceManager(env).deleteNamespace(namespaceName);
   }
 
   /**
    * Delete the namespace directories from the file system
    * @param env MasterProcedureEnv
    * @param namespaceName name of the namespace in string format
-   * @throws IOException
    */
-  protected static void deleteDirectory(
-      final MasterProcedureEnv env,
-      final String namespaceName) throws IOException {
+  private static void deleteDirectory(MasterProcedureEnv env, String namespaceName)
+      throws IOException {
     MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
     FileSystem fs = mfs.getFileSystem();
     Path p = FSUtils.getNamespaceDir(mfs.getRootDir(), namespaceName);
 
     try {
-      for(FileStatus status : fs.listStatus(p)) {
+      for (FileStatus status : fs.listStatus(p)) {
         if (!HConstants.HBASE_NON_TABLE_DIRS.contains(status.getPath().getName())) {
           throw new IOException("Namespace directory contains table dir: " + status.getPath());
         }
@@ -310,57 +252,12 @@ public class DeleteNamespaceProcedure
   }
 
   /**
-   * undo delete directory
-   * @param env MasterProcedureEnv
-   * @throws IOException
-   */
-  private void rollbackDeleteDirectory(final MasterProcedureEnv env) throws IOException {
-    try {
-      CreateNamespaceProcedure.createDirectory(env, nsDescriptor);
-    } catch (Exception e) {
-      // Ignore exception
-      LOG.debug("Rollback of deleteDirectory throws exception: " + e);
-    }
-  }
-
-  /**
    * remove quota for the namespace
    * @param env MasterProcedureEnv
    * @param namespaceName name of the namespace in string format
-   * @throws IOException
    **/
-  protected static void removeNamespaceQuota(
-      final MasterProcedureEnv env,
-      final String namespaceName) throws IOException {
+  private static void removeNamespaceQuota(final MasterProcedureEnv env, final String namespaceName)
+      throws IOException {
     env.getMasterServices().getMasterQuotaManager().removeNamespaceQuota(namespaceName);
   }
-
-  /**
-   * undo remove quota for the namespace
-   * @param env MasterProcedureEnv
-   * @throws IOException
-   **/
-  private void rollbacRemoveNamespaceQuota(final MasterProcedureEnv env) throws IOException {
-    try {
-      CreateNamespaceProcedure.setNamespaceQuota(env, nsDescriptor);
-    } catch (Exception e) {
-      // Ignore exception
-      LOG.debug("Rollback of removeNamespaceQuota throws exception: " + e);
-    }
-  }
-
-  private static TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) {
-    return env.getMasterServices().getClusterSchema().getTableNamespaceManager();
-  }
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @return traceEnabled
-   */
-  private Boolean isTraceEnabled() {
-    if (traceEnabled == null) {
-      traceEnabled = LOG.isTraceEnabled();
-    }
-    return traceEnabled;
-  }
 }
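
From a client's point of view, this procedure is what backs Admin.deleteNamespace(), and the prepareDelete() checks above are why the call fails with ConstraintException for reserved or non-empty namespaces. A minimal usage sketch ("my_ns" is a placeholder namespace name):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteNamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn =
               ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Submits a DeleteNamespaceProcedure on the master; throws if the
          // namespace is reserved (default/hbase) or still contains tables.
          admin.deleteNamespace("my_ns");
        }
      }
    }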

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
index 024f3ea..5ca7972 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/InitMetaProcedure.java
@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.hbase.master.procedure;
 
+import static org.apache.hadoop.hbase.NamespaceDescriptor.DEFAULT_NAMESPACE;
+import static org.apache.hadoop.hbase.NamespaceDescriptor.SYSTEM_NAMESPACE;
+import static org.apache.hadoop.hbase.master.TableNamespaceManager.insertNamespaceToMeta;
+import static org.apache.hadoop.hbase.master.procedure.AbstractStateMachineNamespaceProcedure.createDirectory;
+
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.concurrent.CountDownLatch;
@@ -25,11 +30,15 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder;
 import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.InitMetaState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.InitMetaStateData;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
 
 /**
  * This procedure is used to initialize meta table for a new hbase deploy. It will just schedule an
@@ -38,8 +47,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.I
 @InterfaceAudience.Private
 public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMetaState> {
 
+  private static final Logger LOG = LoggerFactory.getLogger(InitMetaProcedure.class);
+
   private CountDownLatch latch = new CountDownLatch(1);
 
+  private int attempts;
+
   @Override
   public TableName getTableName() {
     return TableName.META_TABLE_NAME;
@@ -53,10 +66,32 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMe
   @Override
   protected Flow executeFromState(MasterProcedureEnv env, InitMetaState state)
       throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
+    LOG.debug("Execute {}", this);
     switch (state) {
       case INIT_META_ASSIGN_META:
+        LOG.info("Going to assign meta");
         addChildProcedure(env.getAssignmentManager()
           .createAssignProcedures(Arrays.asList(RegionInfoBuilder.FIRST_META_REGIONINFO)));
+        setNextState(InitMetaState.INIT_META_CREATE_NAMESPACES);
+        return Flow.HAS_MORE_STATE;
+      case INIT_META_CREATE_NAMESPACES:
+        LOG.info("Going to create {} and {} namespaces", DEFAULT_NAMESPACE, SYSTEM_NAMESPACE);
+        try {
+          createDirectory(env, DEFAULT_NAMESPACE);
+          createDirectory(env, SYSTEM_NAMESPACE);
+          // Here the TableNamespaceManager has not been initialized yet, so we have to insert the
+          // records directly into the meta table; the TableNamespaceManager will load these two
+          // namespaces when it starts.
+          insertNamespaceToMeta(env.getMasterServices().getConnection(), DEFAULT_NAMESPACE);
+          insertNamespaceToMeta(env.getMasterServices().getConnection(), SYSTEM_NAMESPACE);
+        } catch (IOException e) {
+          long backoff = ProcedureUtil.getBackoffTimeMs(this.attempts++);
+          LOG.warn("Failed to init default and system namespaces, suspend {}secs", backoff, e);
+          setTimeout(Math.toIntExact(backoff));
+          setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
+          skipPersistence();
+          throw new ProcedureSuspendedException();
+        }
         return Flow.NO_MORE_STATE;
       default:
         throw new UnsupportedOperationException("unhandled state=" + state);
@@ -70,6 +105,13 @@ public class InitMetaProcedure extends AbstractStateMachineTableProcedure<InitMe
   }
 
   @Override
+  protected synchronized boolean setTimeoutFailure(MasterProcedureEnv env) {
+    setState(ProcedureProtos.ProcedureState.RUNNABLE);
+    env.getProcedureScheduler().addFront(this);
+    return false;
+  }
+
+  @Override
   protected LockState acquireLock(MasterProcedureEnv env) {
     if (env.getProcedureScheduler().waitTableExclusiveLock(this, getTableName())) {
       return LockState.LOCK_EVENT_WAIT;

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index edf015a..4bf16ec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -808,11 +808,11 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     schedLock();
     try {
       final LockAndQueue systemNamespaceTableLock =
-        locking.getTableLock(TableName.NAMESPACE_TABLE_NAME);
+        locking.getTableLock(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME);
       if (!systemNamespaceTableLock.trySharedLock(procedure)) {
         waitProcedure(systemNamespaceTableLock, procedure);
         logLockedResource(LockedResourceType.TABLE,
-          TableName.NAMESPACE_TABLE_NAME.getNameAsString());
+          TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME.getNameAsString());
         return true;
       }
 
@@ -840,13 +840,14 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     try {
       final LockAndQueue namespaceLock = locking.getNamespaceLock(namespace);
       final LockAndQueue systemNamespaceTableLock =
-          locking.getTableLock(TableName.NAMESPACE_TABLE_NAME);
+        locking.getTableLock(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME);
       int waitingCount = 0;
       if (namespaceLock.releaseExclusiveLock(procedure)) {
         waitingCount += wakeWaitingProcedures(namespaceLock);
       }
       if (systemNamespaceTableLock.releaseSharedLock()) {
-        addToRunQueue(tableRunQueue, getTableQueue(TableName.NAMESPACE_TABLE_NAME),
+        addToRunQueue(tableRunQueue,
+          getTableQueue(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME),
           () -> procedure + " released namespace exclusive lock");
         waitingCount += wakeWaitingProcedures(systemNamespaceTableLock);
       }
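
With the real namespace table gone, namespace procedures still need a common table-level lock to serialize against, so a reserved dummy TableName now keys that lock entry in the scheduler. A plain-Java sketch of the sentinel-key idea (the sentinel name and lock shape below are illustrative, not the scheduler's actual types):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    final class LockTableSketch {
      static final String DUMMY_NS_TABLE = "dummy.namespace"; // illustrative sentinel key
      private final Map<String, ReadWriteLock> locks = new ConcurrentHashMap<>();

      private ReadWriteLock lockFor(String key) {
        return locks.computeIfAbsent(key, k -> new ReentrantReadWriteLock());
      }

      // Every namespace procedure takes the shared side of the sentinel entry,
      // mirroring how the scheduler keys its lock by DUMMY_NAMESPACE_TABLE_NAME.
      void lockNamespaceShared() {
        lockFor(DUMMY_NS_TABLE).readLock().lock();
      }

      void unlockNamespaceShared() {
        lockFor(DUMMY_NS_TABLE).readLock().unlock();
      }
    }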


[47/51] [abbrv] hbase git commit: HBASE-21559 The RestoreSnapshotFromClientTestBase related UT are flaky

Posted by el...@apache.org.
HBASE-21559 The RestoreSnapshotFromClientTestBase related UT are flaky

Signed-off-by: zhangduo <zh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dfb9ae8e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dfb9ae8e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dfb9ae8e

Branch: refs/heads/HBASE-20952
Commit: dfb9ae8e0e69ad84962b8768190f6891827767fa
Parents: 170df27
Author: huzheng <op...@gmail.com>
Authored: Thu Dec 6 20:35:30 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Fri Dec 7 08:22:22 2018 +0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/master/snapshot/SnapshotManager.java    | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/dfb9ae8e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index ae9b6fb..2b963b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -27,6 +27,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -150,7 +151,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
   // The map is always accessed and modified under the object lock using synchronized.
   // snapshotTable() will insert an Handler in the table.
   // isSnapshotDone() will remove the handler requested if the operation is finished.
-  private Map<TableName, SnapshotSentinel> snapshotHandlers = new HashMap<>();
+  private Map<TableName, SnapshotSentinel> snapshotHandlers = new ConcurrentHashMap<>();
 
   // Restore map, with table name as key, procedure ID as value.
   // The map is always accessed and modified under the object lock using synchronized.
@@ -419,7 +420,7 @@ public class SnapshotManager extends MasterProcedureManager implements Stoppable
    * @param tableName name of the table being snapshotted.
    * @return <tt>true</tt> if there is a snapshot in progress on the specified table.
    */
-  public synchronized boolean isTakingSnapshot(final TableName tableName) {
+  public boolean isTakingSnapshot(final TableName tableName) {
     SnapshotSentinel handler = this.snapshotHandlers.get(tableName);
     return handler != null && !handler.isFinished();
   }
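
The fix is small but the reasoning matters: once snapshotHandlers is a ConcurrentHashMap, the read-only isTakingSnapshot() no longer needs the object monitor, so callers cannot block (or deadlock) behind a long-running synchronized snapshot operation. A stripped-down sketch of the same pattern in plain Java:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    final class SnapshotTrackerSketch {
      private final Map<String, Boolean> inProgress = new ConcurrentHashMap<>();

      void begin(String table)  { inProgress.put(table, Boolean.TRUE); }
      void finish(String table) { inProgress.remove(table); }

      // Lock-free read: ConcurrentHashMap.get() is thread-safe on its own, so
      // no synchronized is needed for a pure membership check.
      boolean isTakingSnapshot(String table) {
        return Boolean.TRUE.equals(inProgress.get(table));
      }
    }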


[11/51] [abbrv] hbase git commit: HBASE-21377 Add debug log for procedure stack id related operations

Posted by el...@apache.org.
HBASE-21377 Add debug log for procedure stack id related operations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/83dc38a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/83dc38a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/83dc38a1

Branch: refs/heads/HBASE-20952
Commit: 83dc38a1dffed76ea4d36f18409dd5bc86a3e19e
Parents: b8271c0
Author: Duo Zhang <zh...@apache.org>
Authored: Mon Nov 19 17:11:07 2018 +0800
Committer: Duo Zhang <zh...@apache.org>
Committed: Mon Nov 19 18:55:41 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/procedure2/RootProcedureState.java  | 5 +++++
 .../hadoop/hbase/procedure2/store/wal/WALProcedureTree.java     | 3 +++
 2 files changed, 8 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/83dc38a1/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
index 2fc0030..a7cdaab 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java
@@ -24,6 +24,8 @@ import java.util.List;
 import java.util.Set;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 
@@ -42,6 +44,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedu
 @InterfaceStability.Evolving
 class RootProcedureState<TEnvironment> {
 
+  private static final Logger LOG = LoggerFactory.getLogger(RootProcedureState.class);
+
   private enum State {
     RUNNING,         // The Procedure is running or ready to run
     FAILED,          // The Procedure failed, waiting for the rollback executing
@@ -146,6 +150,7 @@ class RootProcedureState<TEnvironment> {
       subprocStack = new ArrayList<>();
     }
     proc.addStackIndex(subprocStack.size());
+    LOG.debug("Add procedure {} as the {}th rollback step", proc, subprocStack.size());
     subprocStack.add(proc);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/83dc38a1/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.java
index c32bd7f..6e624b4 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureTree.java
@@ -145,6 +145,9 @@ public final class WALProcedureTree {
 
   private void collectStackId(Entry entry, Map<Integer, List<Entry>> stackId2Proc,
       MutableInt maxStackId) {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Procedure {} stack ids={}", entry, entry.proc.getStackIdList());
+    }
     for (int i = 0, n = entry.proc.getStackIdCount(); i < n; i++) {
       int stackId = entry.proc.getStackId(i);
       if (stackId > maxStackId.intValue()) {
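
For context, the stack ids logged above record each procedure's position in the root
procedure's rollback stack, and rollback replays those steps from the top down. A simplified
sketch of that bookkeeping (illustrative names, not the real Procedure API):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Each sub-procedure is recorded as the Nth rollback step; on failure the
// steps are undone in reverse order of their stack indexes.
class RollbackStack {
  private final List<String> steps = new ArrayList<>();

  // Analogous to addRollbackStep(proc): the returned index is the
  // "{}th rollback step" in the new debug line.
  int push(String procName) {
    int stackIndex = steps.size();
    steps.add(procName);
    return stackIndex;
  }

  List<String> rollbackOrder() {
    List<String> reversed = new ArrayList<>(steps);
    Collections.reverse(reversed);
    return reversed;
  }
}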


[19/51] [abbrv] hbase git commit: HBASE-21508 Ignore the reportRegionStateTransition call from a dead server

Posted by el...@apache.org.
HBASE-21508 Ignore the reportRegionStateTransition call from a dead server

Signed-off-by: Guanghao Zhang <zg...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a0e3cb6c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a0e3cb6c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a0e3cb6c

Branch: refs/heads/HBASE-20952
Commit: a0e3cb6c0cddcb27ac82d01b21aeb0f547c6f13c
Parents: 27c0bf5
Author: Duo Zhang <zh...@apache.org>
Authored: Mon Nov 26 11:28:08 2018 +0800
Committer: Duo Zhang <zh...@apache.org>
Committed: Mon Nov 26 14:32:14 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/master/HMaster.java |   3 +-
 .../master/assignment/AssignmentManager.java    | 205 ++++++++++---------
 .../hbase/master/assignment/RegionStates.java   |  16 --
 .../hbase/master/assignment/ServerState.java    |   5 +
 .../master/assignment/ServerStateNode.java      |  49 +++--
 .../assignment/TransitRegionStateProcedure.java |   8 +-
 .../master/procedure/ProcedureSyncWait.java     | 105 ++++++----
 .../master/procedure/ServerCrashProcedure.java  |   2 +-
 .../apache/hadoop/hbase/client/TestAdmin2.java  |   4 +-
 .../master/assignment/MockMasterServices.java   |  26 +--
 .../assignment/TestAssignmentManager.java       |   9 -
 .../assignment/TestAssignmentManagerBase.java   |   2 +-
 ...portRegionStateTransitionFromDeadServer.java | 201 ++++++++++++++++++
 13 files changed, 415 insertions(+), 220 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index e1d3740..ae04283 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3800,8 +3800,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     if (offload) {
       final List<ServerName> destServers = this.serverManager.createDestinationServersList();
       for (ServerName server : serversAdded) {
-        final List<RegionInfo> regionsOnServer =
-          this.assignmentManager.getRegionStates().getServerRegionInfoSet(server);
+        final List<RegionInfo> regionsOnServer = this.assignmentManager.getRegionsOnServer(server);
         for (RegionInfo hri : regionsOnServer) {
           ServerName dest = balancer.randomAssignment(hri, destServers);
           if (dest == null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
index 37e5f0c..a564ea9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignmentManager.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.master.MetricsAssignmentManager;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.master.ServerListener;
 import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -99,7 +98,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
  * Unassigns are triggered by DisableTable, Split, Merge
  */
 @InterfaceAudience.Private
-public class AssignmentManager implements ServerListener {
+public class AssignmentManager {
   private static final Logger LOG = LoggerFactory.getLogger(AssignmentManager.class);
 
   // TODO: AMv2
@@ -193,9 +192,6 @@ public class AssignmentManager implements ServerListener {
 
     LOG.trace("Starting assignment manager");
 
-    // Register Server Listener
-    master.getServerManager().registerListener(this);
-
     // Start the Assignment Thread
     startAssignmentThread();
 
@@ -275,9 +271,6 @@ public class AssignmentManager implements ServerListener {
     // Stop the RegionStateStore
     regionStates.clear();
 
-    // Unregister Server Listener
-    master.getServerManager().unregisterListener(this);
-
     // Update meta events (for testing)
     if (hasProcExecutor) {
       metaLoadEvent.suspend();
@@ -319,14 +312,31 @@ public class AssignmentManager implements ServerListener {
     return regionStates;
   }
 
+  /**
+   * Returns the regions hosted by the specified server.
+   * <p/>
+   * Notice that, for SCP, after we submit the SCP no one can change the region list of the
+   * ServerStateNode, so we do not need any locks here. For other callers this only gives a
+   * snapshot of the current region list for the server: right after you get the list, regions
+   * may be moved onto or off of this server, so do not rely on it where strong consistency is
+   * required.
+   */
+  public List<RegionInfo> getRegionsOnServer(ServerName serverName) {
+    ServerStateNode serverInfo = regionStates.getServerNode(serverName);
+    if (serverInfo == null) {
+      return Collections.emptyList();
+    }
+    return serverInfo.getRegionInfoList();
+  }
+
   public RegionStateStore getRegionStateStore() {
     return regionStateStore;
   }
 
   public List<ServerName> getFavoredNodes(final RegionInfo regionInfo) {
-    return this.shouldAssignRegionsWithFavoredNodes?
-        ((FavoredStochasticBalancer)getBalancer()).getFavoredNodes(regionInfo):
-          ServerName.EMPTY_SERVER_LIST;
+    return this.shouldAssignRegionsWithFavoredNodes
+      ? ((FavoredStochasticBalancer) getBalancer()).getFavoredNodes(regionInfo)
+      : ServerName.EMPTY_SERVER_LIST;
   }
 
   // ============================================================================================
@@ -522,12 +532,11 @@ public class AssignmentManager implements ServerListener {
   }
 
   private List<RegionInfo> getSystemTables(ServerName serverName) {
-    Set<RegionStateNode> regions = this.getRegionStates().getServerNode(serverName).getRegions();
-    if (regions == null) {
+    ServerStateNode serverNode = regionStates.getServerNode(serverName);
+    if (serverNode == null) {
       return Collections.emptyList();
     }
-    return regions.stream().map(RegionStateNode::getRegionInfo)
-      .filter(r -> r.getTable().isSystemTable()).collect(Collectors.toList());
+    return serverNode.getSystemRegionInfoList();
   }
 
   private void preTransitCheck(RegionStateNode regionNode, RegionState.State[] expectedStates)
@@ -817,54 +826,79 @@ public class AssignmentManager implements ServerListener {
   // ============================================================================================
   //  RS Region Transition Report helpers
   // ============================================================================================
-  // TODO: Move this code in MasterRpcServices and call on specific event?
+  private void reportRegionStateTransition(ReportRegionStateTransitionResponse.Builder builder,
+      ServerName serverName, List<RegionStateTransition> transitionList) throws IOException {
+    for (RegionStateTransition transition : transitionList) {
+      switch (transition.getTransitionCode()) {
+        case OPENED:
+        case FAILED_OPEN:
+        case CLOSED:
+          assert transition.getRegionInfoCount() == 1 : transition;
+          final RegionInfo hri = ProtobufUtil.toRegionInfo(transition.getRegionInfo(0));
+          updateRegionTransition(serverName, transition.getTransitionCode(), hri,
+            transition.hasOpenSeqNum() ? transition.getOpenSeqNum() : HConstants.NO_SEQNUM);
+          break;
+        case READY_TO_SPLIT:
+        case SPLIT:
+        case SPLIT_REVERTED:
+          assert transition.getRegionInfoCount() == 3 : transition;
+          final RegionInfo parent = ProtobufUtil.toRegionInfo(transition.getRegionInfo(0));
+          final RegionInfo splitA = ProtobufUtil.toRegionInfo(transition.getRegionInfo(1));
+          final RegionInfo splitB = ProtobufUtil.toRegionInfo(transition.getRegionInfo(2));
+          updateRegionSplitTransition(serverName, transition.getTransitionCode(), parent, splitA,
+            splitB);
+          break;
+        case READY_TO_MERGE:
+        case MERGED:
+        case MERGE_REVERTED:
+          assert transition.getRegionInfoCount() == 3 : transition;
+          final RegionInfo merged = ProtobufUtil.toRegionInfo(transition.getRegionInfo(0));
+          final RegionInfo mergeA = ProtobufUtil.toRegionInfo(transition.getRegionInfo(1));
+          final RegionInfo mergeB = ProtobufUtil.toRegionInfo(transition.getRegionInfo(2));
+          updateRegionMergeTransition(serverName, transition.getTransitionCode(), merged, mergeA,
+            mergeB);
+          break;
+      }
+    }
+  }
+
   public ReportRegionStateTransitionResponse reportRegionStateTransition(
       final ReportRegionStateTransitionRequest req) throws PleaseHoldException {
-    final ReportRegionStateTransitionResponse.Builder builder =
+    ReportRegionStateTransitionResponse.Builder builder =
         ReportRegionStateTransitionResponse.newBuilder();
-    final ServerName serverName = ProtobufUtil.toServerName(req.getServer());
+    ServerName serverName = ProtobufUtil.toServerName(req.getServer());
+    ServerStateNode serverNode = regionStates.getOrCreateServer(serverName);
+    // Here we have to acquire a read lock instead of a simple exclusive lock. This is because we
+    // should not block other reportRegionStateTransition calls from the same region server. That
+    // is not only about performance, but also prevents deadlock: suppose the meta region is also
+    // on this region server and you hold a lock that blocks the reportRegionStateTransition for
+    // meta; since meta is not online, you will then block inside the lock protection waiting for
+    // meta to come online...
+    serverNode.readLock().lock();
     try {
-      for (RegionStateTransition transition: req.getTransitionList()) {
-        switch (transition.getTransitionCode()) {
-          case OPENED:
-          case FAILED_OPEN:
-          case CLOSED:
-            assert transition.getRegionInfoCount() == 1 : transition;
-            final RegionInfo hri = ProtobufUtil.toRegionInfo(transition.getRegionInfo(0));
-            updateRegionTransition(serverName, transition.getTransitionCode(), hri,
-                transition.hasOpenSeqNum() ? transition.getOpenSeqNum() : HConstants.NO_SEQNUM);
-            break;
-          case READY_TO_SPLIT:
-          case SPLIT:
-          case SPLIT_REVERTED:
-            assert transition.getRegionInfoCount() == 3 : transition;
-            final RegionInfo parent = ProtobufUtil.toRegionInfo(transition.getRegionInfo(0));
-            final RegionInfo splitA = ProtobufUtil.toRegionInfo(transition.getRegionInfo(1));
-            final RegionInfo splitB = ProtobufUtil.toRegionInfo(transition.getRegionInfo(2));
-            updateRegionSplitTransition(serverName, transition.getTransitionCode(),
-              parent, splitA, splitB);
-            break;
-          case READY_TO_MERGE:
-          case MERGED:
-          case MERGE_REVERTED:
-            assert transition.getRegionInfoCount() == 3 : transition;
-            final RegionInfo merged = ProtobufUtil.toRegionInfo(transition.getRegionInfo(0));
-            final RegionInfo mergeA = ProtobufUtil.toRegionInfo(transition.getRegionInfo(1));
-            final RegionInfo mergeB = ProtobufUtil.toRegionInfo(transition.getRegionInfo(2));
-            updateRegionMergeTransition(serverName, transition.getTransitionCode(),
-              merged, mergeA, mergeB);
-            break;
+      // We only accept reportRegionStateTransition if the region server is online; see the comment
+      // in the submitServerCrash method and HBASE-21508 for more details.
+      if (serverNode.isInState(ServerState.ONLINE)) {
+        try {
+          reportRegionStateTransition(builder, serverName, req.getTransitionList());
+        } catch (PleaseHoldException e) {
+          LOG.trace("Failed transition ", e);
+          throw e;
+        } catch (UnsupportedOperationException | IOException e) {
+          // TODO: at the moment we have a single error message and the RS will abort
+          // if the master says that one of the region transitions failed.
+          LOG.warn("Failed transition", e);
+          builder.setErrorMessage("Failed transition " + e.getMessage());
         }
+      } else {
+        LOG.warn("The region server {} is already dead, skip reportRegionStateTransition call",
+          serverName);
+        builder.setErrorMessage("You are dead");
       }
-    } catch (PleaseHoldException e) {
-      LOG.trace("Failed transition ", e);
-      throw e;
-    } catch (UnsupportedOperationException|IOException e) {
-      // TODO: at the moment we have a single error message and the RS will abort
-      // if the master says that one of the region transitions failed.
-      LOG.warn("Failed transition", e);
-      builder.setErrorMessage("Failed transition " + e.getMessage());
+    } finally {
+      serverNode.readLock().unlock();
     }
+
     return builder.build();
   }
 
@@ -1017,9 +1051,6 @@ public class AssignmentManager implements ServerListener {
     }
     // The Heartbeat tells us which regions are on the region server; check the state.
     checkOnlineRegionsReport(serverNode, regionNames);
-
-    // wake report event
-    wakeServerReportEvent(serverNode);
   }
 
   // just check and output possible inconsistency, without actually doing anything
@@ -1061,18 +1092,6 @@ public class AssignmentManager implements ServerListener {
     }
   }
 
-  protected boolean waitServerReportEvent(ServerName serverName, Procedure<?> proc) {
-    final ServerStateNode serverNode = regionStates.getOrCreateServer(serverName);
-    if (serverNode == null) {
-      LOG.warn("serverName=null; {}", proc);
-    }
-    return serverNode.getReportEvent().suspendIfNotReady(proc);
-  }
-
-  protected void wakeServerReportEvent(final ServerStateNode serverNode) {
-    serverNode.getReportEvent().wake(getProcedureScheduler());
-  }
-
   // ============================================================================================
   //  RIT chore
   // ============================================================================================
@@ -1321,13 +1340,27 @@ public class AssignmentManager implements ServerListener {
     return 0;
   }
 
-  public long submitServerCrash(final ServerName serverName, final boolean shouldSplitWal) {
-    boolean carryingMeta = isCarryingMeta(serverName);
-    ProcedureExecutor<MasterProcedureEnv> procExec = this.master.getMasterProcedureExecutor();
-    long pid = procExec.submitProcedure(new ServerCrashProcedure(procExec.getEnvironment(),
-        serverName, shouldSplitWal, carryingMeta));
-    LOG.debug("Added=" + serverName
-        + " to dead servers, submitted shutdown handler to be executed meta=" + carryingMeta);
+  public long submitServerCrash(ServerName serverName, boolean shouldSplitWal) {
+    boolean carryingMeta;
+    long pid;
+    ServerStateNode serverNode = regionStates.getOrCreateServer(serverName);
+    // We hold the write lock here to fence reportRegionStateTransition. Once we set the server
+    // state to CRASHED, we will no longer accept reportRegionStateTransition calls from this
+    // server. This simplifies the implementation of TRSP and SCP, since it guarantees that the
+    // region list fetched by the SCP will not change any more.
+    serverNode.writeLock().lock();
+    try {
+      serverNode.setState(ServerState.CRASHED);
+      carryingMeta = isCarryingMeta(serverName);
+      ProcedureExecutor<MasterProcedureEnv> procExec = this.master.getMasterProcedureExecutor();
+      pid = procExec.submitProcedure(new ServerCrashProcedure(procExec.getEnvironment(), serverName,
+        shouldSplitWal, carryingMeta));
+    } finally {
+      serverNode.writeLock().unlock();
+    }
+    LOG.info(
+      "Added {} to dead servers which carryingMeta={}, submitted ServerCrashProcedure pid={}",
+      serverName, carryingMeta, pid);
     return pid;
   }
 
@@ -1847,22 +1880,6 @@ public class AssignmentManager implements ServerListener {
         .collect(Collectors.toList());
   }
 
-  // ============================================================================================
-  //  Server Helpers
-  // ============================================================================================
-  @Override
-  public void serverAdded(final ServerName serverName) {
-  }
-
-  @Override
-  public void serverRemoved(final ServerName serverName) {
-    final ServerStateNode serverNode = regionStates.getServerNode(serverName);
-    if (serverNode == null) return;
-
-    // just in case, wake procedures waiting for this server report
-    wakeServerReportEvent(serverNode);
-  }
-
   @VisibleForTesting
   MasterServices getMaster() {
     return master;
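
The fencing scheme described in the comments above boils down to a read/write lock around a
server state flag: report calls run concurrently under the read lock, while the crash path
flips the state under the write lock so that later reports are rejected. A schematic sketch
(not the actual ServerStateNode/AssignmentManager code):

import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Many report calls may proceed in parallel under the read lock; once the
// crash path takes the write lock and marks the server CRASHED, any later
// report sees the flag and is rejected.
class ServerFence {
  enum State { ONLINE, CRASHED }

  private final ReadWriteLock lock = new ReentrantReadWriteLock();
  private volatile State state = State.ONLINE;

  boolean report(Runnable transition) {
    lock.readLock().lock();
    try {
      if (state != State.ONLINE) {
        return false; // reject reports from a dead server
      }
      transition.run();
      return true;
    } finally {
      lock.readLock().unlock();
    }
  }

  void crash(Runnable scheduleScp) {
    lock.writeLock().lock();
    try {
      state = State.CRASHED; // fence: no report is accepted after this
      scheduleScp.run();     // the region list is now stable for the SCP
    } finally {
      lock.writeLock().unlock();
    }
  }
}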

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
index 2b9c0bd..7b85409 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStates.java
@@ -357,22 +357,6 @@ public class RegionStates {
         ((hri.isOffline() || hri.isSplit()) && offline);
   }
 
-  /**
-   * Returns the set of regions hosted by the specified server
-   * @param serverName the server we are interested in
-   * @return set of RegionInfo hosted by the specified server
-   */
-  public List<RegionInfo> getServerRegionInfoSet(final ServerName serverName) {
-    ServerStateNode serverInfo = getServerNode(serverName);
-    if (serverInfo == null) {
-      return Collections.emptyList();
-    }
-
-    synchronized (serverInfo) {
-      return serverInfo.getRegionInfoList();
-    }
-  }
-
   // ============================================================================================
   // Split helpers
   // These methods will only be called in ServerCrashProcedure, and at the end of SCP we will remove

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java
index 6925c42..3efe6e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerState.java
@@ -30,6 +30,11 @@ enum ServerState {
   ONLINE,
 
   /**
+   * Indicates that the server has crashed, i.e., we have already scheduled an SCP for it.
+   */
+  CRASHED,
+
+  /**
    * Only server which carries meta can have this state. We will split wal for meta and then
    * assign meta first before splitting other wals.
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java
index 2042214..6f763aa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/ServerStateNode.java
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.hbase.master.assignment;
 
-import java.util.ArrayList;
+import java.util.List;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -31,23 +34,16 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 class ServerStateNode implements Comparable<ServerStateNode> {
 
-  private static final class ServerReportEvent extends ProcedureEvent<ServerName> {
-    public ServerReportEvent(final ServerName serverName) {
-      super(serverName);
-    }
-  }
-
-  private final ServerReportEvent reportEvent;
-
   private final Set<RegionStateNode> regions;
   private final ServerName serverName;
 
+  private final ReadWriteLock lock = new ReentrantReadWriteLock();
+
   private volatile ServerState state = ServerState.ONLINE;
 
-  public ServerStateNode(final ServerName serverName) {
+  public ServerStateNode(ServerName serverName) {
     this.serverName = serverName;
     this.regions = ConcurrentHashMap.newKeySet();
-    this.reportEvent = new ServerReportEvent(serverName);
   }
 
   public ServerName getServerName() {
@@ -58,10 +54,6 @@ class ServerStateNode implements Comparable<ServerStateNode> {
     return state;
   }
 
-  public ProcedureEvent<?> getReportEvent() {
-    return reportEvent;
-  }
-
   public boolean isInState(final ServerState... expected) {
     boolean expectedState = false;
     if (expected != null) {
@@ -76,20 +68,17 @@ class ServerStateNode implements Comparable<ServerStateNode> {
     this.state = state;
   }
 
-  public Set<RegionStateNode> getRegions() {
-    return regions;
-  }
-
   public int getRegionCount() {
     return regions.size();
   }
 
-  public ArrayList<RegionInfo> getRegionInfoList() {
-    ArrayList<RegionInfo> hris = new ArrayList<RegionInfo>(regions.size());
-    for (RegionStateNode region : regions) {
-      hris.add(region.getRegionInfo());
-    }
-    return hris;
+  public List<RegionInfo> getRegionInfoList() {
+    return regions.stream().map(RegionStateNode::getRegionInfo).collect(Collectors.toList());
+  }
+
+  public List<RegionInfo> getSystemRegionInfoList() {
+    return regions.stream().filter(RegionStateNode::isSystemTable)
+      .map(RegionStateNode::getRegionInfo).collect(Collectors.toList());
   }
 
   public void addRegion(final RegionStateNode regionNode) {
@@ -100,6 +89,14 @@ class ServerStateNode implements Comparable<ServerStateNode> {
     this.regions.remove(regionNode);
   }
 
+  public Lock readLock() {
+    return lock.readLock();
+  }
+
+  public Lock writeLock() {
+    return lock.writeLock();
+  }
+
   @Override
   public int compareTo(final ServerStateNode other) {
     return getServerName().compareTo(other.getServerName());
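
One property worth noting: the region set here is a ConcurrentHashMap-backed key set, so the
streams in getRegionInfoList() and getSystemRegionInfoList() iterate a weakly consistent view
and never throw ConcurrentModificationException, which is exactly the snapshot caveat
documented on getRegionsOnServer. A small illustration (illustrative names only):

import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

// Iteration is safe to run concurrently with add()/remove(); it simply
// reflects some point-in-time view of the set.
class SnapshotView {
  private final Set<String> regions = ConcurrentHashMap.newKeySet();

  void add(String region) {
    regions.add(region);
  }

  List<String> snapshot() {
    return regions.stream().sorted().collect(Collectors.toList());
  }
}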

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
index 90ebf7b..0885a6a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/TransitRegionStateProcedure.java
@@ -500,11 +500,9 @@ public class TransitRegionStateProcedure
       case REGION_STATE_TRANSITION_CONFIRM_CLOSED:
       case REGION_STATE_TRANSITION_CONFIRM_OPENED:
         // for these 3 states, the region may still be online on the crashed server
-        if (serverName.equals(regionNode.getRegionLocation())) {
-          env.getAssignmentManager().regionClosed(regionNode, false);
-          if (currentState != RegionStateTransitionState.REGION_STATE_TRANSITION_CLOSE) {
-            regionNode.getProcedureEvent().wake(env.getProcedureScheduler());
-          }
+        env.getAssignmentManager().regionClosed(regionNode, false);
+        if (currentState != RegionStateTransitionState.REGION_STATE_TRANSITION_CLOSE) {
+          regionNode.getProcedureEvent().wake(env.getProcedureScheduler());
         }
         break;
       default:

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
index c8ff9f8..9353124 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
@@ -61,51 +61,61 @@ public final class ProcedureSyncWait {
   }
 
   private static class ProcedureFuture implements Future<byte[]> {
-      private final ProcedureExecutor<MasterProcedureEnv> procExec;
-      private final Procedure<?> proc;
+    private final ProcedureExecutor<MasterProcedureEnv> procExec;
+    private final Procedure<?> proc;
 
-      private boolean hasResult = false;
-      private byte[] result = null;
+    private boolean hasResult = false;
+    private byte[] result = null;
 
-      public ProcedureFuture(ProcedureExecutor<MasterProcedureEnv> procExec, Procedure<?> proc) {
-        this.procExec = procExec;
-        this.proc = proc;
-      }
+    public ProcedureFuture(ProcedureExecutor<MasterProcedureEnv> procExec, Procedure<?> proc) {
+      this.procExec = procExec;
+      this.proc = proc;
+    }
 
-      @Override
-      public boolean cancel(boolean mayInterruptIfRunning) { return false; }
+    @Override
+    public boolean cancel(boolean mayInterruptIfRunning) {
+      return false;
+    }
 
-      @Override
-      public boolean isCancelled() { return false; }
+    @Override
+    public boolean isCancelled() {
+      return false;
+    }
 
-      @Override
-      public boolean isDone() { return hasResult; }
+    @Override
+    public boolean isDone() {
+      return hasResult;
+    }
 
-      @Override
-      public byte[] get() throws InterruptedException, ExecutionException {
-        if (hasResult) return result;
-        try {
-          return waitForProcedureToComplete(procExec, proc, Long.MAX_VALUE);
-        } catch (Exception e) {
-          throw new ExecutionException(e);
-        }
+    @Override
+    public byte[] get() throws InterruptedException, ExecutionException {
+      if (hasResult) {
+        return result;
+      }
+      try {
+        return waitForProcedureToComplete(procExec, proc, Long.MAX_VALUE);
+      } catch (Exception e) {
+        throw new ExecutionException(e);
       }
+    }
 
-      @Override
-      public byte[] get(long timeout, TimeUnit unit)
-          throws InterruptedException, ExecutionException, TimeoutException {
-        if (hasResult) return result;
-        try {
-          result = waitForProcedureToComplete(procExec, proc, unit.toMillis(timeout));
-          hasResult = true;
-          return result;
-        } catch (TimeoutIOException e) {
-          throw new TimeoutException(e.getMessage());
-        } catch (Exception e) {
-          throw new ExecutionException(e);
-        }
+    @Override
+    public byte[] get(long timeout, TimeUnit unit)
+        throws InterruptedException, ExecutionException, TimeoutException {
+      if (hasResult) {
+        return result;
+      }
+      try {
+        result = waitForProcedureToComplete(procExec, proc, unit.toMillis(timeout));
+        hasResult = true;
+        return result;
+      } catch (TimeoutIOException e) {
+        throw new TimeoutException(e.getMessage());
+      } catch (Exception e) {
+        throw new ExecutionException(e);
       }
     }
+  }
 
   public static Future<byte[]> submitProcedure(final ProcedureExecutor<MasterProcedureEnv> procExec,
       final Procedure<MasterProcedureEnv> proc) {
@@ -124,9 +134,8 @@ public final class ProcedureSyncWait {
   }
 
   public static byte[] waitForProcedureToCompleteIOE(
-      final ProcedureExecutor<MasterProcedureEnv> procExec,
-      final Procedure<?> proc, final long timeout)
-  throws IOException {
+      final ProcedureExecutor<MasterProcedureEnv> procExec, final Procedure<?> proc,
+      final long timeout) throws IOException {
     try {
       return waitForProcedureToComplete(procExec, proc, timeout);
     } catch (IOException e) {
@@ -139,7 +148,7 @@ public final class ProcedureSyncWait {
   public static byte[] waitForProcedureToComplete(
       final ProcedureExecutor<MasterProcedureEnv> procExec, final Procedure<?> proc,
       final long timeout) throws IOException {
-    waitFor(procExec.getEnvironment(), "pid=" + proc.getProcId(),
+    waitFor(procExec.getEnvironment(), timeout, "pid=" + proc.getProcId(),
       new ProcedureSyncWait.Predicate<Boolean>() {
         @Override
         public Boolean evaluate() throws IOException {
@@ -171,15 +180,25 @@ public final class ProcedureSyncWait {
 
   public static <T> T waitFor(MasterProcedureEnv env, String purpose, Predicate<T> predicate)
       throws IOException {
-    final Configuration conf = env.getMasterConfiguration();
-    final long waitTime = conf.getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
-    final long waitingTimeForEvents = conf.getInt("hbase.master.event.waiting.time", 1000);
+    Configuration conf = env.getMasterConfiguration();
+    long waitTime = conf.getLong("hbase.master.wait.on.region", 5 * 60 * 1000);
+    return waitFor(env, waitTime, purpose, predicate);
+  }
+
+  public static <T> T waitFor(MasterProcedureEnv env, long waitTime, String purpose,
+      Predicate<T> predicate) throws IOException {
+    Configuration conf = env.getMasterConfiguration();
+    long waitingTimeForEvents = conf.getInt("hbase.master.event.waiting.time", 1000);
     return waitFor(env, waitTime, waitingTimeForEvents, purpose, predicate);
   }
 
   public static <T> T waitFor(MasterProcedureEnv env, long waitTime, long waitingTimeForEvents,
       String purpose, Predicate<T> predicate) throws IOException {
-    final long done = EnvironmentEdgeManager.currentTime() + waitTime;
+    long done = EnvironmentEdgeManager.currentTime() + waitTime;
+    if (done <= 0) {
+      // long overflow, usually this means we pass Long.MAX_VALUE as waitTime
+      done = Long.MAX_VALUE;
+    }
     boolean logged = false;
     do {
       T result = predicate.evaluate();
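
The overflow guard added to waitFor matters because currentTime + Long.MAX_VALUE wraps
negative, so an unguarded deadline check would expire immediately. A minimal standalone
demonstration of the same arithmetic (a sketch, not the HBase utility itself):

// Passing Long.MAX_VALUE as the wait time makes now + waitTime wrap
// negative, so the guard converts it to "wait forever".
class Deadline {
  static long compute(long nowMillis, long waitTimeMillis) {
    long done = nowMillis + waitTimeMillis;
    if (done <= 0) {
      // long overflow, usually because waitTime is Long.MAX_VALUE
      done = Long.MAX_VALUE;
    }
    return done;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    System.out.println(compute(now, Long.MAX_VALUE)); // Long.MAX_VALUE
    System.out.println(compute(now, 5000) - now);     // 5000
  }
}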

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 1fcc6eb..048bca8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -146,7 +146,7 @@ public class ServerCrashProcedure
           break;
         case SERVER_CRASH_GET_REGIONS:
           this.regionsOnCrashedServer =
-            services.getAssignmentManager().getRegionStates().getServerRegionInfoSet(serverName);
+            services.getAssignmentManager().getRegionsOnServer(serverName);
           // Where to go next? Depends on whether we should split logs at all or
           // if we should do distributed log splitting.
           if (!this.shouldSplitWal) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 4ab1a8f..97c7b9b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -713,12 +713,12 @@ public class TestAdmin2 {
     assertEquals(3, clusterRegionServers.size());
 
     HashMap<ServerName, List<RegionInfo>> serversToDecommssion = new HashMap<>();
-    // Get a server that has regions. We will decommission two of the servers,
+    // Get a server that has meta online. We will decommission two of the servers,
     // leaving one online.
     int i;
     for (i = 0; i < clusterRegionServers.size(); i++) {
       List<RegionInfo> regionsOnServer = admin.getRegions(clusterRegionServers.get(i));
-      if (regionsOnServer.size() > 0) {
+      if (admin.getRegions(clusterRegionServers.get(i)).stream().anyMatch(p -> p.isMetaRegion())) {
         serversToDecommssion.put(clusterRegionServers.get(i), regionsOnServer);
         break;
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
index 5a1f87d..56467cc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/MockMasterServices.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.master.assignment;
 import static org.mockito.ArgumentMatchers.any;
 
 import java.io.IOException;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
@@ -28,7 +27,6 @@ import java.util.SortedSet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerMetricsBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -51,7 +49,6 @@ import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher;
-import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -93,19 +90,15 @@ public class MockMasterServices extends MockNoopMasterServices {
   private final ClusterConnection connection;
   private final LoadBalancer balancer;
   private final ServerManager serverManager;
-  // Set of regions on a 'server'. Populated externally. Used in below faking 'cluster'.
-  private final NavigableMap<ServerName, SortedSet<byte []>> regionsToRegionServers;
 
-  private final ProcedureEvent initialized = new ProcedureEvent("master initialized");
+  private final ProcedureEvent<?> initialized = new ProcedureEvent<>("master initialized");
   public static final String DEFAULT_COLUMN_FAMILY_NAME = "cf";
   public static final ServerName MOCK_MASTER_SERVERNAME =
       ServerName.valueOf("mockmaster.example.org", 1234, -1L);
 
   public MockMasterServices(Configuration conf,
-      NavigableMap<ServerName, SortedSet<byte []>> regionsToRegionServers)
-  throws IOException {
+      NavigableMap<ServerName, SortedSet<byte[]>> regionsToRegionServers) throws IOException {
     super(conf);
-    this.regionsToRegionServers = regionsToRegionServers;
     Superusers.initialize(conf);
     this.fileSystemManager = new MasterFileSystem(conf);
     this.walManager = new MasterWalManager(this);
@@ -120,15 +113,6 @@ public class MockMasterServices extends MockNoopMasterServices {
       public boolean isTableDisabled(final TableName tableName) {
         return false;
       }
-
-      @Override
-      protected boolean waitServerReportEvent(ServerName serverName, Procedure proc) {
-        // Make a report with current state of the server 'serverName' before we call wait..
-        SortedSet<byte[]> regions = regionsToRegionServers.get(serverName);
-        getAssignmentManager().reportOnlineRegions(serverName,
-          regions == null ? new HashSet<byte[]>() : regions);
-        return super.waitServerReportEvent(serverName, proc);
-      }
     };
     this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
     this.serverManager = new ServerManager(this);
@@ -176,7 +160,7 @@ public class MockMasterServices extends MockNoopMasterServices {
     this.assignmentManager.start();
     for (int i = 0; i < numServes; ++i) {
       ServerName sn = ServerName.valueOf("localhost", 100 + i, 1);
-      serverManager.regionServerReport(sn, new ServerLoad(ServerMetricsBuilder.of(sn)));
+      serverManager.regionServerReport(sn, ServerMetricsBuilder.of(sn));
     }
     this.procedureExecutor.getEnvironment().setEventReady(initialized, true);
   }
@@ -202,7 +186,7 @@ public class MockMasterServices extends MockNoopMasterServices {
       return;
     }
     ServerName sn = ServerName.valueOf(serverName.getAddress().toString(), startCode);
-    serverManager.regionServerReport(sn, new ServerLoad(ServerMetricsBuilder.of(sn)));
+    serverManager.regionServerReport(sn, ServerMetricsBuilder.of(sn));
   }
 
   @Override
@@ -260,7 +244,7 @@ public class MockMasterServices extends MockNoopMasterServices {
   }
 
   @Override
-  public ProcedureEvent getInitializedEvent() {
+  public ProcedureEvent<?> getInitializedEvent() {
     return this.initialized;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
index 94963a0..5ec7cc6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.exceptions.UnexpectedStateException;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
@@ -49,14 +48,6 @@ public class TestAssignmentManager extends TestAssignmentManagerBase {
 
   private static final Logger LOG = LoggerFactory.getLogger(TestAssignmentManager.class);
 
-  @Test(expected = NullPointerException.class)
-  public void testWaitServerReportEventWithNullServer() throws UnexpectedStateException {
-    // Test what happens if we pass in null server. I'd expect it throws NPE.
-    if (this.am.waitServerReportEvent(null, null)) {
-      throw new UnexpectedStateException();
-    }
-  }
-
   @Test
   public void testAssignWithGoodExec() throws Exception {
     // collect AM metrics before test

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
index 5f5a576..7ab37bc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManagerBase.java
@@ -186,7 +186,7 @@ public abstract class TestAssignmentManagerBase {
 
   protected byte[] waitOnFuture(final Future<byte[]> future) throws Exception {
     try {
-      return future.get(5, TimeUnit.SECONDS);
+      return future.get(60, TimeUnit.SECONDS);
     } catch (ExecutionException e) {
       LOG.info("ExecutionException", e);
       Exception ee = (Exception) e.getCause();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0e3cb6c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
new file mode 100644
index 0000000..6c9e5eb
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestReportRegionStateTransitionFromDeadServer.java
@@ -0,0 +1,201 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.assignment;
+
+import static org.junit.Assert.assertNotEquals;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.zookeeper.KeeperException;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
+
+@Category({ MasterTests.class, MediumTests.class })
+public class TestReportRegionStateTransitionFromDeadServer {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestReportRegionStateTransitionFromDeadServer.class);
+
+  private static final List<ServerName> EXCLUDE_SERVERS = new ArrayList<>();
+
+  private static CountDownLatch ARRIVE_GET_REGIONS;
+  private static CountDownLatch RESUME_GET_REGIONS;
+  private static CountDownLatch ARRIVE_REPORT;
+  private static CountDownLatch RESUME_REPORT;
+
+  private static final class ServerManagerForTest extends ServerManager {
+
+    public ServerManagerForTest(MasterServices master) {
+      super(master);
+    }
+
+    @Override
+    public List<ServerName> createDestinationServersList() {
+      return super.createDestinationServersList(EXCLUDE_SERVERS);
+    }
+  }
+
+  private static final class AssignmentManagerForTest extends AssignmentManager {
+
+    public AssignmentManagerForTest(MasterServices master) {
+      super(master);
+    }
+
+    @Override
+    public List<RegionInfo> getRegionsOnServer(ServerName serverName) {
+      List<RegionInfo> regions = super.getRegionsOnServer(serverName);
+      if (ARRIVE_GET_REGIONS != null) {
+        ARRIVE_GET_REGIONS.countDown();
+        try {
+          RESUME_GET_REGIONS.await();
+        } catch (InterruptedException e) {
+        }
+      }
+      return regions;
+    }
+
+    @Override
+    public ReportRegionStateTransitionResponse reportRegionStateTransition(
+        ReportRegionStateTransitionRequest req) throws PleaseHoldException {
+      if (ARRIVE_REPORT != null && req.getTransitionList().stream()
+        .allMatch(t -> !ProtobufUtil.toRegionInfo(t.getRegionInfo(0)).isMetaRegion())) {
+        ARRIVE_REPORT.countDown();
+        try {
+          RESUME_REPORT.await();
+        } catch (InterruptedException e) {
+        }
+      }
+      return super.reportRegionStateTransition(req);
+    }
+  }
+
+  public static final class HMasterForTest extends HMaster {
+
+    public HMasterForTest(Configuration conf) throws IOException, KeeperException {
+      super(conf);
+    }
+
+    @Override
+    protected AssignmentManager createAssignmentManager(MasterServices master) {
+      return new AssignmentManagerForTest(master);
+    }
+
+    @Override
+    protected ServerManager createServerManager(MasterServices master) throws IOException {
+      setupClusterConnection();
+      return new ServerManagerForTest(master);
+    }
+  }
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static TableName NAME = TableName.valueOf("Report");
+
+  private static byte[] CF = Bytes.toBytes("cf");
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL.getConfiguration().setClass(HConstants.MASTER_IMPL, HMasterForTest.class, HMaster.class);
+    UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 1000);
+    UTIL.startMiniCluster(3);
+    UTIL.getAdmin().balancerSwitch(false, true);
+    UTIL.createTable(NAME, CF);
+    UTIL.waitTableAvailable(NAME);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void test() throws HBaseIOException, InterruptedException, ExecutionException {
+    RegionInfo region = UTIL.getMiniHBaseCluster().getRegions(NAME).get(0).getRegionInfo();
+    AssignmentManager am = UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager();
+    RegionStateNode rsn = am.getRegionStates().getRegionStateNode(region);
+
+    // Move the region from rs0 to rs1, and then kill rs0. Later add rs1 to the exclude servers,
+    // and finally verify that the region ends up on exactly one of rs1 and rs2.
+    HRegionServer rs0 = UTIL.getMiniHBaseCluster().getRegionServer(rsn.getRegionLocation());
+    HRegionServer rs1 = UTIL.getOtherRegionServer(rs0);
+    HRegionServer rs2 = UTIL.getMiniHBaseCluster().getRegionServerThreads().stream()
+      .map(t -> t.getRegionServer()).filter(rs -> rs != rs0 && rs != rs1).findAny().get();
+
+    RESUME_REPORT = new CountDownLatch(1);
+    ARRIVE_REPORT = new CountDownLatch(1);
+    Future<?> future =
+      am.moveAsync(new RegionPlan(region, rs0.getServerName(), rs1.getServerName()));
+    ARRIVE_REPORT.await();
+
+    RESUME_GET_REGIONS = new CountDownLatch(1);
+    ARRIVE_GET_REGIONS = new CountDownLatch(1);
+    rs0.abort("For testing!");
+
+    ARRIVE_GET_REGIONS.await();
+    RESUME_REPORT.countDown();
+
+    try {
+      future.get(15, TimeUnit.SECONDS);
+    } catch (TimeoutException e) {
+      // After the fix in HBASE-21508 we will get this exception, as the TRSP can no longer finish
+      // before the SCP interrupts it. That is expected.
+    }
+
+    EXCLUDE_SERVERS.add(rs1.getServerName());
+    RESUME_GET_REGIONS.countDown();
+    // wait until there are no running procedures, no SCP and no TRSP
+    UTIL.waitFor(30000, () -> UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor()
+      .getActiveProcIds().isEmpty());
+    boolean onRS1 = !rs1.getRegions(NAME).isEmpty();
+    boolean onRS2 = !rs2.getRegions(NAME).isEmpty();
+    assertNotEquals(
+      "should either be on rs1 or rs2, but onRS1 is " + onRS1 + " and on RS2 is " + onRS2, onRS1,
+      onRS2);
+  }
+}
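
The ARRIVE/RESUME latches in this test implement a common handshake for pinning a race
deterministically: the hooked thread announces that it reached the interesting point, then
parks until the test has arranged the race. A stripped-down sketch of the pattern:

import java.util.concurrent.CountDownLatch;

// The worker announces it reached the hook, then parks until the test
// thread has set up the racing condition and releases it.
class LatchHandshake {
  static final CountDownLatch ARRIVE = new CountDownLatch(1);
  static final CountDownLatch RESUME = new CountDownLatch(1);

  static void hook() throws InterruptedException {
    ARRIVE.countDown(); // tell the test we are at the interesting point
    RESUME.await();     // park until the test lets us continue
  }

  public static void main(String[] args) throws InterruptedException {
    Thread worker = new Thread(() -> {
      try {
        hook();
      } catch (InterruptedException ignored) {
      }
    });
    worker.start();
    ARRIVE.await();     // the worker is now parked inside the hook
    // ... arrange the race here, e.g. kill a server ...
    RESUME.countDown(); // let the worker proceed
    worker.join();
  }
}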


[20/51] [abbrv] hbase git commit: HBASE-21154 Remove hbase:namespace table; fold it into hbase:meta

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
index d3f982f..e3327e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyNamespaceProcedure.java
@@ -15,19 +15,17 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
-
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.apache.hadoop.hbase.master.TableNamespaceManager;
-import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyNamespaceState;
@@ -42,11 +40,9 @@ public class ModifyNamespaceProcedure
 
   private NamespaceDescriptor oldNsDescriptor;
   private NamespaceDescriptor newNsDescriptor;
-  private Boolean traceEnabled;
 
   public ModifyNamespaceProcedure() {
     this.oldNsDescriptor = null;
-    this.traceEnabled = null;
   }
 
   public ModifyNamespaceProcedure(final MasterProcedureEnv env,
@@ -59,36 +55,31 @@ public class ModifyNamespaceProcedure
     super(env, latch);
     this.oldNsDescriptor = null;
     this.newNsDescriptor = newNsDescriptor;
-    this.traceEnabled = null;
   }
 
   @Override
   protected Flow executeFromState(final MasterProcedureEnv env, final ModifyNamespaceState state)
       throws InterruptedException {
-    if (isTraceEnabled()) {
-      LOG.trace(this + " execute state=" + state);
-    }
-
+    LOG.trace("{} execute state={}", this, state);
     try {
       switch (state) {
-      case MODIFY_NAMESPACE_PREPARE:
-        boolean success = prepareModify(env);
-        releaseSyncLatch();
-        if (!success) {
-          assert isFailed() : "Modify namespace should have an exception here";
+        case MODIFY_NAMESPACE_PREPARE:
+          boolean success = prepareModify(env);
+          releaseSyncLatch();
+          if (!success) {
+            assert isFailed() : "Modify namespace should have an exception here";
+            return Flow.NO_MORE_STATE;
+          }
+          setNextState(ModifyNamespaceState.MODIFY_NAMESPACE_UPDATE_NS_TABLE);
+          break;
+        case MODIFY_NAMESPACE_UPDATE_NS_TABLE:
+          addOrUpdateNamespace(env, newNsDescriptor);
           return Flow.NO_MORE_STATE;
-        }
-        setNextState(ModifyNamespaceState.MODIFY_NAMESPACE_UPDATE_NS_TABLE);
-        break;
-      case MODIFY_NAMESPACE_UPDATE_NS_TABLE:
-        insertIntoNSTable(env);
-        setNextState(ModifyNamespaceState.MODIFY_NAMESPACE_UPDATE_ZK);
-        break;
-      case MODIFY_NAMESPACE_UPDATE_ZK:
-        updateZKNamespaceManager(env);
-        return Flow.NO_MORE_STATE;
-      default:
-        throw new UnsupportedOperationException(this + " unhandled state=" + state);
+        case MODIFY_NAMESPACE_UPDATE_ZK:
+          // not used any more
+          return Flow.NO_MORE_STATE;
+        default:
+          throw new UnsupportedOperationException(this + " unhandled state=" + state);
       }
     } catch (IOException e) {
       if (isRollbackSupported(state)) {
@@ -116,7 +107,7 @@ public class ModifyNamespaceProcedure
   }
 
   @Override
-  protected boolean isRollbackSupported(final ModifyNamespaceState state) {
+  protected boolean isRollbackSupported(ModifyNamespaceState state) {
     switch (state) {
       case MODIFY_NAMESPACE_PREPARE:
         return true;
@@ -126,12 +117,12 @@ public class ModifyNamespaceProcedure
   }
 
   @Override
-  protected ModifyNamespaceState getState(final int stateId) {
-    return ModifyNamespaceState.valueOf(stateId);
+  protected ModifyNamespaceState getState(int stateId) {
+    return ModifyNamespaceState.forNumber(stateId);
   }
 
   @Override
-  protected int getStateId(final ModifyNamespaceState state) {
+  protected int getStateId(ModifyNamespaceState state) {
     return state.getNumber();
   }
 
@@ -141,13 +132,12 @@ public class ModifyNamespaceProcedure
   }
 
   @Override
-  protected void serializeStateData(ProcedureStateSerializer serializer)
-      throws IOException {
+  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
     super.serializeStateData(serializer);
 
     MasterProcedureProtos.ModifyNamespaceStateData.Builder modifyNamespaceMsg =
-        MasterProcedureProtos.ModifyNamespaceStateData.newBuilder().setNamespaceDescriptor(
-          ProtobufUtil.toProtoNamespaceDescriptor(this.newNsDescriptor));
+      MasterProcedureProtos.ModifyNamespaceStateData.newBuilder()
+        .setNamespaceDescriptor(ProtobufUtil.toProtoNamespaceDescriptor(this.newNsDescriptor));
     if (this.oldNsDescriptor != null) {
       modifyNamespaceMsg.setUnmodifiedNamespaceDescriptor(
         ProtobufUtil.toProtoNamespaceDescriptor(this.oldNsDescriptor));
@@ -156,17 +146,16 @@ public class ModifyNamespaceProcedure
   }
 
   @Override
-  protected void deserializeStateData(ProcedureStateSerializer serializer)
-      throws IOException {
+  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
     super.deserializeStateData(serializer);
 
     MasterProcedureProtos.ModifyNamespaceStateData modifyNamespaceMsg =
-        serializer.deserialize(MasterProcedureProtos.ModifyNamespaceStateData.class);
+      serializer.deserialize(MasterProcedureProtos.ModifyNamespaceStateData.class);
     newNsDescriptor =
-        ProtobufUtil.toNamespaceDescriptor(modifyNamespaceMsg.getNamespaceDescriptor());
+      ProtobufUtil.toNamespaceDescriptor(modifyNamespaceMsg.getNamespaceDescriptor());
     if (modifyNamespaceMsg.hasUnmodifiedNamespaceDescriptor()) {
       oldNsDescriptor =
-          ProtobufUtil.toNamespaceDescriptor(modifyNamespaceMsg.getUnmodifiedNamespaceDescriptor());
+        ProtobufUtil.toNamespaceDescriptor(modifyNamespaceMsg.getUnmodifiedNamespaceDescriptor());
     }
   }
 
@@ -187,8 +176,8 @@ public class ModifyNamespaceProcedure
    */
   private boolean prepareModify(final MasterProcedureEnv env) throws IOException {
     if (getTableNamespaceManager(env).doesNamespaceExist(newNsDescriptor.getName()) == false) {
-      setFailure("master-modify-namespace", new NamespaceNotFoundException(
-            newNsDescriptor.getName()));
+      setFailure("master-modify-namespace",
+        new NamespaceNotFoundException(newNsDescriptor.getName()));
       return false;
     }
     try {
@@ -202,38 +191,4 @@ public class ModifyNamespaceProcedure
     oldNsDescriptor = getTableNamespaceManager(env).get(newNsDescriptor.getName());
     return true;
   }
-
-  /**
-   * Insert/update the row into namespace table
-   * @param env MasterProcedureEnv
-   * @throws IOException
-   */
-  private void insertIntoNSTable(final MasterProcedureEnv env) throws IOException {
-    getTableNamespaceManager(env).insertIntoNSTable(newNsDescriptor);
-  }
-
-  /**
-   * Update ZooKeeper.
-   * @param env MasterProcedureEnv
-   * @throws IOException
-   */
-  private void updateZKNamespaceManager(final MasterProcedureEnv env) throws IOException {
-    getTableNamespaceManager(env).updateZKNamespaceManager(newNsDescriptor);
-  }
-
-  private TableNamespaceManager getTableNamespaceManager(final MasterProcedureEnv env) {
-    return env.getMasterServices().getClusterSchema().getTableNamespaceManager();
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @return traceEnabled
-   */
-  private Boolean isTraceEnabled() {
-    if (traceEnabled == null) {
-      traceEnabled = LOG.isTraceEnabled();
-    }
-    return traceEnabled;
-  }
 }
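
For context on the new MODIFY_NAMESPACE_UPDATE_NS_TABLE step: addOrUpdateNamespace() now persists the descriptor into the ns family of hbase:meta instead of the old hbase:namespace table, and the ZooKeeper mirror is gone entirely. Below is a rough, client-API-level sketch of what such a write amounts to; the column qualifier "d" and the class name are assumptions for illustration, only HConstants.NAMESPACE_FAMILY comes from this patch.

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;

public final class NamespaceWriteSketch {

  // Qualifier is an assumption for illustration; only NAMESPACE_FAMILY is from the patch.
  private static final byte[] QUALIFIER = Bytes.toBytes("d");

  public static void addOrUpdateNamespace(Connection conn, NamespaceDescriptor ns)
      throws IOException {
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // Row key is the namespace name; the value is the protobuf-encoded descriptor.
      Put put = new Put(Bytes.toBytes(ns.getName()));
      put.addColumn(HConstants.NAMESPACE_FAMILY, QUALIFIER,
        ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray());
      meta.put(put);
    }
  }

  private NamespaceWriteSketch() {
  }
}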

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
index 4fd582d..7e47586 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
@@ -26,6 +26,13 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public interface TableProcedureInterface {
+
+  /**
+   * Used to acquire/release locks for namespace related operations; just a placeholder, as we no
+   * longer have a namespace table.
+   */
+  public static final TableName DUMMY_NAMESPACE_TABLE_NAME = TableName.NAMESPACE_TABLE_NAME;
+
   public enum TableOperationType {
     CREATE, DELETE, DISABLE, EDIT, ENABLE, READ,
     REGION_EDIT, REGION_SPLIT, REGION_MERGE, REGION_ASSIGN, REGION_UNASSIGN,

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java
index 81c883b..6fb147e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableQueue.java
@@ -53,8 +53,8 @@ class TableQueue extends Queue<TableName> {
       case ENABLE:
         return true;
       case EDIT:
-        // we allow concurrent edit on the NS table
-        return !proc.getTableName().equals(TableName.NAMESPACE_TABLE_NAME);
+        // we allow concurrent edits on the ns family in the meta table
+        return !proc.getTableName().equals(TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME);
       case READ:
         return false;
       // region operations are using the shared-lock on the table
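
Since there is no real namespace table to lock any more, namespace procedures key their table queue on the DUMMY_NAMESPACE_TABLE_NAME placeholder introduced above. A minimal sketch of how a namespace procedure can satisfy TableProcedureInterface under this scheme (the class name is hypothetical; the getTableName() pattern matches the test change further below):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;

public class SomeNamespaceProcedureSketch implements TableProcedureInterface {

  @Override
  public TableName getTableName() {
    // No real namespace table exists; this placeholder keys the table queue so
    // that namespace operations still serialize against one another.
    return TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME;
  }

  @Override
  public TableOperationType getTableOperationType() {
    return TableOperationType.EDIT;
  }
}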

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index c10a824..4b6da53 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -95,7 +95,6 @@ import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.RegionTooBusyException;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.UnknownScannerException;
@@ -8298,8 +8297,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    */
   public byte[] checkSplit() {
     // Can't split META
-    if (this.getRegionInfo().isMetaRegion() ||
-        TableName.NAMESPACE_TABLE_NAME.equals(this.getRegionInfo().getTable())) {
+    if (this.getRegionInfo().isMetaRegion()) {
       if (shouldForceSplit()) {
         LOG.warn("Cannot split meta region in HBase 0.20 and above");
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 24743b9..c6e3eee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -779,13 +779,10 @@ public class HRegionServer extends HasThread implements
   }
 
   /**
-   * Create a 'smarter' Connection, one that is capable of by-passing RPC if the request is to
-   * the local server; i.e. a short-circuit Connection. Safe to use going to local or remote
-   * server. Create this instance in a method can be intercepted and mocked in tests.
-   * @throws IOException
+   * Create a 'smarter' Connection, one that is capable of bypassing RPC if the request is to the
+   * local server; i.e. a short-circuit Connection. Safe to use going to local or remote server.
    */
-  @VisibleForTesting
-  protected ClusterConnection createClusterConnection() throws IOException {
+  private ClusterConnection createClusterConnection() throws IOException {
     Configuration conf = this.conf;
     if (conf.get(HConstants.CLIENT_ZOOKEEPER_QUORUM) != null) {
       // Use server ZK cluster for server-issued connections, so we clone
@@ -796,8 +793,15 @@ public class HRegionServer extends HasThread implements
     // Create a cluster connection that when appropriate, can short-circuit and go directly to the
     // local server if the request is to the local server bypassing RPC. Can be used for both local
     // and remote invocations.
-    return ConnectionUtils.createShortCircuitConnection(conf, null, userProvider.getCurrent(),
-      serverName, rpcServices, rpcServices);
+    ClusterConnection conn = ConnectionUtils.createShortCircuitConnection(conf, null,
+      userProvider.getCurrent(), serverName, rpcServices, rpcServices);
+    // This is used to initialize the batch thread pool inside the connection implementation.
+    // When deploying a fresh cluster, we may first use the cluster connection in
+    // InitMetaProcedure, which is executed inside a PEWorker; the batch thread pool would then
+    // inherit the thread group of the PEWorker, which is destroyed when the ProcedureExecutor
+    // shuts down. That makes lots of procedure related UTs fail, so initialize it here first.
+    conn.getTable(TableName.META_TABLE_NAME).close();
+    return conn;
   }
 
   /**
@@ -823,7 +827,6 @@ public class HRegionServer extends HasThread implements
 
   /**
    * Setup our cluster connection if not already initialized.
-   * @throws IOException
    */
   protected synchronized void setupClusterConnection() throws IOException {
     if (clusterConnection == null) {

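The eager conn.getTable(TableName.META_TABLE_NAME).close() call above exists because the JDK's default thread factory places new pool threads into the ThreadGroup of whichever thread first creates the pool. A self-contained, JDK-only demo of that inheritance (no HBase involved):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class ThreadGroupInheritanceDemo {

  public static void main(String[] args) throws InterruptedException {
    ThreadGroup workerGroup = new ThreadGroup("pe-worker-group");
    Thread worker = new Thread(workerGroup, () -> {
      // The default thread factory puts pool threads in the creating
      // thread's ThreadGroup -- here, workerGroup.
      ExecutorService pool = Executors.newSingleThreadExecutor();
      pool.submit(() -> System.out.println(
        "pool thread group = " + Thread.currentThread().getThreadGroup().getName()));
      pool.shutdown();
    }, "pe-worker");
    worker.start();
    worker.join();
    // Prints "pool thread group = pe-worker-group": if workerGroup were later
    // destroyed (as the PEWorker group is on ProcedureExecutor shutdown), the
    // pool would become unusable, hence the eager initialization in the patch.
  }
}
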
http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 1a84bfd..835fc0d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -875,7 +875,7 @@ public class AccessController implements MasterCoprocessor, RegionCoprocessor,
     // wait till the acl table is created.
     if (AccessControlLists.isAclTable(desc)) {
       this.aclTabAvailable = true;
-    } else if (!(TableName.NAMESPACE_TABLE_NAME.equals(desc.getTableName()))) {
+    } else {
       if (!aclTabAvailable) {
         LOG.warn("Not adding owner permission for table " + desc.getTableName() + ". "
             + AccessControlLists.ACL_TABLE_NAME + " is not yet created. "

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
index 28c7ec3..3dce0de 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSTableDescriptors.java
@@ -178,6 +178,17 @@ public class FSTableDescriptors implements TableDescriptors {
         // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
         .setBloomFilterType(BloomType.NONE)
         .build())
+      .setColumnFamily(ColumnFamilyDescriptorBuilder
+        .newBuilder(HConstants.NAMESPACE_FAMILY)
+        .setMaxVersions(conf.getInt(HConstants.HBASE_META_VERSIONS,
+                HConstants.DEFAULT_HBASE_META_VERSIONS))
+        .setInMemory(true)
+        .setBlocksize(conf.getInt(HConstants.HBASE_META_BLOCK_SIZE,
+                HConstants.DEFAULT_HBASE_META_BLOCK_SIZE))
+        .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+        // Disable blooms for meta.  Needs work.  Seems to mess w/ getClosestOrBefore.
+        .setBloomFilterType(BloomType.NONE)
+        .build())
       .setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(
         MultiRowMutationEndpoint.class.getName())
         .setPriority(Coprocessor.PRIORITY_SYSTEM).build());

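With the extra ns family defined on meta, a namespace descriptor can be read back with an ordinary Get against hbase:meta. A hedged sketch, assuming one protobuf-encoded cell per namespace row; the qualifier "d" is an illustration, only HConstants.NAMESPACE_FAMILY comes from this patch.

import java.io.IOException;

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

public final class NamespaceReadSketch {

  public static NamespaceDescriptor getNamespace(Connection conn, String name)
      throws IOException {
    byte[] qualifier = Bytes.toBytes("d"); // assumed qualifier, for illustration
    try (Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Result r = meta.get(new Get(Bytes.toBytes(name))
        .addColumn(HConstants.NAMESPACE_FAMILY, qualifier));
      byte[] value = r.getValue(HConstants.NAMESPACE_FAMILY, qualifier);
      return value == null ? null
        : ProtobufUtil.toNamespaceDescriptor(HBaseProtos.NamespaceDescriptor.parseFrom(value));
    }
  }

  private NamespaceReadSketch() {
  }
}
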
http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 3f383d6..655bbdb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -3436,7 +3436,6 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
    */
   public void waitUntilAllSystemRegionsAssigned() throws IOException {
     waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
-    waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
index 9aecf06..44b06de 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
@@ -58,8 +58,8 @@ public class TestGlobalMemStoreSize {
   private static final Logger LOG = LoggerFactory.getLogger(TestGlobalMemStoreSize.class);
   private static int regionServerNum = 4;
   private static int regionNum = 16;
-  // total region num = region num + root and meta regions
-  private static int totalRegionNum = regionNum+2;
+  // total region num = region num + meta regions
+  private static int totalRegionNum = regionNum + 1;
 
   private HBaseTestingUtility TEST_UTIL;
   private MiniHBaseCluster cluster;

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
index c9db891..5d5bc50 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -63,7 +62,6 @@ public class TestNamespace {
   private static HBaseTestingUtility TEST_UTIL;
   protected static Admin admin;
   protected static HBaseCluster cluster;
-  private static ZKNamespaceManager zkNamespaceManager;
   private String prefix = "TestNamespace";
 
   @Rule
@@ -76,9 +74,6 @@ public class TestNamespace {
     admin = TEST_UTIL.getAdmin();
     cluster = TEST_UTIL.getHBaseCluster();
     master = ((MiniHBaseCluster)cluster).getMaster();
-    zkNamespaceManager =
-        new ZKNamespaceManager(master.getZooKeeper());
-    zkNamespaceManager.start();
     LOG.info("Done initializing cluster");
   }
 
@@ -107,19 +102,16 @@ public class TestNamespace {
         admin.getNamespaceDescriptor(NamespaceDescriptor.DEFAULT_NAMESPACE.getName());
     assertNotNull(ns);
     assertEquals(ns.getName(), NamespaceDescriptor.DEFAULT_NAMESPACE.getName());
-    assertNotNull(zkNamespaceManager.get(NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR));
 
     ns = admin.getNamespaceDescriptor(NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
     assertNotNull(ns);
     assertEquals(ns.getName(), NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
-    assertNotNull(zkNamespaceManager.get(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR));
 
     assertEquals(2, admin.listNamespaceDescriptors().length);
 
     //verify existence of system tables
     Set<TableName> systemTables = Sets.newHashSet(
-        TableName.META_TABLE_NAME,
-        TableName.NAMESPACE_TABLE_NAME);
+        TableName.META_TABLE_NAME);
     HTableDescriptor[] descs =
         admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
     assertEquals(systemTables.size(), descs.length);
@@ -181,18 +173,9 @@ public class TestNamespace {
     //create namespace and verify
     admin.createNamespace(NamespaceDescriptor.create(nsName).build());
     assertEquals(3, admin.listNamespaceDescriptors().length);
-    TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
-      @Override
-      public boolean evaluate() throws Exception {
-        return zkNamespaceManager.list().size() == 3;
-      }
-    });
-    assertNotNull(zkNamespaceManager.get(nsName));
     //remove namespace and verify
     admin.deleteNamespace(nsName);
     assertEquals(2, admin.listNamespaceDescriptors().length);
-    assertEquals(2, zkNamespaceManager.list().size());
-    assertNull(zkNamespaceManager.get(nsName));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNamespaceAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNamespaceAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNamespaceAdminApi.java
index f74b095..8de32b8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNamespaceAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNamespaceAdminApi.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.client.AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -29,14 +27,10 @@ import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceExistException;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.ZKNamespaceManager;
-import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.BeforeClass;
@@ -58,8 +52,6 @@ public class TestAsyncNamespaceAdminApi extends TestAsyncAdminBase {
       HBaseClassTestRule.forClass(TestAsyncNamespaceAdminApi.class);
 
   private String prefix = "TestNamespace";
-  private static HMaster master;
-  private static ZKNamespaceManager zkNamespaceManager;
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -69,9 +61,6 @@ public class TestAsyncNamespaceAdminApi extends TestAsyncAdminBase {
     TEST_UTIL.getConfiguration().setInt(START_LOG_ERRORS_AFTER_COUNT_KEY, 0);
     TEST_UTIL.startMiniCluster(1);
     ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration()).get();
-    master = TEST_UTIL.getHBaseCluster().getMaster();
-    zkNamespaceManager = new ZKNamespaceManager(master.getZooKeeper());
-    zkNamespaceManager.start();
     LOG.info("Done initializing cluster");
   }
 
@@ -83,18 +72,9 @@ public class TestAsyncNamespaceAdminApi extends TestAsyncAdminBase {
     // create namespace and verify
     admin.createNamespace(NamespaceDescriptor.create(nsName).build()).join();
     assertEquals(3, admin.listNamespaceDescriptors().get().size());
-    TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
-      @Override
-      public boolean evaluate() throws Exception {
-        return zkNamespaceManager.list().size() == 3;
-      }
-    });
-    assertNotNull(zkNamespaceManager.get(nsName));
     // delete namespace and verify
     admin.deleteNamespace(nsName).join();
     assertEquals(2, admin.listNamespaceDescriptors().get().size());
-    assertEquals(2, zkNamespaceManager.list().size());
-    assertNull(zkNamespaceManager.get(nsName));
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index c4285b4..65bc3f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -6396,7 +6396,7 @@ public class TestFromClientSide {
     scan.setCaching(1);
     // Filter out any records
     scan.setFilter(new FilterList(new FirstKeyOnlyFilter(), new InclusiveStopFilter(new byte[0])));
-    try (Table table = TEST_UTIL.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME)) {
+    try (Table table = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME)) {
       try (ResultScanner s = table.getScanner(scan)) {
         assertNull(s.next());
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
index a11064d..388c53d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/AbstractTestDLS.java
@@ -505,19 +505,19 @@ public abstract class AbstractTestDLS {
     LOG.debug("Waiting for no more RIT\n");
     blockUntilNoRIT(zkw, master);
     NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
-    LOG.debug("Verifying only catalog and namespace regions are assigned\n");
-    if (regions.size() != 2) {
+    LOG.debug("Verifying only catalog region is assigned\n");
+    if (regions.size() != 1) {
       for (String oregion : regions)
         LOG.debug("Region still online: " + oregion);
     }
-    assertEquals(2 + existingRegions, regions.size());
+    assertEquals(1 + existingRegions, regions.size());
     LOG.debug("Enabling table\n");
     TEST_UTIL.getAdmin().enableTable(tableName);
     LOG.debug("Waiting for no more RIT\n");
     blockUntilNoRIT(zkw, master);
     LOG.debug("Verifying there are " + numRegions + " assigned on cluster\n");
     regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
-    assertEquals(numRegions + 2 + existingRegions, regions.size());
+    assertEquals(numRegions + 1 + existingRegions, regions.size());
     return table;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
index b079501..75d9ee1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.master;
 import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CompatibilityFactory;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -42,40 +41,51 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos;
 
-@Category({MasterTests.class, MediumTests.class})
+@Category({ MasterTests.class, MediumTests.class })
 public class TestMasterMetrics {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestMasterMetrics.class);
+    HBaseClassTestRule.forClass(TestMasterMetrics.class);
 
   private static final Logger LOG = LoggerFactory.getLogger(TestMasterMetrics.class);
-  private static final MetricsAssertHelper metricsHelper = CompatibilityFactory
-      .getInstance(MetricsAssertHelper.class);
+  private static final MetricsAssertHelper metricsHelper =
+    CompatibilityFactory.getInstance(MetricsAssertHelper.class);
 
   private static MiniHBaseCluster cluster;
   private static HMaster master;
-  private static HBaseTestingUtility TEST_UTIL;
+  private static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
 
   public static class MyMaster extends HMaster {
+
     public MyMaster(Configuration conf) throws IOException, KeeperException, InterruptedException {
       super(conf);
     }
+
     @Override
-    protected void tryRegionServerReport(
-        long reportStartTime, long reportEndTime) {
+    protected void tryRegionServerReport(long reportStartTime, long reportEndTime) {
       // do nothing
     }
+  }
+
+  public static class MyRegionServer extends MiniHBaseCluster.MiniHBaseClusterRegionServer {
+
+    public MyRegionServer(Configuration conf) throws IOException, InterruptedException {
+      super(conf);
+    }
 
+    @Override
+    protected void tryRegionServerReport(long reportStartTime, long reportEndTime) {
+      // do nothing
+    }
   }
 
   @BeforeClass
   public static void startCluster() throws Exception {
     LOG.info("Starting cluster");
-    TEST_UTIL = new HBaseTestingUtility();
     // Set master class and use default values for other options.
-    StartMiniClusterOption option = StartMiniClusterOption.builder()
-        .masterClass(MyMaster.class).build();
+    StartMiniClusterOption option = StartMiniClusterOption.builder().masterClass(MyMaster.class)
+      .rsClass(MyRegionServer.class).build();
     TEST_UTIL.startMiniCluster(option);
     cluster = TEST_UTIL.getHBaseCluster();
     LOG.info("Waiting for active/ready master");
@@ -85,61 +95,44 @@ public class TestMasterMetrics {
 
   @AfterClass
   public static void after() throws Exception {
-    if (TEST_UTIL != null) {
-      TEST_UTIL.shutdownMiniCluster();
-    }
+    master.stopMaster();
+    TEST_UTIL.shutdownMiniCluster();
   }
 
   @Test
   public void testClusterRequests() throws Exception {
-
     // sending fake request to master to see how metric value has changed
-
     RegionServerStatusProtos.RegionServerReportRequest.Builder request =
-        RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
+      RegionServerStatusProtos.RegionServerReportRequest.newBuilder();
     ServerName serverName = cluster.getMaster(0).getServerName();
     request.setServer(ProtobufUtil.toServerName(serverName));
     long expectedRequestNumber = 10000;
 
     MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
     ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
-                                           .setTotalNumberOfRequests(expectedRequestNumber)
-                                           .build();
+      .setTotalNumberOfRequests(expectedRequestNumber).build();
     request.setLoad(sl);
 
     master.getMasterRpcServices().regionServerReport(null, request.build());
-    boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
-    if (tablesOnMaster) {
-      metricsHelper.assertCounter("cluster_requests", expectedRequestNumber, masterSource);
-    } else {
-      metricsHelper.assertCounterGt("cluster_requests", expectedRequestNumber, masterSource);
-
-    }
+    metricsHelper.assertCounter("cluster_requests", expectedRequestNumber, masterSource);
 
     expectedRequestNumber = 15000;
 
-    sl = ClusterStatusProtos.ServerLoad.newBuilder()
-        .setTotalNumberOfRequests(expectedRequestNumber)
-        .build();
+    sl = ClusterStatusProtos.ServerLoad.newBuilder().setTotalNumberOfRequests(expectedRequestNumber)
+      .build();
     request.setLoad(sl);
 
     master.getMasterRpcServices().regionServerReport(null, request.build());
-    if (tablesOnMaster) {
-      metricsHelper.assertCounter("cluster_requests", expectedRequestNumber, masterSource);
-    } else {
-      metricsHelper.assertCounterGt("cluster_requests", expectedRequestNumber, masterSource);
-    }
-
-    master.stopMaster();
+    metricsHelper.assertCounter("cluster_requests", expectedRequestNumber, masterSource);
   }
 
   @Test
   public void testDefaultMasterMetrics() throws Exception {
     MetricsMasterSource masterSource = master.getMasterMetrics().getMetricsSource();
     boolean tablesOnMaster = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration());
-    metricsHelper.assertGauge( "numRegionServers",1 + (tablesOnMaster? 1: 0), masterSource);
-    metricsHelper.assertGauge( "averageLoad", 1 + (tablesOnMaster? 0: 1), masterSource);
-    metricsHelper.assertGauge( "numDeadRegionServers", 0, masterSource);
+    metricsHelper.assertGauge("numRegionServers", 1 + (tablesOnMaster ? 1 : 0), masterSource);
+    metricsHelper.assertGauge("averageLoad", 1, masterSource);
+    metricsHelper.assertGauge("numDeadRegionServers", 0, masterSource);
 
     metricsHelper.assertGauge("masterStartTime", master.getMasterStartTime(), masterSource);
     metricsHelper.assertGauge("masterActiveTime", master.getMasterActiveTime(), masterSource);

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
index 3f4590c..4d5c946 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterOperationsForRegionReplicas.java
@@ -200,7 +200,7 @@ public class TestMasterOperationsForRegionReplicas {
       StartMiniClusterOption option = StartMiniClusterOption.builder()
           .numRegionServers(numSlaves).rsPorts(rsports).build();
       TEST_UTIL.startMiniHBaseCluster(option);
-      TEST_UTIL.waitTableEnabled(tableName);
+      TEST_UTIL.waitTableAvailable(tableName);
       validateFromSnapshotFromMeta(TEST_UTIL, tableName, numRegions, numReplica,
         ADMIN.getConnection());
 
@@ -208,7 +208,7 @@ public class TestMasterOperationsForRegionReplicas {
       // one server running
       TEST_UTIL.shutdownMiniHBaseCluster();
       TEST_UTIL.startMiniHBaseCluster();
-      TEST_UTIL.waitTableEnabled(tableName);
+      TEST_UTIL.waitTableAvailable(tableName);
       validateSingleRegionServerAssignment(ADMIN.getConnection(), numRegions, numReplica);
       for (int i = 1; i < numSlaves; i++) { //restore the cluster
         TEST_UTIL.getMiniHBaseCluster().startRegionServer();
@@ -334,7 +334,7 @@ public class TestMasterOperationsForRegionReplicas {
       connection);
     snapshot.initialize();
     Map<RegionInfo, ServerName> regionToServerMap = snapshot.getRegionToRegionServerMap();
-    assert(regionToServerMap.size() == numRegions * numReplica + 1); //'1' for the namespace
+    assert(regionToServerMap.size() == numRegions * numReplica);
     Map<ServerName, List<RegionInfo>> serverToRegionMap = snapshot.getRegionServerToRegionMap();
     for (Map.Entry<ServerName, List<RegionInfo>> entry : serverToRegionMap.entrySet()) {
       if (entry.getKey().equals(util.getHBaseCluster().getMaster().getServerName())) {
@@ -361,14 +361,14 @@ public class TestMasterOperationsForRegionReplicas {
       connection);
     snapshot.initialize();
     Map<RegionInfo, ServerName>  regionToServerMap = snapshot.getRegionToRegionServerMap();
-    assertEquals(regionToServerMap.size(), numRegions * numReplica + 1);
+    assertEquals(regionToServerMap.size(), numRegions * numReplica);
     Map<ServerName, List<RegionInfo>> serverToRegionMap = snapshot.getRegionServerToRegionMap();
     assertEquals("One Region Only", 1, serverToRegionMap.keySet().size());
     for (Map.Entry<ServerName, List<RegionInfo>> entry : serverToRegionMap.entrySet()) {
       if (entry.getKey().equals(TEST_UTIL.getHBaseCluster().getMaster().getServerName())) {
         continue;
       }
-      assertEquals(entry.getValue().size(), numRegions * numReplica + 1);
+      assertEquals(entry.getValue().size(), numRegions * numReplica);
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
index 665e5c6..fca2866 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
@@ -91,9 +91,8 @@ public class TestMasterRestartAfterDisablingTable {
     TEST_UTIL.getAdmin().disableTable(tableName);
 
     NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
-    assertEquals(
-        "The number of regions for the table tableRestart should be 0 and only"
-            + "the catalog and namespace tables should be present.", 2, regions.size());
+    assertEquals("The number of regions for the table tableRestart should be 0 and only" +
+      "the catalog table should be present.", 1, regions.size());
 
     List<MasterThread> masterThreads = cluster.getMasterThreads();
     MasterThread activeMaster = null;
@@ -120,12 +119,10 @@ public class TestMasterRestartAfterDisablingTable {
     TEST_UTIL.waitUntilNoRegionsInTransition(60000);
     log("Verifying there are " + numRegions + " assigned on cluster\n");
     regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
-    assertEquals("The assigned regions were not onlined after master"
-        + " switch except for the catalog and namespace tables.",
-          6, regions.size());
-    assertTrue("The table should be in enabled state",
-        cluster.getMaster().getTableStateManager()
-        .isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED));
+    assertEquals("The assigned regions were not onlined after master" +
+      " switch except for the catalog table.", 5, regions.size());
+    assertTrue("The table should be in enabled state", cluster.getMaster().getTableStateManager()
+      .isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED));
     ht.close();
     TEST_UTIL.shutdownMiniCluster();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java
new file mode 100644
index 0000000..d114317
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMigrateNamespaceTable.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineNamespaceProcedure;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+
+/**
+ * Testcase for HBASE-21154.
+ */
+@Category({ MasterTests.class, LargeTests.class })
+public class TestMigrateNamespaceTable {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestMigrateNamespaceTable.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL.startMiniCluster(1);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testMigrate() throws IOException, InterruptedException {
+    UTIL.getAdmin().createTable(TableDescriptorBuilder.NAMESPACE_TABLEDESC);
+    try (Table table = UTIL.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME)) {
+      for (int i = 0; i < 5; i++) {
+        NamespaceDescriptor nd = NamespaceDescriptor.create("Test-NS-" + i)
+          .addConfiguration("key-" + i, "value-" + i).build();
+        table.put(new Put(Bytes.toBytes(nd.getName())).addColumn(
+          TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES,
+          TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES,
+          ProtobufUtil.toProtoNamespaceDescriptor(nd).toByteArray()));
+        AbstractStateMachineNamespaceProcedure
+          .createDirectory(UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem(), nd);
+      }
+    }
+    MasterThread masterThread = UTIL.getMiniHBaseCluster().getMasterThread();
+    masterThread.getMaster().stop("For testing");
+    masterThread.join();
+    UTIL.getMiniHBaseCluster().startMaster();
+    // 5 created above, plus the 'default' and system 'hbase' namespaces
+    assertEquals(7, UTIL.getAdmin().listNamespaceDescriptors().length);
+    for (int i = 0; i < 5; i++) {
+      NamespaceDescriptor nd = UTIL.getAdmin().getNamespaceDescriptor("Test-NS-" + i);
+      assertEquals("Test-NS-" + i, nd.getName());
+      assertEquals(1, nd.getConfiguration().size());
+      assertEquals("value-" + i, nd.getConfigurationValue("key-" + i));
+    }
+    UTIL.waitFor(30000, () -> UTIL.getAdmin().isTableDisabled(TableName.NAMESPACE_TABLE_NAME));
+
+    masterThread = UTIL.getMiniHBaseCluster().getMasterThread();
+    masterThread.getMaster().stop("For testing");
+    masterThread.join();
+
+    UTIL.getMiniHBaseCluster().startMaster();
+
+    // make sure that we can still restart the cluster after disabling the namespace table.
+    assertEquals(7, UTIL.getAdmin().listNamespaceDescriptors().length);
+
+    // let's delete the namespace table
+    UTIL.getAdmin().deleteTable(TableName.NAMESPACE_TABLE_NAME);
+    assertFalse(UTIL.getAdmin().tableExists(TableName.NAMESPACE_TABLE_NAME));
+  }
+}
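
For readers of the test above: the migration it exercises boils down to copying rows from the legacy hbase:namespace table into the ns family of hbase:meta on master startup, after which the legacy table can be disabled and dropped. A simplified, client-API-level sketch of that copy follows; the real work happens inside the master, so treat this as illustrative only.

import java.io.IOException;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public final class NamespaceMigrationSketch {

  public static void migrate(Connection conn) throws IOException {
    try (Table legacy = conn.getTable(TableName.NAMESPACE_TABLE_NAME);
        Table meta = conn.getTable(TableName.META_TABLE_NAME);
        ResultScanner scanner = legacy.getScanner(new Scan())) {
      for (Result row : scanner) {
        // Same row key (the namespace name); cells move into the ns family.
        Put put = new Put(row.getRow());
        for (Cell cell : row.rawCells()) {
          put.addColumn(HConstants.NAMESPACE_FAMILY, CellUtil.cloneQualifier(cell),
            CellUtil.cloneValue(cell));
        }
        meta.put(put);
      }
    }
    // Once copied, the legacy table is no longer needed.
    try (Admin admin = conn.getAdmin()) {
      admin.disableTable(TableName.NAMESPACE_TABLE_NAME);
      admin.deleteTable(TableName.NAMESPACE_TABLE_NAME);
    }
  }

  private NamespaceMigrationSketch() {
  }
}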

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
index 04720cd..4ba1876 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRestartCluster.java
@@ -81,7 +81,7 @@ public class TestRestartCluster {
     }
 
     List<RegionInfo> allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false);
-    assertEquals(4, allRegions.size());
+    assertEquals(3, allRegions.size());
 
     LOG.info("\n\nShutting down cluster");
     UTIL.shutdownMiniHBaseCluster();
@@ -96,7 +96,7 @@ public class TestRestartCluster {
     // Otherwise we're reusing an Connection that has gone stale because
     // the shutdown of the cluster also called shut of the connection.
     allRegions = MetaTableAccessor.getAllRegions(UTIL.getConnection(), false);
-    assertEquals(4, allRegions.size());
+    assertEquals(3, allRegions.size());
     LOG.info("\n\nWaiting for tables to be available");
     for(TableName TABLE: TABLES) {
       try {
@@ -201,9 +201,6 @@ public class TestRestartCluster {
       snapshot.getRegionToRegionServerMap();
     assertEquals(regionToRegionServerMap.size(), newRegionToRegionServerMap.size());
     for (Map.Entry<RegionInfo, ServerName> entry : newRegionToRegionServerMap.entrySet()) {
-      if (TableName.NAMESPACE_TABLE_NAME.equals(entry.getKey().getTable())) {
-        continue;
-      }
       ServerName oldServer = regionToRegionServerMap.get(entry.getKey());
       ServerName currentServer = entry.getValue();
       LOG.info(

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
index ff8ad0b..8a6f708 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
@@ -103,11 +103,13 @@ public class TestRollingRestart {
     log("Waiting for no more RIT\n");
     TEST_UTIL.waitUntilNoRegionsInTransition(60000);
     NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
-    log("Verifying only catalog and namespace regions are assigned\n");
-    if (regions.size() != 2) {
-      for (String oregion : regions) log("Region still online: " + oregion);
+    log("Verifying only catalog region is assigned\n");
+    if (regions.size() != 1) {
+      for (String oregion : regions) {
+        log("Region still online: " + oregion);
+      }
     }
-    assertEquals(2, regions.size());
+    assertEquals(1, regions.size());
     log("Enabling table\n");
     TEST_UTIL.getAdmin().enableTable(tableName);
     log("Waiting for no more RIT\n");

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java
index 05047c6..bb95a6f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionMoveAndAbandon.java
@@ -64,6 +64,7 @@ public class TestRegionMoveAndAbandon {
   private MiniZooKeeperCluster zkCluster;
   private HRegionServer rs1;
   private HRegionServer rs2;
+  private TableName tableName;
   private RegionInfo regionInfo;
 
   @Before
@@ -75,10 +76,10 @@ public class TestRegionMoveAndAbandon {
     rs1 = cluster.getRegionServer(0);
     rs2 = cluster.getRegionServer(1);
     assertEquals(2, cluster.getRegionServerThreads().size());
-    // We'll use hbase:namespace for our testing
-    UTIL.waitTableAvailable(TableName.NAMESPACE_TABLE_NAME, 30_000);
-    regionInfo =
-      Iterables.getOnlyElement(cluster.getRegions(TableName.NAMESPACE_TABLE_NAME)).getRegionInfo();
+    tableName = TableName.valueOf(name.getMethodName());
+    UTIL.createTable(tableName, Bytes.toBytes("cf"));
+    UTIL.waitTableAvailable(tableName, 30_000);
+    regionInfo = Iterables.getOnlyElement(cluster.getRegions(tableName)).getRegionInfo();
   }
 
   @After
@@ -105,7 +106,7 @@ public class TestRegionMoveAndAbandon {
     // Stop RS1
     cluster.killRegionServer(rs1.getServerName());
     // Region should get moved to RS2
-    UTIL.waitTableAvailable(TableName.NAMESPACE_TABLE_NAME, 30_000);
+    UTIL.waitTableAvailable(tableName, 30_000);
     // Restart the master
     LOG.info("Killing master {}", cluster.getMaster().getServerName());
     cluster.killMaster(cluster.getMaster().getServerName());
@@ -120,7 +121,7 @@ public class TestRegionMoveAndAbandon {
     UTIL.waitFor(30_000, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        try (Table nsTable = UTIL.getConnection().getTable(TableName.NAMESPACE_TABLE_NAME)) {
+        try (Table nsTable = UTIL.getConnection().getTable(tableName)) {
           // Doesn't matter what we're getting. We just want to make sure we can access the region
           nsTable.get(new Get(Bytes.toBytes("a")));
           return true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
index a74a3e5..4bc566c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
@@ -160,7 +160,6 @@ public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
     LoadBalancer balancer = master.getLoadBalancer();
     List<RegionInfo> regions = admin.getRegions(tableName);
     regions.addAll(admin.getTableRegions(TableName.META_TABLE_NAME));
-    regions.addAll(admin.getTableRegions(TableName.NAMESPACE_TABLE_NAME));
     List<ServerName> servers = Lists.newArrayList(
       admin.getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().keySet());
     Map<ServerName, List<RegionInfo>> map = balancer.roundRobinAssignment(regions, servers);

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 66e72d0..484ebaa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -902,7 +902,7 @@ public class TestMasterProcedureScheduler {
 
     @Override
     public TableName getTableName() {
-      return TableName.NAMESPACE_TABLE_NAME;
+      return TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME;
     }
 
     @Override
@@ -1009,7 +1009,7 @@ public class TestMasterProcedureScheduler {
 
     LockedResource tableResource = locks.get(1);
     assertLockResource(tableResource, LockedResourceType.TABLE,
-      TableName.NAMESPACE_TABLE_NAME.getNameAsString());
+      TableProcedureInterface.DUMMY_NAMESPACE_TABLE_NAME.getNameAsString());
     assertSharedLock(tableResource, 1);
     assertTrue(tableResource.getWaitingProcedures().isEmpty());
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java
index e76aacd..4622f79 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java
@@ -240,11 +240,9 @@ public class TestModifyNamespaceProcedure {
     ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
 
     // Modify
-    nsd.setConfiguration(nsKey, nsValue);
-
     // Start the Modify procedure && kill the executor
-    long procId = procExec.submitProcedure(
-      new ModifyNamespaceProcedure(procExec.getEnvironment(), nsd));
+    long procId = procExec.submitProcedure(new ModifyNamespaceProcedure(procExec.getEnvironment(),
+      NamespaceDescriptor.create(nsd).addConfiguration(nsKey, nsValue).build()));
 
     int lastStep = 2; // failing before MODIFY_NAMESPACE_UPDATE_NS_TABLE
     MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, lastStep);
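
The rewritten submission above leans on NamespaceDescriptor's builder: rather than mutating the shared nsd instance before submitting (as the old code did), it derives a fresh descriptor, which keeps the procedure's serialized state consistent. The pattern in isolation (class name hypothetical):

import org.apache.hadoop.hbase.NamespaceDescriptor;

public final class NamespaceBuilderSketch {

  // Derive a modified descriptor instead of mutating the original in place.
  public static NamespaceDescriptor withConfig(NamespaceDescriptor base, String key,
      String value) {
    return NamespaceDescriptor.create(base).addConfiguration(key, value).build();
  }

  private NamespaceBuilderSketch() {
  }
}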

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
index 32fb173..9ebad27 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestProcedurePriority.java
@@ -136,8 +136,6 @@ public class TestProcedurePriority {
       .stream().filter(t -> !t.getRegionServer().getRegions(TableName.META_TABLE_NAME).isEmpty())
       .findAny().get();
     HRegionServer rsNoMeta = UTIL.getOtherRegionServer(rsWithMetaThread.getRegionServer());
-    // wait for NS table initialization to avoid our error inject affecting master initialization
-    UTIL.waitTableAvailable(TableName.NAMESPACE_TABLE_NAME);
     FAIL = true;
     UTIL.getMiniHBaseCluster().killRegionServer(rsNoMeta.getServerName());
     // wait until all the worker thread are stuck, which means that the stuck checker will start to

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestLogRoller.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestLogRoller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestLogRoller.java
index e8c9423..7892d44 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestLogRoller.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestLogRoller.java
@@ -24,9 +24,11 @@ import static org.junit.Assert.assertTrue;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.regionserver.wal.FSHLog;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.junit.After;
 import org.junit.Before;
@@ -49,6 +51,9 @@ public class TestLogRoller {
   public void setup() throws Exception {
     TEST_UTIL.getConfiguration().setInt("hbase.regionserver.logroll.period", logRollPeriod);
     TEST_UTIL.startMiniCluster(1);
+    TableName name = TableName.valueOf("Test");
+    TEST_UTIL.createTable(name, Bytes.toBytes("cf"));
+    TEST_UTIL.waitTableAvailable(name);
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index 9bbce09..0e7c019 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -443,7 +443,6 @@ public class TestPerColumnFamilyFlush {
    * When a log roll is about to happen, we do a flush of the regions who will be affected by the
    * log roll. These flushes cannot be a selective flushes, otherwise we cannot roll the logs. This
    * test ensures that we do a full-flush in that scenario.
-   * @throws IOException
    */
   @Test
   public void testFlushingWhenLogRolling() throws Exception {
@@ -467,12 +466,6 @@ public class TestPerColumnFamilyFlush {
     TEST_UTIL.startMiniCluster(numRegionServers);
     try {
       Table table = TEST_UTIL.createTable(tableName, FAMILIES);
-      // Force flush the namespace table so edits to it are not hanging around as oldest
-      // edits. Otherwise, below, when we make maximum number of WAL files, then it will be
-      // the namespace region that is flushed and not the below 'desiredRegion'.
-      try (Admin admin = TEST_UTIL.getConnection().getAdmin()) {
-        admin.flush(TableName.NAMESPACE_TABLE_NAME);
-      }
       Pair<HRegion, HRegionServer> desiredRegionAndServer = getRegionWithName(tableName);
       final HRegion desiredRegion = desiredRegionAndServer.getFirst();
       assertTrue("Could not find a region which hosts the new region.", desiredRegion != null);

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java
index d8a9074..a4c0918 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionReplicasWithRestartScenarios.java
@@ -135,7 +135,7 @@ public class TestRegionReplicasWithRestartScenarios {
     checkDuplicates(onlineRegions3);
     assertFalse(res);
     int totalRegions = onlineRegions.size() + onlineRegions2.size() + onlineRegions3.size();
-    assertEquals(62, totalRegions);
+    assertEquals(61, totalRegions);
   }
 
   private boolean checkDuplicates(Collection<HRegion> onlineRegions3) throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 79ee15d..40c3d29 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -239,7 +239,7 @@ public class TestRegionServerMetrics {
 
   @Test
   public void testRegionCount() throws Exception {
-    metricsHelper.assertGauge("regionCount", TABLES_ON_MASTER? 1: 3, serverSource);
+    metricsHelper.assertGauge("regionCount", TABLES_ON_MASTER ? 1 : 2, serverSource);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
index 83bd9ab..4c19aa0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestLogRolling.java
@@ -254,9 +254,6 @@ public abstract class AbstractTestLogRolling  {
       final WAL log = server.getWAL(region.getRegionInfo());
       Store s = region.getStore(HConstants.CATALOG_FAMILY);
 
-      //have to flush namespace to ensure it doesn't affect wall tests
-      admin.flush(TableName.NAMESPACE_TABLE_NAME);
-
       // Put some stuff into table, to make sure we have some files to compact.
       for (int i = 1; i <= 2; ++i) {
         doPut(table, i);

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
index f2c5e50..7faaefb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
@@ -73,14 +73,7 @@ public class TestReplicationWALEntryFilters {
 
     assertNull(filter.filter(metaEntry));
 
-    // ns table
-    WALKeyImpl key2 =
-        new WALKeyImpl(new byte[0], TableName.NAMESPACE_TABLE_NAME, System.currentTimeMillis());
-    Entry nsEntry = new Entry(key2, null);
-    assertNull(filter.filter(nsEntry));
-
     // user table
-
     WALKeyImpl key3 = new WALKeyImpl(new byte[0], TableName.valueOf("foo"),
         System.currentTimeMillis());
     Entry userEntry = new Entry(key3, null);

http://git-wip-us.apache.org/repos/asf/hbase/blob/1acbd36c/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java
index 85ab16d..0855559 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java
@@ -80,7 +80,6 @@ public class TestHBaseFsckMOB extends BaseTestHBaseFsck {
     admin.setBalancerRunning(false, true);
 
     TEST_UTIL.waitUntilAllRegionsAssigned(TableName.META_TABLE_NAME);
-    TEST_UTIL.waitUntilAllRegionsAssigned(TableName.NAMESPACE_TABLE_NAME);
   }
 
   @AfterClass


[44/51] [abbrv] hbase git commit: HBASE-21551 Memory leak when use scan with STREAM at server side - (addendum)

Posted by el...@apache.org.
HBASE-21551 Memory leak when use scan with STREAM at server side - (addendum)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/67ab8b88
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/67ab8b88
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/67ab8b88

Branch: refs/heads/HBASE-20952
Commit: 67ab8b888f8b393979624a2bd7d527fefd9dd6d7
Parents: 3b85485
Author: huzheng <op...@gmail.com>
Authored: Thu Dec 6 11:26:52 2018 +0800
Committer: huzheng <op...@gmail.com>
Committed: Thu Dec 6 11:26:52 2018 +0800

----------------------------------------------------------------------
 .../hbase/regionserver/TestSwitchToStreamRead.java      | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/67ab8b88/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
index 037b13e..c1cecf8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSwitchToStreamRead.java
@@ -47,9 +47,9 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -74,8 +74,8 @@ public class TestSwitchToStreamRead {
 
   private static HRegion REGION;
 
-  @BeforeClass
-  public static void setUp() throws IOException {
+  @Before
+  public void setUp() throws IOException {
     UTIL.getConfiguration().setLong(StoreScanner.STORESCANNER_PREAD_MAX_BYTES, 2048);
     StringBuilder sb = new StringBuilder(256);
     for (int i = 0; i < 255; i++) {
@@ -99,8 +99,8 @@ public class TestSwitchToStreamRead {
     }
   }
 
-  @AfterClass
-  public static void tearDown() throws IOException {
+  @After
+  public void tearDown() throws IOException {
     REGION.close(true);
     UTIL.cleanupTestDir();
   }
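
The substance of this addendum is lifecycle scoping: with @Before/@After the region is created and closed around every test method rather than once per class, so whatever store readers a test left open after switching to STREAM reads are released before the next test runs. A minimal sketch of the per-test pattern, with createRegion() as a hypothetical helper standing in for the setup above:

  private HRegion region;

  @Before
  public void setUp() throws IOException {
    region = createRegion();   // fresh region for every test method (hypothetical helper)
  }

  @After
  public void tearDown() throws IOException {
    region.close(true);        // close after each test, releasing readers the test opened
  }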


[06/51] [abbrv] hbase git commit: HBASE-21141 Enable MOB in backup / restore test involving incremental backup

Posted by el...@apache.org.
HBASE-21141 Enable MOB in backup / restore test involving incremental backup

Signed-off-by: tedyu <yu...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/825e14b6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/825e14b6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/825e14b6

Branch: refs/heads/HBASE-20952
Commit: 825e14b68eefb232482dbc2a416c9844f03d01c4
Parents: 43a10df
Author: Artem Ervits <ge...@gmail.com>
Authored: Fri Nov 16 13:51:03 2018 -0500
Committer: tedyu <yu...@gmail.com>
Committed: Fri Nov 16 13:07:25 2018 -0800

----------------------------------------------------------------------
 .../hbase/backup/TestIncrementalBackup.java     | 275 ++++++++++---------
 1 file changed, 143 insertions(+), 132 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/825e14b6/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
index 48e2c5e..6e15238 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackup.java
@@ -63,7 +63,7 @@ public class TestIncrementalBackup extends TestBackupBase {
   @Parameterized.Parameters
   public static Collection<Object[]> data() {
     provider = "multiwal";
-    List<Object[]> params = new ArrayList<Object[]>();
+    List<Object[]> params = new ArrayList<>();
     params.add(new Object[] { Boolean.TRUE });
     return params;
   }
@@ -71,146 +71,157 @@ public class TestIncrementalBackup extends TestBackupBase {
   public TestIncrementalBackup(Boolean b) {
   }
 
-  // implement all test cases in 1 test since incremental backup/restore has dependencies
+  // implement all test cases in 1 test since incremental
+  // backup/restore has dependencies
   @Test
   public void TestIncBackupRestore() throws Exception {
-
     int ADD_ROWS = 99;
+
     // #1 - create full backup for all tables
     LOG.info("create full backup image for all tables");
-
     List<TableName> tables = Lists.newArrayList(table1, table2);
     final byte[] fam3Name = Bytes.toBytes("f3");
+    final byte[] mobName = Bytes.toBytes("mob");
+
     table1Desc.addFamily(new HColumnDescriptor(fam3Name));
+    HColumnDescriptor mobHcd = new HColumnDescriptor(mobName);
+    mobHcd.setMobEnabled(true);
+    mobHcd.setMobThreshold(5L);
+    table1Desc.addFamily(mobHcd);
     HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
 
-    Connection conn = ConnectionFactory.createConnection(conf1);
-    int NB_ROWS_FAM3 = 6;
-    insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
-
-    HBaseAdmin admin = null;
-    admin = (HBaseAdmin) conn.getAdmin();
-    BackupAdminImpl client = new BackupAdminImpl(conn);
-
-    BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
-    String backupIdFull = client.backupTables(request);
-
-    assertTrue(checkSucceeded(backupIdFull));
-
-    // #2 - insert some data to table
-    HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
-    LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
-
-    Assert.assertEquals(TEST_UTIL.countRows(t1), NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
-    t1.close();
-    LOG.debug("written " + ADD_ROWS + " rows to " + table1);
-
-    HTable t2 = (HTable) conn.getTable(table2);
-    Put p2;
-    for (int i = 0; i < 5; i++) {
-      p2 = new Put(Bytes.toBytes("row-t2" + i));
-      p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
-      t2.put(p2);
-    }
-
-    Assert.assertEquals(TEST_UTIL.countRows(t2), NB_ROWS_IN_BATCH + 5);
-    t2.close();
-    LOG.debug("written " + 5 + " rows to " + table2);
-    // split table1
-    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-    List<HRegion> regions = cluster.getRegions(table1);
-
-    byte[] name = regions.get(0).getRegionInfo().getRegionName();
-    long startSplitTime = EnvironmentEdgeManager.currentTime();
-    try {
-      admin.splitRegion(name);
-    } catch (IOException e) {
-      //although split fail, this may not affect following check
-      //In old split without AM2, if region's best split key is not found,
-      //there are not exception thrown. But in current API, exception
-      //will be thrown.
-      LOG.debug("region is not splittable, because " + e);
-    }
-
-    while (!admin.isTableAvailable(table1)) {
-      Thread.sleep(100);
+    try (Connection conn = ConnectionFactory.createConnection(conf1)) {
+      int NB_ROWS_FAM3 = 6;
+      insertIntoTable(conn, table1, fam3Name, 3, NB_ROWS_FAM3).close();
+      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_FAM3).close();
+      HBaseAdmin admin = (HBaseAdmin) conn.getAdmin();
+      BackupAdminImpl client = new BackupAdminImpl(conn);
+      BackupRequest request = createBackupRequest(BackupType.FULL, tables, BACKUP_ROOT_DIR);
+      String backupIdFull = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdFull));
+
+      // #2 - insert some data to table
+      HTable t1 = insertIntoTable(conn, table1, famName, 1, ADD_ROWS);
+      LOG.debug("writing " + ADD_ROWS + " rows to " + table1);
+      Assert.assertEquals(HBaseTestingUtility.countRows(t1),
+              NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_FAM3);
+      LOG.debug("written " + ADD_ROWS + " rows to " + table1);
+
+      // additionally, insert rows to MOB cf
+      int NB_ROWS_MOB = 111;
+      insertIntoTable(conn, table1, mobName, 3, NB_ROWS_MOB);
+      LOG.debug("written " + NB_ROWS_MOB + " rows to " + table1 + " to Mob enabled CF");
+      t1.close();
+      Assert.assertEquals(HBaseTestingUtility.countRows(t1),
+              NB_ROWS_IN_BATCH + ADD_ROWS + NB_ROWS_MOB);
+
+      HTable t2 = (HTable) conn.getTable(table2);
+      Put p2;
+      for (int i = 0; i < 5; i++) {
+        p2 = new Put(Bytes.toBytes("row-t2" + i));
+        p2.addColumn(famName, qualName, Bytes.toBytes("val" + i));
+        t2.put(p2);
+      }
+      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtility.countRows(t2));
+      t2.close();
+      LOG.debug("written " + 5 + " rows to " + table2);
+
+      // split table1
+      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
+      List<HRegion> regions = cluster.getRegions(table1);
+      byte[] name = regions.get(0).getRegionInfo().getRegionName();
+      long startSplitTime = EnvironmentEdgeManager.currentTime();
+
+      try {
+        admin.splitRegion(name);
+      } catch (IOException e) {
+        // although the split may fail, this may not affect the following check. In the old
+        // split without AM2, no exception was thrown if a region's best split key was not
+        // found; but in the current API, an exception will be thrown.
+        LOG.debug("region is not splittable, because " + e);
+      }
+      while (!admin.isTableAvailable(table1)) {
+        Thread.sleep(100);
+      }
+
+      long endSplitTime = EnvironmentEdgeManager.currentTime();
+      // split finished
+      LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
+
+      // #3 - incremental backup for multiple tables
+      tables = Lists.newArrayList(table1, table2);
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      String backupIdIncMultiple = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdIncMultiple));
+
+      // add column family f2 to table1
+      final byte[] fam2Name = Bytes.toBytes("f2");
+      table1Desc.addFamily(new HColumnDescriptor(fam2Name));
+
+      // drop column family f3
+      table1Desc.removeFamily(fam3Name);
+      HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
+
+      int NB_ROWS_FAM2 = 7;
+      HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
+      t3.close();
+
+      // Wait for 5 sec to make sure that old WALs were deleted
+      Thread.sleep(5000);
+
+      // #4 - additional incremental backup for multiple tables
+      request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
+      String backupIdIncMultiple2 = client.backupTables(request);
+      assertTrue(checkSucceeded(backupIdIncMultiple2));
+
+      // #5 - restore full backup for all tables
+      TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
+      TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
+
+      LOG.debug("Restoring full " + backupIdFull);
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
+                tablesRestoreFull, tablesMapFull, true));
+
+      // #6.1 - check tables for full restore
+      HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
+      assertTrue(hAdmin.tableExists(table1_restore));
+      assertTrue(hAdmin.tableExists(table2_restore));
+      hAdmin.close();
+
+      // #6.2 - checking row count of tables for full restore
+      HTable hTable = (HTable) conn.getTable(table1_restore);
+      Assert.assertEquals(HBaseTestingUtility.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
+      hTable.close();
+
+      hTable = (HTable) conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH, HBaseTestingUtility.countRows(hTable));
+      hTable.close();
+
+      // #7 - restore incremental backup for multiple tables, with overwrite
+      TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
+      TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
+      client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2,
+              false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
+      hTable = (HTable) conn.getTable(table1_restore);
+
+      LOG.debug("After incremental restore: " + hTable.getDescriptor());
+      int countFamName = TEST_UTIL.countRows(hTable, famName);
+      LOG.debug("f1 has " + countFamName + " rows");
+      Assert.assertEquals(countFamName, NB_ROWS_IN_BATCH + ADD_ROWS);
+
+      int countFam2Name = TEST_UTIL.countRows(hTable, fam2Name);
+      LOG.debug("f2 has " + countFam2Name + " rows");
+      Assert.assertEquals(countFam2Name, NB_ROWS_FAM2);
+
+      int countMobName = TEST_UTIL.countRows(hTable, mobName);
+      LOG.debug("mob has " + countMobName + " rows");
+      Assert.assertEquals(countMobName, NB_ROWS_MOB);
+      hTable.close();
+
+      hTable = (HTable) conn.getTable(table2_restore);
+      Assert.assertEquals(NB_ROWS_IN_BATCH + 5, HBaseTestingUtility.countRows(hTable));
+      hTable.close();
+      admin.close();
     }
-
-    long endSplitTime = EnvironmentEdgeManager.currentTime();
-
-    // split finished
-    LOG.debug("split finished in =" + (endSplitTime - startSplitTime));
-
-    // #3 - incremental backup for multiple tables
-    tables = Lists.newArrayList(table1, table2);
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdIncMultiple));
-
-    // add column family f2 to table1
-    final byte[] fam2Name = Bytes.toBytes("f2");
-    table1Desc.addFamily(new HColumnDescriptor(fam2Name));
-    // drop column family f3
-    table1Desc.removeFamily(fam3Name);
-    HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), table1Desc);
-
-    int NB_ROWS_FAM2 = 7;
-    HTable t3 = insertIntoTable(conn, table1, fam2Name, 2, NB_ROWS_FAM2);
-    t3.close();
-    // Wait for 5 sec to make sure that old WALs were deleted
-    Thread.sleep(5000);
-
-    // #3 - incremental backup for multiple tables
-    request = createBackupRequest(BackupType.INCREMENTAL, tables, BACKUP_ROOT_DIR);
-    String backupIdIncMultiple2 = client.backupTables(request);
-    assertTrue(checkSucceeded(backupIdIncMultiple2));
-
-    // #4 - restore full backup for all tables
-    TableName[] tablesRestoreFull = new TableName[] { table1, table2 };
-
-    TableName[] tablesMapFull = new TableName[] { table1_restore, table2_restore };
-
-    LOG.debug("Restoring full " + backupIdFull);
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdFull, false,
-      tablesRestoreFull, tablesMapFull, true));
-
-    // #5.1 - check tables for full restore
-    HBaseAdmin hAdmin = TEST_UTIL.getHBaseAdmin();
-    assertTrue(hAdmin.tableExists(table1_restore));
-    assertTrue(hAdmin.tableExists(table2_restore));
-
-    hAdmin.close();
-
-    // #5.2 - checking row count of tables for full restore
-    HTable hTable = (HTable) conn.getTable(table1_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + NB_ROWS_FAM3);
-    hTable.close();
-
-    hTable = (HTable) conn.getTable(table2_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH);
-    hTable.close();
-
-    // #6 - restore incremental backup for multiple tables, with overwrite
-    TableName[] tablesRestoreIncMultiple = new TableName[] { table1, table2 };
-    TableName[] tablesMapIncMultiple = new TableName[] { table1_restore, table2_restore };
-    client.restore(BackupUtils.createRestoreRequest(BACKUP_ROOT_DIR, backupIdIncMultiple2,
-      false, tablesRestoreIncMultiple, tablesMapIncMultiple, true));
-
-    hTable = (HTable) conn.getTable(table1_restore);
-    LOG.debug("After incremental restore: " + hTable.getDescriptor());
-    LOG.debug("f1 has " + TEST_UTIL.countRows(hTable, famName) + " rows");
-    Assert.assertEquals(TEST_UTIL.countRows(hTable, famName), NB_ROWS_IN_BATCH + ADD_ROWS);
-    LOG.debug("f2 has " + TEST_UTIL.countRows(hTable, fam2Name) + " rows");
-    Assert.assertEquals(TEST_UTIL.countRows(hTable, fam2Name), NB_ROWS_FAM2);
-    hTable.close();
-
-    hTable = (HTable) conn.getTable(table2_restore);
-    Assert.assertEquals(TEST_UTIL.countRows(hTable), NB_ROWS_IN_BATCH + 5);
-    hTable.close();
-
-    admin.close();
-    conn.close();
-
   }
-
-}
+}
\ No newline at end of file
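
For context, enabling MOB on a column family comes down to two flags on the family descriptor: mark it MOB-enabled and set the size threshold above which cell values are written to MOB files instead of regular store files. A minimal sketch using the same HColumnDescriptor API as the test, where tableDesc and the "mob" family name are illustrative:

  HColumnDescriptor mobHcd = new HColumnDescriptor(Bytes.toBytes("mob"));
  mobHcd.setMobEnabled(true);    // store oversized cells as MOB files
  mobHcd.setMobThreshold(5L);    // cells larger than 5 bytes take the MOB path
  tableDesc.addFamily(mobHcd);
  HBaseTestingUtility.modifyTableSync(TEST_UTIL.getAdmin(), tableDesc);

The tiny 5-byte threshold is a test convenience; it forces ordinary test rows through the MOB write path so the backup and restore of MOB data is actually exercised.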


[34/51] [abbrv] hbase git commit: HBASE-21518 TestMasterFailoverWithProcedures is flaky

Posted by el...@apache.org.
HBASE-21518 TestMasterFailoverWithProcedures is flaky

Signed-off-by: Sean Busbey <bu...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dfeab9f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dfeab9f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dfeab9f5

Branch: refs/heads/HBASE-20952
Commit: dfeab9f5c968625ac1c642c53c721eb5e81068c0
Parents: d42e0ad
Author: Peter Somogyi <ps...@apache.org>
Authored: Thu Nov 29 18:04:53 2018 +0100
Committer: Peter Somogyi <ps...@apache.org>
Committed: Fri Nov 30 09:12:06 2018 +0100

----------------------------------------------------------------------
 .../hadoop/hbase/util/JVMClusterUtil.java       | 24 ++++++++++++++------
 1 file changed, 17 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/dfeab9f5/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
index 8c92f66..7518d65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
@@ -249,14 +249,24 @@ public class JVMClusterUtil {
       // Do backups first.
       JVMClusterUtil.MasterThread activeMaster = null;
       for (JVMClusterUtil.MasterThread t : masters) {
-        if (!t.master.isActiveMaster()) {
-          try {
-            t.master.stopMaster();
-          } catch (IOException e) {
-            LOG.error("Exception occurred while stopping master", e);
+        // Master was killed but could still be considered active. Check first if it is stopped.
+        if (!t.master.isStopped()) {
+          if (!t.master.isActiveMaster()) {
+            try {
+              t.master.stopMaster();
+            } catch (IOException e) {
+              LOG.error("Exception occurred while stopping master", e);
+            }
+            LOG.info("Stopped backup Master {} is stopped: {}",
+                t.master.hashCode(), t.master.isStopped());
+          } else {
+            if (activeMaster != null) {
+              LOG.warn("Found more than 1 active master, hash {}", activeMaster.master.hashCode());
+            }
+            activeMaster = t;
+            LOG.debug("Found active master hash={}, stopped={}",
+                t.master.hashCode(), t.master.isStopped());
           }
-        } else {
-          activeMaster = t;
         }
       }
       // Do active after.


[35/51] [abbrv] hbase git commit: HBASE-21486 The current replication implementation for peer in STANDBY state breaks serial replication

Posted by el...@apache.org.
HBASE-21486 The current replication implementation for peer in STANDBY state breaks serial replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/766aa1bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/766aa1bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/766aa1bf

Branch: refs/heads/HBASE-20952
Commit: 766aa1bfccb48b4d228dd86c100fb48e9c9d61fa
Parents: dfeab9f
Author: Duo Zhang <zh...@apache.org>
Authored: Wed Nov 28 18:00:18 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Dec 1 12:15:18 2018 +0800

----------------------------------------------------------------------
 .../src/main/protobuf/MasterProcedure.proto     |  19 ++--
 .../replication/AbstractPeerProcedure.java      |  97 ++++++++++++++++-
 .../master/replication/ModifyPeerProcedure.java |  81 --------------
 ...ransitPeerSyncReplicationStateProcedure.java |  73 +++++++++----
 .../replication/SyncReplicationTestBase.java    |  30 ++++--
 .../replication/TestSerialSyncReplication.java  | 106 +++++++++++++++++++
 .../TestSyncReplicationRemoveRemoteWAL.java     |  21 +---
 7 files changed, 291 insertions(+), 136 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/766aa1bf/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 44ac952..cc0c6ba 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -398,15 +398,16 @@ enum PeerSyncReplicationStateTransitionState {
   PRE_PEER_SYNC_REPLICATION_STATE_TRANSITION = 1;
   SET_PEER_NEW_SYNC_REPLICATION_STATE = 2;
   REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN = 3;
-  REPLAY_REMOTE_WAL_IN_PEER = 4;
-  REMOVE_ALL_REPLICATION_QUEUES_IN_PEER = 5;
-  REOPEN_ALL_REGIONS_IN_PEER = 6;
-  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 7;
-  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 8;
-  SYNC_REPLICATION_SET_PEER_ENABLED = 9;
-  SYNC_REPLICATION_ENABLE_PEER_REFRESH_PEER_ON_RS = 10;
-  CREATE_DIR_FOR_REMOTE_WAL = 11;
-  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 12;
+  REOPEN_ALL_REGIONS_IN_PEER = 4;
+  SYNC_REPLICATION_UPDATE_LAST_PUSHED_SEQ_ID_FOR_SERIAL_PEER = 5;
+  REPLAY_REMOTE_WAL_IN_PEER = 6;
+  REMOVE_ALL_REPLICATION_QUEUES_IN_PEER = 7;
+  TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE = 8;
+  REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_END = 9;
+  SYNC_REPLICATION_SET_PEER_ENABLED = 10;
+  SYNC_REPLICATION_ENABLE_PEER_REFRESH_PEER_ON_RS = 11;
+  CREATE_DIR_FOR_REMOTE_WAL = 12;
+  POST_PEER_SYNC_REPLICATION_STATE_TRANSITION = 13;
 }
 
 message PeerModificationStateData {

http://git-wip-us.apache.org/repos/asf/hbase/blob/766aa1bf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
index 882a050..755e0a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -17,11 +17,27 @@
  */
 package org.apache.hadoop.hbase.master.replication;
 
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.master.TableStateManager;
+import org.apache.hadoop.hbase.master.TableStateManager.TableStateNotFoundException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
+import org.apache.hadoop.hbase.replication.ReplicationUtils;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 
@@ -29,8 +45,15 @@ import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesti
  * The base class for all replication peer related procedure.
  */
 @InterfaceAudience.Private
-public abstract class AbstractPeerProcedure<TState>
-    extends AbstractPeerNoLockProcedure<TState> implements PeerProcedureInterface {
+public abstract class AbstractPeerProcedure<TState> extends AbstractPeerNoLockProcedure<TState>
+    implements PeerProcedureInterface {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbstractPeerProcedure.class);
+
+  protected static final int UPDATE_LAST_SEQ_ID_BATCH_SIZE = 1000;
+
+  // The sleep interval when waiting for the table to be enabled or disabled.
+  protected static final int SLEEP_INTERVAL_MS = 1000;
 
   // used to keep compatible with old client where we can only returns after updateStorage.
   protected ProcedurePrepareLatch latch;
@@ -75,4 +98,74 @@ public abstract class AbstractPeerProcedure<TState>
   protected void enablePeer(MasterProcedureEnv env) throws ReplicationException {
     env.getReplicationPeerManager().enablePeer(peerId);
   }
+
+  private void addToMap(Map<String, Long> lastSeqIds, String encodedRegionName, long barrier,
+      ReplicationQueueStorage queueStorage) throws ReplicationException {
+    if (barrier >= 0) {
+      lastSeqIds.put(encodedRegionName, barrier);
+      if (lastSeqIds.size() >= UPDATE_LAST_SEQ_ID_BATCH_SIZE) {
+        queueStorage.setLastSequenceIds(peerId, lastSeqIds);
+        lastSeqIds.clear();
+      }
+    }
+  }
+
+  protected final void setLastPushedSequenceId(MasterProcedureEnv env,
+      ReplicationPeerConfig peerConfig) throws IOException, ReplicationException {
+    Map<String, Long> lastSeqIds = new HashMap<String, Long>();
+    for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) {
+      if (!td.hasGlobalReplicationScope()) {
+        continue;
+      }
+      TableName tn = td.getTableName();
+      if (!ReplicationUtils.contains(peerConfig, tn)) {
+        continue;
+      }
+      setLastPushedSequenceIdForTable(env, tn, lastSeqIds);
+    }
+    if (!lastSeqIds.isEmpty()) {
+      env.getReplicationPeerManager().getQueueStorage().setLastSequenceIds(peerId, lastSeqIds);
+    }
+  }
+
+  // If the table is currently disabling, then we need to wait until it is disabled. We will
+  // write a replication barrier for a disabled table. Return whether we need to update the last
+  // pushed sequence id: if the table has been deleted already, i.e. we hit
+  // TableStateNotFoundException, then we do not need to update it for this table.
+  private boolean needSetLastPushedSequenceId(TableStateManager tsm, TableName tn)
+      throws IOException {
+    for (;;) {
+      try {
+        if (!tsm.getTableState(tn).isDisabling()) {
+          return true;
+        }
+        Thread.sleep(SLEEP_INTERVAL_MS);
+      } catch (TableStateNotFoundException e) {
+        return false;
+      } catch (InterruptedException e) {
+        throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
+      }
+    }
+  }
+
+  // Puts the encodedRegionName->lastPushedSeqId pair into the map passed in; once the map is
+  // large enough we call queueStorage.setLastSequenceIds and clear it. The caller must check
+  // whether the map is still non-empty at the end, and if so call
+  // queueStorage.setLastSequenceIds to write out the remaining entries.
+  protected final void setLastPushedSequenceIdForTable(MasterProcedureEnv env, TableName tableName,
+      Map<String, Long> lastSeqIds) throws IOException, ReplicationException {
+    TableStateManager tsm = env.getMasterServices().getTableStateManager();
+    ReplicationQueueStorage queueStorage = env.getReplicationPeerManager().getQueueStorage();
+    Connection conn = env.getMasterServices().getConnection();
+    if (!needSetLastPushedSequenceId(tsm, tableName)) {
+      LOG.debug("Skip settting last pushed sequence id for {}", tableName);
+      return;
+    }
+    for (Pair<String, Long> name2Barrier : MetaTableAccessor
+      .getTableEncodedRegionNameAndLastBarrier(conn, tableName)) {
+      LOG.trace("Update last pushed sequence id for {}, {}", tableName, name2Barrier);
+      addToMap(lastSeqIds, name2Barrier.getFirst(), name2Barrier.getSecond().longValue() - 1,
+        queueStorage);
+    }
+  }
 }
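
Restating the contract from the comments above: setLastPushedSequenceIdForTable streams encodedRegionName -> lastPushedSeqId entries into a map supplied by the caller, and addToMap flushes the map to the queue storage whenever it reaches UPDATE_LAST_SEQ_ID_BATCH_SIZE entries, so the final partial batch is the caller's responsibility. A rough sketch of the calling pattern, mirroring setLastPushedSequenceId above (tableNames is a placeholder for whichever tables are in the peer's scope):

  Map<String, Long> lastSeqIds = new HashMap<>();
  for (TableName tn : tableNames) {
    // may flush full batches of UPDATE_LAST_SEQ_ID_BATCH_SIZE entries internally
    setLastPushedSequenceIdForTable(env, tn, lastSeqIds);
  }
  if (!lastSeqIds.isEmpty()) {
    // write out the final partial batch
    env.getReplicationPeerManager().getQueueStorage().setLastSequenceIds(peerId, lastSeqIds);
  }
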

http://git-wip-us.apache.org/repos/asf/hbase/blob/766aa1bf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index 9550fb0..d5d2779 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -19,11 +19,7 @@ package org.apache.hadoop.hbase.master.replication;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.TableStateManager;
@@ -35,9 +31,7 @@ import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationQueueStorage;
 import org.apache.hadoop.hbase.replication.ReplicationUtils;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -55,11 +49,6 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
 
   private static final Logger LOG = LoggerFactory.getLogger(ModifyPeerProcedure.class);
 
-  protected static final int UPDATE_LAST_SEQ_ID_BATCH_SIZE = 1000;
-
-  // The sleep interval when waiting table to be enabled or disabled.
-  protected static final int SLEEP_INTERVAL_MS = 1000;
-
   protected ModifyPeerProcedure() {
   }
 
@@ -169,76 +158,6 @@ public abstract class ModifyPeerProcedure extends AbstractPeerProcedure<PeerModi
     }
   }
 
-  private void addToMap(Map<String, Long> lastSeqIds, String encodedRegionName, long barrier,
-      ReplicationQueueStorage queueStorage) throws ReplicationException {
-    if (barrier >= 0) {
-      lastSeqIds.put(encodedRegionName, barrier);
-      if (lastSeqIds.size() >= UPDATE_LAST_SEQ_ID_BATCH_SIZE) {
-        queueStorage.setLastSequenceIds(peerId, lastSeqIds);
-        lastSeqIds.clear();
-      }
-    }
-  }
-
-  protected final void setLastPushedSequenceId(MasterProcedureEnv env,
-      ReplicationPeerConfig peerConfig) throws IOException, ReplicationException {
-    Map<String, Long> lastSeqIds = new HashMap<String, Long>();
-    for (TableDescriptor td : env.getMasterServices().getTableDescriptors().getAll().values()) {
-      if (!td.hasGlobalReplicationScope()) {
-        continue;
-      }
-      TableName tn = td.getTableName();
-      if (!ReplicationUtils.contains(peerConfig, tn)) {
-        continue;
-      }
-      setLastPushedSequenceIdForTable(env, tn, lastSeqIds);
-    }
-    if (!lastSeqIds.isEmpty()) {
-      env.getReplicationPeerManager().getQueueStorage().setLastSequenceIds(peerId, lastSeqIds);
-    }
-  }
-
-  // If the table is currently disabling, then we need to wait until it is disabled.We will write
-  // replication barrier for a disabled table. And return whether we need to update the last pushed
-  // sequence id, if the table has been deleted already, i.e, we hit TableStateNotFoundException,
-  // then we do not need to update last pushed sequence id for this table.
-  private boolean needSetLastPushedSequenceId(TableStateManager tsm, TableName tn)
-      throws IOException {
-    for (;;) {
-      try {
-        if (!tsm.getTableState(tn).isDisabling()) {
-          return true;
-        }
-        Thread.sleep(SLEEP_INTERVAL_MS);
-      } catch (TableStateNotFoundException e) {
-        return false;
-      } catch (InterruptedException e) {
-        throw (IOException) new InterruptedIOException(e.getMessage()).initCause(e);
-      }
-    }
-  }
-
-  // Will put the encodedRegionName->lastPushedSeqId pair into the map passed in, if the map is
-  // large enough we will call queueStorage.setLastSequenceIds and clear the map. So the caller
-  // should not forget to check whether the map is empty at last, if not you should call
-  // queueStorage.setLastSequenceIds to write out the remaining entries in the map.
-  protected final void setLastPushedSequenceIdForTable(MasterProcedureEnv env, TableName tableName,
-      Map<String, Long> lastSeqIds) throws IOException, ReplicationException {
-    TableStateManager tsm = env.getMasterServices().getTableStateManager();
-    ReplicationQueueStorage queueStorage = env.getReplicationPeerManager().getQueueStorage();
-    Connection conn = env.getMasterServices().getConnection();
-    if (!needSetLastPushedSequenceId(tsm, tableName)) {
-      LOG.debug("Skip settting last pushed sequence id for {}", tableName);
-      return;
-    }
-    for (Pair<String, Long> name2Barrier : MetaTableAccessor
-      .getTableEncodedRegionNameAndLastBarrier(conn, tableName)) {
-      LOG.trace("Update last pushed sequence id for {}, {}", tableName, name2Barrier);
-      addToMap(lastSeqIds, name2Barrier.getFirst(), name2Barrier.getSecond().longValue() - 1,
-        queueStorage);
-    }
-  }
-
   @Override
   protected Flow executeFromState(MasterProcedureEnv env, PeerModificationState state)
       throws ProcedureSuspendedException, InterruptedException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/766aa1bf/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
index 8c6232f..fcf41be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/TransitPeerSyncReplicationStateProcedure.java
@@ -50,7 +50,7 @@ public class TransitPeerSyncReplicationStateProcedure
     extends AbstractPeerProcedure<PeerSyncReplicationStateTransitionState> {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(TransitPeerSyncReplicationStateProcedure.class);
+    LoggerFactory.getLogger(TransitPeerSyncReplicationStateProcedure.class);
 
   protected SyncReplicationState fromState;
 
@@ -58,6 +58,8 @@ public class TransitPeerSyncReplicationStateProcedure
 
   private boolean enabled;
 
+  private boolean serial;
+
   public TransitPeerSyncReplicationStateProcedure() {
   }
 
@@ -75,8 +77,8 @@ public class TransitPeerSyncReplicationStateProcedure
   protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
     super.serializeStateData(serializer);
     TransitPeerSyncReplicationStateStateData.Builder builder =
-        TransitPeerSyncReplicationStateStateData.newBuilder()
-          .setToState(ReplicationPeerConfigUtil.toSyncReplicationState(toState));
+      TransitPeerSyncReplicationStateStateData.newBuilder()
+        .setToState(ReplicationPeerConfigUtil.toSyncReplicationState(toState));
     if (fromState != null) {
       builder.setFromState(ReplicationPeerConfigUtil.toSyncReplicationState(fromState));
     }
@@ -87,7 +89,7 @@ public class TransitPeerSyncReplicationStateProcedure
   protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
     super.deserializeStateData(serializer);
     TransitPeerSyncReplicationStateStateData data =
-        serializer.deserialize(TransitPeerSyncReplicationStateStateData.class);
+      serializer.deserialize(TransitPeerSyncReplicationStateStateData.class);
     toState = ReplicationPeerConfigUtil.toSyncReplicationState(data.getToState());
     if (data.hasFromState()) {
       fromState = ReplicationPeerConfigUtil.toSyncReplicationState(data.getFromState());
@@ -129,6 +131,7 @@ public class TransitPeerSyncReplicationStateProcedure
     }
     fromState = desc.getSyncReplicationState();
     enabled = desc.isEnabled();
+    serial = desc.getPeerConfig().isSerial();
   }
 
   private void postTransit(MasterProcedureEnv env) throws IOException {
@@ -174,7 +177,11 @@ public class TransitPeerSyncReplicationStateProcedure
         : PeerSyncReplicationStateTransitionState.REOPEN_ALL_REGIONS_IN_PEER);
     } else {
       assert toState.equals(SyncReplicationState.DOWNGRADE_ACTIVE);
-      setNextState(PeerSyncReplicationStateTransitionState.REPLAY_REMOTE_WAL_IN_PEER);
+      // for a serial peer, we need to reopen all the regions and then update the last pushed
+      // sequence id before replaying any remote wals, so that serial replication will not get
+      // stuck, and also to guarantee the order when replicating the remote wal back.
+      setNextState(serial ? PeerSyncReplicationStateTransitionState.REOPEN_ALL_REGIONS_IN_PEER
+        : PeerSyncReplicationStateTransitionState.REPLAY_REMOTE_WAL_IN_PEER);
     }
   }
 
@@ -183,6 +190,11 @@ public class TransitPeerSyncReplicationStateProcedure
       setNextState(
         enabled ? PeerSyncReplicationStateTransitionState.SYNC_REPLICATION_SET_PEER_ENABLED
           : PeerSyncReplicationStateTransitionState.CREATE_DIR_FOR_REMOTE_WAL);
+    } else if (fromState == SyncReplicationState.STANDBY) {
+      assert toState.equals(SyncReplicationState.DOWNGRADE_ACTIVE);
+      setNextState(serial && enabled
+        ? PeerSyncReplicationStateTransitionState.SYNC_REPLICATION_SET_PEER_ENABLED
+        : PeerSyncReplicationStateTransitionState.POST_PEER_SYNC_REPLICATION_STATE_TRANSITION);
     } else {
       setNextState(
         PeerSyncReplicationStateTransitionState.POST_PEER_SYNC_REPLICATION_STATE_TRANSITION);
@@ -196,14 +208,20 @@ public class TransitPeerSyncReplicationStateProcedure
   @VisibleForTesting
   protected void setPeerNewSyncReplicationState(MasterProcedureEnv env)
       throws ReplicationException {
-    env.getReplicationPeerManager().setPeerNewSyncReplicationState(peerId, toState);
-    if (toState.equals(SyncReplicationState.STANDBY) && enabled) {
-      // disable the peer if we are going to transit to STANDBY state, as we need to remove
+    if (toState.equals(SyncReplicationState.STANDBY) ||
+      (fromState.equals(SyncReplicationState.STANDBY) && serial) && enabled) {
+      // Disable the peer if we are going to transit to STANDBY state, as we need to remove
       // all the pending replication files. If we do not disable the peer and delete the wal
       // queues on zk directly, RS will get NoNode exception when updating the wal position
       // and crash.
+      // Disable the peer if we are going to transit from STANDBY to DOWNGRADE_ACTIVE, and the
+      // replication is serial, as we need to update the lastPushedSequence id after we reopen all
+      // the regions, and for performance reason here we will update in batch, without using CAS, if
+      // we are still replicating at RS side, we may accidentally update the last pushed sequence id
+      // to a less value and cause the replication to be stuck.
       env.getReplicationPeerManager().disablePeer(peerId);
     }
+    env.getReplicationPeerManager().setPeerNewSyncReplicationState(peerId, toState);
   }
 
   @VisibleForTesting
@@ -240,7 +258,7 @@ public class TransitPeerSyncReplicationStateProcedure
           long backoff = ProcedureUtil.getBackoffTimeMs(attempts);
           LOG.warn(
             "Failed to update peer storage for peer {} when starting transiting sync " +
-                "replication peer state from {} to {}, sleep {} secs and retry",
+              "replication peer state from {} to {}, sleep {} secs and retry",
             peerId, fromState, toState, backoff / 1000, e);
           throw suspend(backoff);
         }
@@ -254,6 +272,30 @@ public class TransitPeerSyncReplicationStateProcedure
           .toArray(RefreshPeerProcedure[]::new));
         setNextStateAfterRefreshBegin();
         return Flow.HAS_MORE_STATE;
+      case REOPEN_ALL_REGIONS_IN_PEER:
+        reopenRegions(env);
+        if (fromState.equals(SyncReplicationState.STANDBY)) {
+          assert serial;
+          setNextState(
+            PeerSyncReplicationStateTransitionState.SYNC_REPLICATION_UPDATE_LAST_PUSHED_SEQ_ID_FOR_SERIAL_PEER);
+        } else {
+          setNextState(
+            PeerSyncReplicationStateTransitionState.TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE);
+        }
+        return Flow.HAS_MORE_STATE;
+      case SYNC_REPLICATION_UPDATE_LAST_PUSHED_SEQ_ID_FOR_SERIAL_PEER:
+        try {
+          setLastPushedSequenceId(env, env.getReplicationPeerManager().getPeerConfig(peerId).get());
+        } catch (Exception e) {
+          long backoff = ProcedureUtil.getBackoffTimeMs(attempts);
+          LOG.warn(
+            "Failed to update last pushed sequence id for peer {} when transiting sync " +
+              "replication peer state from {} to {}, sleep {} secs and retry",
+            peerId, fromState, toState, backoff / 1000, e);
+          throw suspend(backoff);
+        }
+        setNextState(PeerSyncReplicationStateTransitionState.REPLAY_REMOTE_WAL_IN_PEER);
+        return Flow.HAS_MORE_STATE;
       case REPLAY_REMOTE_WAL_IN_PEER:
         replayRemoteWAL(env.getReplicationPeerManager().getPeerConfig(peerId).get().isSerial());
         setNextState(
@@ -266,7 +308,7 @@ public class TransitPeerSyncReplicationStateProcedure
           long backoff = ProcedureUtil.getBackoffTimeMs(attempts);
           LOG.warn(
             "Failed to remove all replication queues peer {} when starting transiting" +
-                " sync replication peer state from {} to {}, sleep {} secs and retry",
+              " sync replication peer state from {} to {}, sleep {} secs and retry",
             peerId, fromState, toState, backoff / 1000, e);
           throw suspend(backoff);
         }
@@ -275,11 +317,6 @@ public class TransitPeerSyncReplicationStateProcedure
           ? PeerSyncReplicationStateTransitionState.REOPEN_ALL_REGIONS_IN_PEER
           : PeerSyncReplicationStateTransitionState.TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE);
         return Flow.HAS_MORE_STATE;
-      case REOPEN_ALL_REGIONS_IN_PEER:
-        reopenRegions(env);
-        setNextState(
-          PeerSyncReplicationStateTransitionState.TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE);
-        return Flow.HAS_MORE_STATE;
       case TRANSIT_PEER_NEW_SYNC_REPLICATION_STATE:
         try {
           transitPeerSyncReplicationState(env);
@@ -287,7 +324,7 @@ public class TransitPeerSyncReplicationStateProcedure
           long backoff = ProcedureUtil.getBackoffTimeMs(attempts);
           LOG.warn(
             "Failed to update peer storage for peer {} when ending transiting sync " +
-                "replication peer state from {} to {}, sleep {} secs and retry",
+              "replication peer state from {} to {}, sleep {} secs and retry",
             peerId, fromState, toState, backoff / 1000, e);
           throw suspend(backoff);
         }
@@ -308,7 +345,7 @@ public class TransitPeerSyncReplicationStateProcedure
           long backoff = ProcedureUtil.getBackoffTimeMs(attempts);
           LOG.warn(
             "Failed to set peer enabled for peer {} when transiting sync replication peer " +
-                "state from {} to {}, sleep {} secs and retry",
+              "state from {} to {}, sleep {} secs and retry",
             peerId, fromState, toState, backoff / 1000, e);
           throw suspend(backoff);
         }
@@ -327,7 +364,7 @@ public class TransitPeerSyncReplicationStateProcedure
           long backoff = ProcedureUtil.getBackoffTimeMs(attempts);
           LOG.warn(
             "Failed to create remote wal dir for peer {} when transiting sync replication " +
-                "peer state from {} to {}, sleep {} secs and retry",
+              "peer state from {} to {}, sleep {} secs and retry",
             peerId, fromState, toState, backoff / 1000, e);
           throw suspend(backoff);
         }
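
Piecing the transitions above together, the STANDBY -> DOWNGRADE_ACTIVE path for a serial peer now runs roughly as follows (state names from PeerSyncReplicationStateTransitionState; the tail of the state machine is elided):

  SET_PEER_NEW_SYNC_REPLICATION_STATE        (also disables an enabled serial peer first)
    -> REFRESH_PEER_SYNC_REPLICATION_STATE_ON_RS_BEGIN
    -> REOPEN_ALL_REGIONS_IN_PEER            (serial branch of setNextStateAfterRefreshBegin)
    -> SYNC_REPLICATION_UPDATE_LAST_PUSHED_SEQ_ID_FOR_SERIAL_PEER
    -> REPLAY_REMOTE_WAL_IN_PEER
    -> ...

This ordering is the point of the fix: the last pushed sequence ids are settled before any remote wal is replayed, so serial replication cannot get stuck waiting on stale ids.
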

http://git-wip-us.apache.org/repos/asf/hbase/blob/766aa1bf/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
index 1b52354..f373590 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/SyncReplicationTestBase.java
@@ -103,8 +103,8 @@ public class SyncReplicationTestBase {
     ZK_UTIL.startMiniZKCluster();
     initTestingUtility(UTIL1, "/cluster1");
     initTestingUtility(UTIL2, "/cluster2");
-    StartMiniClusterOption option = StartMiniClusterOption.builder()
-        .numMasters(2).numRegionServers(3).numDataNodes(3).build();
+    StartMiniClusterOption option =
+      StartMiniClusterOption.builder().numMasters(2).numRegionServers(3).numDataNodes(3).build();
     UTIL1.startMiniCluster(option);
     UTIL2.startMiniCluster(option);
     TableDescriptor td =
@@ -217,16 +217,16 @@ public class SyncReplicationTestBase {
     return getRemoteWALDir(remoteWALDir, peerId);
   }
 
-  protected Path getRemoteWALDir(Path remoteWALDir, String peerId) {
+  protected final Path getRemoteWALDir(Path remoteWALDir, String peerId) {
     return new Path(remoteWALDir, peerId);
   }
 
-  protected Path getReplayRemoteWALs(Path remoteWALDir, String peerId) {
+  protected final Path getReplayRemoteWALs(Path remoteWALDir, String peerId) {
     return new Path(remoteWALDir, peerId + "-replay");
   }
 
-  protected void verifyRemovedPeer(String peerId, Path remoteWALDir, HBaseTestingUtility utility)
-      throws Exception {
+  protected final void verifyRemovedPeer(String peerId, Path remoteWALDir,
+      HBaseTestingUtility utility) throws Exception {
     ReplicationPeerStorage rps = ReplicationStorageFactory
       .getReplicationPeerStorage(utility.getZooKeeperWatcher(), utility.getConfiguration());
     try {
@@ -247,7 +247,7 @@ public class SyncReplicationTestBase {
     }
   }
 
-  protected void verifyReplicationRequestRejection(HBaseTestingUtility utility,
+  protected final void verifyReplicationRequestRejection(HBaseTestingUtility utility,
       boolean expectedRejection) throws Exception {
     HRegionServer regionServer = utility.getRSForFirstRegionInTable(TABLE_NAME);
     ClusterConnection connection = regionServer.getClusterConnection();
@@ -270,4 +270,20 @@ public class SyncReplicationTestBase {
       }
     }
   }
+
+  protected final void waitUntilDeleted(HBaseTestingUtility util, Path remoteWAL) throws Exception {
+    MasterFileSystem mfs = util.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    util.waitFor(30000, new ExplainingPredicate<Exception>() {
+
+      @Override
+      public boolean evaluate() throws Exception {
+        return !mfs.getWALFileSystem().exists(remoteWAL);
+      }
+
+      @Override
+      public String explainFailure() throws Exception {
+        return remoteWAL + " has not been deleted yet";
+      }
+    });
+  }
 }
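
The new waitUntilDeleted helper uses the standard HBaseTestingUtility.waitFor idiom: poll a predicate until it holds or the timeout expires, with a human-readable explanation on failure. A generic sketch of the same pattern, where conditionHolds() and the message are illustrative:

  util.waitFor(30000, new Waiter.ExplainingPredicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return conditionHolds();   // hypothetical check, polled until true or timeout
    }

    @Override
    public String explainFailure() throws Exception {
      return "condition did not hold before the timeout";
    }
  });
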

http://git-wip-us.apache.org/repos/asf/hbase/blob/766aa1bf/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java
new file mode 100644
index 0000000..6725649
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialSyncReplication.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import static org.hamcrest.CoreMatchers.endsWith;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.LogRoller;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
+
+/**
+ * Testcase to confirm that serial replication will not be stuck when used along with synchronous
+ * replication. See HBASE-21486 for more details.
+ */
+@Category({ ReplicationTests.class, LargeTests.class })
+public class TestSerialSyncReplication extends SyncReplicationTestBase {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestSerialSyncReplication.class);
+
+  @Test
+  public void test() throws Exception {
+    // change to serial
+    UTIL1.getAdmin().updateReplicationPeerConfig(PEER_ID, ReplicationPeerConfig
+      .newBuilder(UTIL1.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(true).build());
+    UTIL2.getAdmin().updateReplicationPeerConfig(PEER_ID, ReplicationPeerConfig
+      .newBuilder(UTIL2.getAdmin().getReplicationPeerConfig(PEER_ID)).setSerial(true).build());
+
+    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
+      SyncReplicationState.STANDBY);
+    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
+      SyncReplicationState.ACTIVE);
+
+    UTIL2.getAdmin().disableReplicationPeer(PEER_ID);
+
+    writeAndVerifyReplication(UTIL1, UTIL2, 0, 100);
+
+    MasterFileSystem mfs = UTIL2.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    Path remoteWALDir = ReplicationUtils.getPeerRemoteWALDir(
+      new Path(mfs.getWALRootDir(), ReplicationUtils.REMOTE_WAL_DIR_NAME), PEER_ID);
+    FileStatus[] remoteWALStatus = mfs.getWALFileSystem().listStatus(remoteWALDir);
+    assertEquals(1, remoteWALStatus.length);
+    Path remoteWAL = remoteWALStatus[0].getPath();
+    assertThat(remoteWAL.getName(), endsWith(ReplicationUtils.SYNC_WAL_SUFFIX));
+    // roll the wal writer, so that we will delete the remote wal. This is used to make sure that we
+    // will not replay this wal when transiting to DA.
+    for (RegionServerThread t : UTIL1.getMiniHBaseCluster().getRegionServerThreads()) {
+      LogRoller roller = t.getRegionServer().getWalRoller();
+      roller.requestRollAll();
+      roller.waitUntilWalRollFinished();
+    }
+    waitUntilDeleted(UTIL2, remoteWAL);
+
+    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
+      SyncReplicationState.DOWNGRADE_ACTIVE);
+    UTIL1.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
+      SyncReplicationState.STANDBY);
+    // let's reopen the region
+    RegionInfo region = Iterables.getOnlyElement(UTIL2.getAdmin().getRegions(TABLE_NAME));
+    HRegionServer target = UTIL2.getOtherRegionServer(UTIL2.getRSForFirstRegionInTable(TABLE_NAME));
+    UTIL2.getAdmin().move(region.getEncodedNameAsBytes(),
+      Bytes.toBytes(target.getServerName().getServerName()));
+    // here we will remove all the pending wals. This is not a normal operation sequence but anyway,
+    // a user could do this.
+    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
+      SyncReplicationState.STANDBY);
+    // transit back to DA
+    UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
+      SyncReplicationState.DOWNGRADE_ACTIVE);
+
+    UTIL2.getAdmin().enableReplicationPeer(PEER_ID);
+    // make sure that the async replication still works
+    writeAndVerifyReplication(UTIL2, UTIL1, 100, 200);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/766aa1bf/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java
index 0cd1846..9f89826 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSyncReplicationRemoveRemoteWAL.java
@@ -25,7 +25,6 @@ import static org.junit.Assert.assertTrue;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
-import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -41,22 +40,6 @@ public class TestSyncReplicationRemoveRemoteWAL extends SyncReplicationTestBase
   public static final HBaseClassTestRule CLASS_RULE =
     HBaseClassTestRule.forClass(TestSyncReplicationRemoveRemoteWAL.class);
 
-  private void waitUntilDeleted(Path remoteWAL) throws Exception {
-    MasterFileSystem mfs = UTIL2.getMiniHBaseCluster().getMaster().getMasterFileSystem();
-    UTIL1.waitFor(30000, new ExplainingPredicate<Exception>() {
-
-      @Override
-      public boolean evaluate() throws Exception {
-        return !mfs.getWALFileSystem().exists(remoteWAL);
-      }
-
-      @Override
-      public String explainFailure() throws Exception {
-        return remoteWAL + " has not been deleted yet";
-      }
-    });
-  }
-
   @Test
   public void testRemoveRemoteWAL() throws Exception {
     UTIL2.getAdmin().transitReplicationPeerSyncReplicationState(PEER_ID,
@@ -76,7 +59,7 @@ public class TestSyncReplicationRemoveRemoteWAL extends SyncReplicationTestBase
     HRegionServer rs = UTIL1.getRSForFirstRegionInTable(TABLE_NAME);
     rs.getWalRoller().requestRollAll();
     // The replicated wal file should be deleted finally
-    waitUntilDeleted(remoteWAL);
+    waitUntilDeleted(UTIL2, remoteWAL);
     remoteWALStatus = mfs.getWALFileSystem().listStatus(remoteWALDir);
     assertEquals(1, remoteWALStatus.length);
     remoteWAL = remoteWALStatus[0].getPath();
@@ -95,6 +78,6 @@ public class TestSyncReplicationRemoveRemoteWAL extends SyncReplicationTestBase
     verifyThroughRegion(UTIL2, 100, 200);
 
     // Confirm that we will also remove the remote wal files in DA state
-    waitUntilDeleted(remoteWAL);
+    waitUntilDeleted(UTIL2, remoteWAL);
   }
 }

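A side note on the refactor above: waitUntilDeleted was hoisted out of TestSyncReplicationRemoveRemoteWAL into SyncReplicationTestBase and now takes the cluster to poll explicitly, so subclasses can wait on either cluster's WAL filesystem. The call shape, as it now appears in both tests:

    // The base-class helper names the cluster whose master filesystem is polled.
    waitUntilDeleted(UTIL2, remoteWAL);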

[24/51] [abbrv] hbase git commit: Revert "HBASE-21507 Compaction failed when execute AbstractMultiFileWriter.beforeShipped() method"

Posted by el...@apache.org.
Revert "HBASE-21507 Compaction failed when execute AbstractMultiFileWriter.beforeShipped() method"

Email address is broken.

This reverts commit 3133d4385989ccda3be3aa55c7d8afa798180dd0.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/39bc458e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/39bc458e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/39bc458e

Branch: refs/heads/HBASE-20952
Commit: 39bc458eabe805d300183803ab715707e62d6096
Parents: 3133d43
Author: zhangduo <zh...@apache.org>
Authored: Tue Nov 27 20:54:51 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Tue Nov 27 20:55:04 2018 +0800

----------------------------------------------------------------------
 .../hbase/regionserver/AbstractMultiFileWriter.java      | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/39bc458e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
index 43d0ad8..2fdab81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
@@ -21,10 +21,12 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+
 import org.apache.hadoop.fs.Path;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hbase.regionserver.CellSink;
 
 /**
  * Base class for cell sink that separates the provided cells into multiple files.
@@ -117,12 +119,9 @@ public abstract class AbstractMultiFileWriter implements CellSink, ShipperListen
 
   @Override
   public void beforeShipped() throws IOException {
-    Collection<StoreFileWriter> writers = writers();
-    if (writers != null) {
-      for (StoreFileWriter writer : writers) {
-        if (writer != null) {
-          writer.beforeShipped();
-        }
+    if (this.writers() != null) {
+      for (StoreFileWriter writer : writers()) {
+        writer.beforeShipped();
       }
     }
   }


[49/51] [abbrv] hbase git commit: HBASE-21413 Empty meta log doesn't get split when restart whole cluster Signed-off-by: stack

Posted by el...@apache.org.
HBASE-21413 Empty meta log doesn't get split when restart whole cluster
Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e0e0694f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e0e0694f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e0e0694f

Branch: refs/heads/HBASE-20952
Commit: e0e0694fa18372e6551420687bcf28ffe0bbc1f8
Parents: 1e65bd5
Author: Allan Yang <al...@apache.org>
Authored: Thu Dec 6 21:13:03 2018 -0800
Committer: stack <st...@stack.corp.apple.com>
Committed: Thu Dec 6 21:21:41 2018 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/master/MasterWalManager.java   | 40 ++++++++++
 .../master/procedure/ServerCrashProcedure.java  |  3 +
 .../hbase/regionserver/TestCleanupMetaWAL.java  | 83 ++++++++++++++++++++
 3 files changed, 126 insertions(+)
----------------------------------------------------------------------

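In a nutshell, the fix adds one step to server-crash handling: after WAL splitting, any leftover meta WALs are archived when the dead server was not carrying hbase:meta, so the -splitting directory can actually be removed. A condensed sketch of the new flow, using the names from the ServerCrashProcedure and MasterWalManager diffs below:

    // Condensed from the diffs below: split the crashed server's WALs, then
    // archive leftover meta WALs so the empty -splitting dir can be deleted.
    mwm.splitLog(serverName);
    if (!carryingMeta) {
      mwm.archiveMetaLog(serverName); // moves *.meta files to oldWALs, drops the dir
    }
    am.getRegionStates().logSplit(serverName);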

http://git-wip-us.apache.org/repos/asf/hbase/blob/e0e0694f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
index 2b1a81f..5ab1c28 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterWalManager.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
@@ -356,4 +357,43 @@ public class MasterWalManager {
       }
     }
   }
+
+  /**
+   * A meta region that opened and closed normally on a server may leave some meta
+   * WALs in that server's wal dir. Since the meta region is no longer on this server,
+   * the SCP won't split those meta WALs and just leaves them there, so deleting
+   * the wal dir will fail since the dir is not empty. We can safely archive those
+   * meta WALs and then delete the dir.
+   * @param serverName the server whose meta WALs should be archived
+   */
+  public void archiveMetaLog(final ServerName serverName) {
+    try {
+      Path logDir = new Path(this.rootDir,
+          AbstractFSWALProvider.getWALDirectoryName(serverName.toString()));
+      Path splitDir = logDir.suffix(AbstractFSWALProvider.SPLITTING_EXT);
+      if (fs.exists(splitDir)) {
+        FileStatus[] logfiles = FSUtils.listStatus(fs, splitDir, META_FILTER);
+        if (logfiles != null) {
+          for (FileStatus status : logfiles) {
+            if (!status.isDir()) {
+              Path newPath = AbstractFSWAL.getWALArchivePath(this.oldLogDir,
+                  status.getPath());
+              if (!FSUtils.renameAndSetModifyTime(fs, status.getPath(), newPath)) {
+                LOG.warn("Unable to move  " + status.getPath() + " to " + newPath);
+              } else {
+                LOG.debug("Archived meta log " + status.getPath() + " to " + newPath);
+              }
+            }
+          }
+        }
+        if (!fs.delete(splitDir, false)) {
+          LOG.warn("Unable to delete log dir. Ignoring. " + splitDir);
+        }
+      }
+    } catch (IOException ie) {
+      LOG.warn("Failed archiving meta log for server " + serverName, ie);
+    }
+  }
+
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e0e0694f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 048bca8..b93f8fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -221,6 +221,9 @@ public class ServerCrashProcedure
     // PROBLEM!!! WE BLOCK HERE.
     am.getRegionStates().logSplitting(this.serverName);
     mwm.splitLog(this.serverName);
+    if (!carryingMeta) {
+      mwm.archiveMetaLog(this.serverName);
+    }
     am.getRegionStates().logSplit(this.serverName);
     LOG.debug("Done splitting WALs {}", this);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e0e0694f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java
new file mode 100644
index 0000000..4a723c0
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCleanupMetaWAL.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.master.MasterFileSystem;
+import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static org.apache.hadoop.hbase.wal.AbstractFSWALProvider.SPLITTING_EXT;
+import static org.junit.Assert.fail;
+
+@Category(MediumTests.class)
+public class TestCleanupMetaWAL {
+  private static final Logger LOG = LoggerFactory.getLogger(TestCleanupMetaWAL.class);
+
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestCleanupMetaWAL.class);
+
+  @BeforeClass
+  public static void before() throws Exception {
+    TEST_UTIL.startMiniCluster(2);
+  }
+
+  @AfterClass
+  public static void after() throws Exception {
+    TEST_UTIL.shutdownMiniZKCluster();
+  }
+
+  @Test
+  public void testCleanupMetaWAL() throws Exception {
+    TEST_UTIL.createTable(TableName.valueOf("test"), "cf");
+    HRegionServer serverWithMeta = TEST_UTIL.getMiniHBaseCluster()
+        .getRegionServer(TEST_UTIL.getMiniHBaseCluster().getServerWithMeta());
+    TEST_UTIL.getAdmin()
+        .move(RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes(), null);
+    TEST_UTIL.getMiniHBaseCluster().killRegionServer(serverWithMeta.getServerName());
+    TEST_UTIL.waitFor(10000, () ->
+        TEST_UTIL.getMiniHBaseCluster().getMaster().getProcedures().stream()
+            .filter(p -> p instanceof ServerCrashProcedure && p.isFinished()).count() > 0);
+    MasterFileSystem fs = TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
+    Path walPath = new Path(fs.getWALRootDir(), HConstants.HREGION_LOGDIR_NAME);
+    for (FileStatus status : FSUtils.listStatus(fs.getFileSystem(), walPath)) {
+      if (status.getPath().toString().contains(SPLITTING_EXT)) {
+        fail("Should not have splitting wal dir here:" + status);
+      }
+    }
+
+
+  }
+}


[41/51] [abbrv] hbase git commit: HBASE-21541 Move MetaTableLocator.verifyRegionLocation to hbase-rsgroup module

Posted by el...@apache.org.
HBASE-21541 Move MetaTableLocator.verifyRegionLocation to hbase-rsgroup module


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8bf966c8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8bf966c8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8bf966c8

Branch: refs/heads/HBASE-20952
Commit: 8bf966c8e936dec4d83bcbe85c5aab543f14a0df
Parents: 59cfe2e
Author: zhangduo <zh...@apache.org>
Authored: Tue Dec 4 22:33:36 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Tue Dec 4 22:33:36 2018 +0800

----------------------------------------------------------------------
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |   3 +-
 .../apache/hadoop/hbase/rsgroup/Utility.java    | 196 ++++++++++-
 .../hadoop/hbase/rsgroup/TestUtility.java       | 229 +++++++++++++
 .../java/org/apache/hadoop/hbase/Server.java    |  15 +-
 .../hbase/master/MasterMetaBootstrap.java       |   3 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   5 +-
 .../hbase/master/MasterStatusServlet.java       |   8 +-
 .../master/procedure/ProcedureSyncWait.java     |   8 +-
 .../master/snapshot/MasterSnapshotVerifier.java |   4 +-
 .../master/snapshot/TakeSnapshotHandler.java    |   4 +-
 .../flush/MasterFlushTableProcedureManager.java |   6 +-
 .../hbase/regionserver/HRegionServer.java       |  16 -
 .../regionserver/ReplicationSyncUp.java         |   6 -
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  10 +-
 .../resources/hbase-webapps/master/table.jsp    |   4 +-
 .../hadoop/hbase/MockRegionServerServices.java  |   6 -
 .../hadoop/hbase/TestMetaTableAccessor.java     |  10 +-
 .../hadoop/hbase/TestMetaTableLocator.java      | 261 +++------------
 .../hbase/client/TestMetaWithReplicas.java      |   4 +-
 .../hbase/master/MockNoopMasterServices.java    |   6 -
 .../hadoop/hbase/master/MockRegionServer.java   |   6 -
 .../hbase/master/TestActiveMasterManager.java   |   6 -
 .../hbase/master/cleaner/TestHFileCleaner.java  |   6 -
 .../master/cleaner/TestHFileLinkCleaner.java    |   6 -
 .../hbase/master/cleaner/TestLogsCleaner.java   |   7 -
 .../cleaner/TestReplicationHFileCleaner.java    |   6 -
 .../regionserver/TestHeapMemoryManager.java     |   6 -
 .../regionserver/TestRegionServerNoMaster.java  |   5 +-
 .../hbase/regionserver/TestSplitLogWorker.java  |   6 -
 .../hbase/regionserver/TestWALLockup.java       |   6 -
 .../TestReplicationTrackerZKImpl.java           |   6 -
 .../TestReplicationSourceManager.java           |   6 -
 .../security/token/TestTokenAuthentication.java |   6 -
 .../apache/hadoop/hbase/util/MockServer.java    |   7 -
 .../hbase/zookeeper/MetaTableLocator.java       | 329 +++----------------
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java   |   7 +-
 36 files changed, 551 insertions(+), 674 deletions(-)
----------------------------------------------------------------------

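The shape of the change, before wading into the 36 files: meta-location verification moves off the server's long-lived MetaTableLocator instance and into a static helper private to the hbase-rsgroup module. A minimal before/after sketch of the call site, using the names from the RSGroupInfoManagerImpl diff below:

    // Before: instance-based lookup via the server's MetaTableLocator.
    boolean rootMetaFound = masterServices.getMetaTableLocator()
        .verifyMetaRegionLocation(conn, masterServices.getZooKeeper(), 1);

    // After: static helper local to the rsgroup module.
    boolean rootMetaFound =
        Utility.verifyMetaRegionLocation(conn, masterServices.getZooKeeper(), 1);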

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index cdd68d1..6f54c31 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -757,8 +757,7 @@ final class RSGroupInfoManagerImpl implements RSGroupInfoManager {
         found.set(true);
         try {
           boolean rootMetaFound =
-              masterServices.getMetaTableLocator().verifyMetaRegionLocation(
-                  conn, masterServices.getZooKeeper(), 1);
+            Utility.verifyMetaRegionLocation(conn, masterServices.getZooKeeper(), 1);
           if (rootMetaFound) {
             MetaTableAccessor.Visitor visitor = new DefaultVisitorBase() {
               @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java
index 4d7bf43..d5450c4 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/Utility.java
@@ -19,19 +19,49 @@
  */
 package org.apache.hadoop.hbase.rsgroup;
 
+import java.io.EOFException;
+import java.io.IOException;
+import java.net.ConnectException;
+import java.net.NoRouteToHostException;
+import java.net.SocketException;
+import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
 import java.util.HashSet;
+import java.util.Locale;
 import java.util.Set;
-
+import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
+import org.apache.hadoop.hbase.ipc.FailedServerException;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 
 /**
  * Utility for this RSGroup package in hbase-rsgroup.
  */
 @InterfaceAudience.Private
 final class Utility {
+
+  private static final Logger LOG = LoggerFactory.getLogger(Utility.class);
+
   private Utility() {
   }
 
@@ -45,9 +75,171 @@ final class Utility {
       return onlineServers;
     }
 
-    for(ServerName server: master.getServerManager().getOnlineServers().keySet()) {
+    for (ServerName server : master.getServerManager().getOnlineServers().keySet()) {
       onlineServers.add(server.getAddress());
     }
     return onlineServers;
   }
+
+  /**
+   * Verify <code>hbase:meta</code> is deployed and accessible.
+   * @param hConnection the connection to use
+   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
+   * @param timeout How long to wait on zk for meta address (passed through to the internal call to
+   *          {@link #getMetaServerConnection}).
+   * @return True if the <code>hbase:meta</code> location is healthy.
+   * @throws IOException if the number of retries for getting the connection is exceeded
+   * @throws InterruptedException if waiting for the socket operation fails
+   */
+  public static boolean verifyMetaRegionLocation(ClusterConnection hConnection, ZKWatcher zkw,
+      final long timeout) throws InterruptedException, IOException {
+    return verifyMetaRegionLocation(hConnection, zkw, timeout, RegionInfo.DEFAULT_REPLICA_ID);
+  }
+
+  /**
+   * Verify <code>hbase:meta</code> is deployed and accessible.
+   * @param connection the connection to use
+   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
+   * @param timeout How long to wait on zk for meta address (passed through to {@link #getMetaServerConnection})
+   * @param replicaId the ID of the replica
+   * @return True if the <code>hbase:meta</code> location is healthy.
+   * @throws InterruptedException if waiting for the socket operation fails
+   * @throws IOException if the number of retries for getting the connection is exceeded
+   */
+  public static boolean verifyMetaRegionLocation(ClusterConnection connection, ZKWatcher zkw,
+      final long timeout, int replicaId) throws InterruptedException, IOException {
+    AdminProtos.AdminService.BlockingInterface service = null;
+    try {
+      service = getMetaServerConnection(connection, zkw, timeout, replicaId);
+    } catch (NotAllMetaRegionsOnlineException e) {
+      // Pass
+    } catch (ServerNotRunningYetException e) {
+      // Pass -- remote server is not up so can't be carrying root
+    } catch (UnknownHostException e) {
+      // Pass -- server name doesn't resolve so it can't be assigned anything.
+    } catch (RegionServerStoppedException e) {
+      // Pass -- server name sends us to a server that is dying or already dead.
+    }
+    return (service != null) && verifyRegionLocation(connection, service,
+      MetaTableLocator.getMetaRegionLocation(zkw, replicaId),
+      RegionReplicaUtil.getRegionInfoForReplica(RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId)
+        .getRegionName());
+  }
+
+  /**
+   * Verify we can connect to <code>hostingServer</code> and that it is carrying
+   * <code>regionName</code>.
+   * @param hostingServer Interface to the server hosting <code>regionName</code>
+   * @param address The servername that goes with the <code>metaServer</code> interface. Used for
+   *          logging.
+   * @param regionName The regionname we are interested in.
+   * @return True if we were able to verify the region located at other side of the interface.
+   */
+  // TODO: We should be able to get the ServerName from the AdminProtocol
+  // rather than have to pass it in. It's made awkward by the fact that the
+  // HRI is likely a proxy against a remote server so the getServerName needs
+  // to be fixed to go to a local method or to a cache before we can do this.
+  private static boolean verifyRegionLocation(final ClusterConnection connection,
+      AdminService.BlockingInterface hostingServer, final ServerName address,
+      final byte[] regionName) {
+    if (hostingServer == null) {
+      LOG.info("Passed hostingServer is null");
+      return false;
+    }
+    Throwable t;
+    HBaseRpcController controller = connection.getRpcControllerFactory().newController();
+    try {
+      // Try and get regioninfo from the hosting server.
+      return ProtobufUtil.getRegionInfo(controller, hostingServer, regionName) != null;
+    } catch (ConnectException e) {
+      t = e;
+    } catch (RetriesExhaustedException e) {
+      t = e;
+    } catch (RemoteException e) {
+      IOException ioe = e.unwrapRemoteException();
+      t = ioe;
+    } catch (IOException e) {
+      Throwable cause = e.getCause();
+      if (cause != null && cause instanceof EOFException) {
+        t = cause;
+      } else if (cause != null && cause.getMessage() != null &&
+        cause.getMessage().contains("Connection reset")) {
+        t = cause;
+      } else {
+        t = e;
+      }
+    }
+    LOG.info("Failed verification of " + Bytes.toStringBinary(regionName) + " at address=" +
+      address + ", exception=" + t.getMessage());
+    return false;
+  }
+
+  /**
+   * Gets a connection to the server hosting meta, as reported by ZooKeeper, waiting up to the
+   * specified timeout for availability.
+   * <p>
+   * WARNING: Does not retry. Use an {@link org.apache.hadoop.hbase.client.HTable} instead.
+   * @param connection the connection to use
+   * @param zkw reference to the {@link ZKWatcher} which also contains configuration and operation
+   * @param timeout How long to wait on meta location
+   * @param replicaId the ID of the replica
+   * @return connection to server hosting meta
+   * @throws InterruptedException if waiting for the socket operation fails
+   * @throws IOException if the number of retries for getting the connection is exceeded
+   */
+  private static AdminService.BlockingInterface getMetaServerConnection(
+      ClusterConnection connection, ZKWatcher zkw, long timeout, int replicaId)
+      throws InterruptedException, IOException {
+    return getCachedConnection(connection,
+      MetaTableLocator.waitMetaRegionLocation(zkw, replicaId, timeout));
+  }
+
+  /**
+   * @param sn ServerName to get a connection against.
+   * @return The AdminProtocol we got when we connected to <code>sn</code>. May have come from
+   *         cache, may not be good, may have been set up by this invocation, or may be null.
+   * @throws IOException if the number of retries for getting the connection is exceeded
+   */
+  private static AdminService.BlockingInterface getCachedConnection(ClusterConnection connection,
+      ServerName sn) throws IOException {
+    if (sn == null) {
+      return null;
+    }
+    AdminService.BlockingInterface service = null;
+    try {
+      service = connection.getAdmin(sn);
+    } catch (RetriesExhaustedException e) {
+      if (e.getCause() != null && e.getCause() instanceof ConnectException) {
+        LOG.debug("Catch this; presume it means the cached connection has gone bad.");
+      } else {
+        throw e;
+      }
+    } catch (SocketTimeoutException e) {
+      LOG.debug("Timed out connecting to " + sn);
+    } catch (NoRouteToHostException e) {
+      LOG.debug("Connecting to " + sn, e);
+    } catch (SocketException e) {
+      LOG.debug("Exception connecting to " + sn);
+    } catch (UnknownHostException e) {
+      LOG.debug("Unknown host exception connecting to  " + sn);
+    } catch (FailedServerException e) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Server " + sn + " is in failed server list.");
+      }
+    } catch (IOException ioe) {
+      Throwable cause = ioe.getCause();
+      if (ioe instanceof ConnectException) {
+        LOG.debug("Catch. Connect refused.");
+      } else if (cause != null && cause instanceof EOFException) {
+        LOG.debug("Catch. Other end disconnected us.");
+      } else if (cause != null && cause.getMessage() != null &&
+        cause.getMessage().toLowerCase(Locale.ROOT).contains("connection reset")) {
+        LOG.debug("Catch. Connection reset.");
+      } else {
+        throw ioe;
+      }
+
+    }
+    return service;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUtility.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUtility.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUtility.java
new file mode 100644
index 0000000..0065610
--- /dev/null
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestUtility.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.rsgroup;
+
+import static org.junit.Assert.assertFalse;
+
+import java.io.IOException;
+import java.net.ConnectException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
+import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
+import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
+
+@Category({ MiscTests.class, MediumTests.class })
+public class TestUtility {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestUtility.class);
+
+  private static final Logger LOG = LoggerFactory.getLogger(TestUtility.class);
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static final ServerName SN =
+    ServerName.valueOf("example.org", 1234, System.currentTimeMillis());
+
+  private ZKWatcher watcher;
+
+  private Abortable abortable;
+
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    // Set this down so tests run quicker
+    UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
+    UTIL.startMiniZKCluster();
+  }
+
+  @AfterClass
+  public static void afterClass() throws IOException {
+    UTIL.getZkCluster().shutdown();
+  }
+
+  @Before
+  public void before() throws IOException {
+    this.abortable = new Abortable() {
+      @Override
+      public void abort(String why, Throwable e) {
+        LOG.info(why, e);
+      }
+
+      @Override
+      public boolean isAborted() {
+        return false;
+      }
+    };
+    this.watcher =
+      new ZKWatcher(UTIL.getConfiguration(), this.getClass().getSimpleName(), this.abortable, true);
+  }
+
+  @After
+  public void after() {
+    try {
+      // Clean out meta location or later tests will be confused... they presume
+      // a fresh start in zk.
+      MetaTableLocator.deleteMetaLocation(this.watcher);
+    } catch (KeeperException e) {
+      LOG.warn("Unable to delete hbase:meta location", e);
+    }
+
+    this.watcher.close();
+  }
+
+  /**
+   * @param admin An {@link AdminProtos.AdminService.BlockingInterface} instance; you'll likely want
+   *          to pass a mocked HRS; can be null.
+   * @param client A mocked ClientProtocol instance, can be null
+   * @return Mock up a connection that returns a {@link Configuration} when
+   *         {@link org.apache.hadoop.hbase.client.ClusterConnection#getConfiguration()} is called,
+   *         a 'location' when
+   *         {@link org.apache.hadoop.hbase.client.RegionLocator#getRegionLocation(byte[], boolean)}
+   *         is called, and that returns the passed
+   *         {@link AdminProtos.AdminService.BlockingInterface} instance when
+   *         {@link org.apache.hadoop.hbase.client.ClusterConnection#getAdmin(ServerName)} is
+   *         called, returns the passed {@link ClientProtos.ClientService.BlockingInterface}
+   *         instance when
+   *         {@link org.apache.hadoop.hbase.client.ClusterConnection#getClient(ServerName)} is
+   *         called.
+   */
+  private ClusterConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin,
+      final ClientProtos.ClientService.BlockingInterface client) throws IOException {
+    ClusterConnection connection =
+      HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
+    Mockito.doNothing().when(connection).close();
+    // Make it so we return any old location when asked.
+    final HRegionLocation anyLocation =
+      new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, SN);
+    Mockito.when(connection.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(),
+      Mockito.anyBoolean())).thenReturn(anyLocation);
+    Mockito.when(connection.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any()))
+      .thenReturn(anyLocation);
+    if (admin != null) {
+      // If a call to getHRegionConnection, return this implementation.
+      Mockito.when(connection.getAdmin(Mockito.any())).thenReturn(admin);
+    }
+    if (client != null) {
+      // If a call to getClient, return this implementation.
+      Mockito.when(connection.getClient(Mockito.any())).thenReturn(client);
+    }
+    return connection;
+  }
+
+  private void testVerifyMetaRegionLocationWithException(Exception ex)
+      throws IOException, InterruptedException, KeeperException, ServiceException {
+    // Mock an ClientProtocol.
+    final ClientProtos.ClientService.BlockingInterface implementation =
+      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
+
+    ClusterConnection connection = mockConnection(null, implementation);
+
+    // If a 'get' is called on mocked interface, throw connection refused.
+    Mockito.when(implementation.get((RpcController) Mockito.any(), (GetRequest) Mockito.any()))
+      .thenThrow(new ServiceException(ex));
+
+    long timeout = UTIL.getConfiguration().getLong("hbase.catalog.verification.timeout", 1000);
+    MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPENING);
+    assertFalse(Utility.verifyMetaRegionLocation(connection, watcher, timeout));
+
+    MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPEN);
+    assertFalse(Utility.verifyMetaRegionLocation(connection, watcher, timeout));
+  }
+
+  /**
+   * Test get of meta region fails properly if nothing to connect to.
+   */
+  @Test
+  public void testVerifyMetaRegionLocationFails()
+      throws IOException, InterruptedException, KeeperException, ServiceException {
+    ClusterConnection connection = Mockito.mock(ClusterConnection.class);
+    ServiceException connectException =
+      new ServiceException(new ConnectException("Connection refused"));
+    final AdminProtos.AdminService.BlockingInterface implementation =
+      Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
+    Mockito.when(implementation.getRegionInfo((RpcController) Mockito.any(),
+      (GetRegionInfoRequest) Mockito.any())).thenThrow(connectException);
+    Mockito.when(connection.getAdmin(Mockito.any())).thenReturn(implementation);
+    RpcControllerFactory controllerFactory = Mockito.mock(RpcControllerFactory.class);
+    Mockito.when(controllerFactory.newController())
+      .thenReturn(Mockito.mock(HBaseRpcController.class));
+    Mockito.when(connection.getRpcControllerFactory()).thenReturn(controllerFactory);
+
+    ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
+    MetaTableLocator.setMetaLocation(this.watcher, sn, RegionState.State.OPENING);
+    assertFalse(Utility.verifyMetaRegionLocation(connection, watcher, 100));
+    MetaTableLocator.setMetaLocation(this.watcher, sn, RegionState.State.OPEN);
+    assertFalse(Utility.verifyMetaRegionLocation(connection, watcher, 100));
+  }
+
+  /**
+   * Test we survive a connection refused {@link ConnectException}
+   */
+  @Test
+  public void testGetMetaServerConnectionFails()
+      throws IOException, InterruptedException, KeeperException, ServiceException {
+    testVerifyMetaRegionLocationWithException(new ConnectException("Connection refused"));
+  }
+
+  /**
+   * Test that verifyMetaRegionLocation properly handles getting a ServerNotRunningException. See
+   * HBASE-4470. Note this doesn't check the exact exception thrown in HBASE-4470 as there it is
+   * thrown from getHConnection() and here it is thrown from get() -- but those are both called from
+   * the same function anyway, and this way is less invasive than throwing from getHConnection would
+   * be.
+   */
+  @Test
+  public void testVerifyMetaRegionServerNotRunning()
+      throws IOException, InterruptedException, KeeperException, ServiceException {
+    testVerifyMetaRegionLocationWithException(new ServerNotRunningYetException("mock"));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index 53a757a..fb898ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,16 +17,14 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import java.io.IOException;
-
 /**
  * Defines a curated set of shared functions implemented by HBase servers (Masters
  * and RegionServers). For use internally only. Be judicious adding API. Changes cause ripples
@@ -64,14 +61,6 @@ public interface Server extends Abortable, Stoppable {
   ClusterConnection getClusterConnection();
 
   /**
-   * Returns instance of {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}
-   * running inside this server. This MetaServerLocator is started and stopped by server, clients
-   * shouldn't manage it's lifecycle.
-   * @return instance of {@link MetaTableLocator} associated with this server.
-   */
-  MetaTableLocator getMetaTableLocator();
-
-  /**
    * @return The unique server name for this server.
    */
   ServerName getServerName();

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
index ce21465..e57817e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMetaBootstrap.java
@@ -65,8 +65,7 @@ class MasterMetaBootstrap {
       throw new IllegalStateException("hbase:meta must be initialized first before we can " +
           "assign out its replicas");
     }
-    ServerName metaServername =
-        this.master.getMetaTableLocator().getMetaRegionLocation(this.master.getZooKeeper());
+    ServerName metaServername = MetaTableLocator.getMetaRegionLocation(this.master.getZooKeeper());
     for (int i = 1; i < numReplicas; i++) {
       // Get current meta state for replica from zk.
       RegionState metaState = MetaTableLocator.getMetaRegionState(master.getZooKeeper(), i);

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index fb76bec..cd838d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -102,6 +102,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
@@ -1550,9 +1551,9 @@ public class MasterRpcServices extends RSRpcServices
       }
       Pair<RegionInfo, ServerName> pair =
         MetaTableAccessor.getRegion(master.getConnection(), regionName);
-      if (Bytes.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(),regionName)) {
+      if (Bytes.equals(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName(), regionName)) {
         pair = new Pair<>(RegionInfoBuilder.FIRST_META_REGIONINFO,
-            master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper()));
+          MetaTableLocator.getMetaRegionLocation(master.getZooKeeper()));
       }
       if (pair == null) {
         throw new UnknownRegionException(Bytes.toString(regionName));

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
index 560a04a..58e57c4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
@@ -22,17 +22,15 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * The servlet responsible for rendering the index page of the
@@ -82,9 +80,7 @@ public class MasterStatusServlet extends HttpServlet {
   }
 
   private ServerName getMetaLocationOrNull(HMaster master) {
-    MetaTableLocator metaTableLocator = master.getMetaTableLocator();
-    return metaTableLocator == null ? null :
-      metaTableLocator.getMetaRegionLocation(master.getZooKeeper());
+    return MetaTableLocator.getMetaRegionLocation(master.getZooKeeper());
   }
 
   private Map<String, Integer> getFragmentationInfo(

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
index 9353124..46621da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedureSyncWait.java
@@ -25,7 +25,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
@@ -225,12 +225,12 @@ public final class ProcedureSyncWait {
   protected static void waitMetaRegions(final MasterProcedureEnv env) throws IOException {
     int timeout = env.getMasterConfiguration().getInt("hbase.client.catalog.timeout", 10000);
     try {
-      if (env.getMasterServices().getMetaTableLocator().waitMetaRegionLocation(
-            env.getMasterServices().getZooKeeper(), timeout) == null) {
+      if (MetaTableLocator.waitMetaRegionLocation(env.getMasterServices().getZooKeeper(),
+        timeout) == null) {
         throw new NotAllMetaRegionsOnlineException();
       }
     } catch (InterruptedException e) {
-      throw (InterruptedIOException)new InterruptedIOException().initCause(e);
+      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index e78f0b5..c9dc0c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -42,6 +41,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
@@ -159,7 +159,7 @@ public final class MasterSnapshotVerifier {
   private void verifyRegions(final SnapshotManifest manifest) throws IOException {
     List<RegionInfo> regions;
     if (TableName.META_TABLE_NAME.equals(tableName)) {
-      regions = new MetaTableLocator().getMetaRegions(services.getZooKeeper());
+      regions = MetaTableLocator.getMetaRegions(services.getZooKeeper());
     } else {
       regions = MetaTableAccessor.getTableRegions(services.getConnection(), tableName);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index 1dce79f..3b7d65a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -24,7 +24,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.CancellationException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -60,6 +59,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 
 /**
@@ -193,7 +193,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
 
       List<Pair<RegionInfo, ServerName>> regionsAndLocations;
       if (TableName.META_TABLE_NAME.equals(snapshotTable)) {
-        regionsAndLocations = new MetaTableLocator().getMetaRegionsAndLocations(
+        regionsAndLocations = MetaTableLocator.getMetaRegionsAndLocations(
           server.getZooKeeper());
       } else {
         regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
index fee3dde..1f7a5e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/MasterFlushTableProcedureManager.java
@@ -24,7 +24,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ThreadPoolExecutor;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -43,14 +42,15 @@ import org.apache.hadoop.hbase.procedure.ProcedureCoordinatorRpcs;
 import org.apache.hadoop.hbase.procedure.ZKProcedureCoordinator;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessChecker;
-import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
@@ -131,7 +131,7 @@ public class MasterFlushTableProcedureManager extends MasterProcedureManager {
     List<Pair<RegionInfo, ServerName>> regionsAndLocations;
 
     if (TableName.META_TABLE_NAME.equals(tableName)) {
-      regionsAndLocations = new MetaTableLocator().getMetaRegionsAndLocations(
+      regionsAndLocations = MetaTableLocator.getMetaRegionsAndLocations(
         master.getZooKeeper());
     } else {
       regionsAndLocations = MetaTableAccessor.getTableRegionsAndLocations(

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index b9d606d..6242d36 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -261,14 +261,6 @@ public class HRegionServer extends HasThread implements
    */
   protected ClusterConnection clusterConnection;
 
-  /*
-   * Long-living meta table locator, which is created when the server is started and stopped
-   * when server shuts down. References to this locator shall be used to perform according
-   * operations in EventHandlers. Primary reason for this decision is to make it mockable
-   * for tests.
-   */
-  protected MetaTableLocator metaTableLocator;
-
   /**
    * Go here to get table descriptors.
    */
@@ -836,7 +828,6 @@ public class HRegionServer extends HasThread implements
   protected synchronized void setupClusterConnection() throws IOException {
     if (clusterConnection == null) {
       clusterConnection = createClusterConnection();
-      metaTableLocator = new MetaTableLocator();
     }
   }
 
@@ -1104,8 +1095,6 @@ public class HRegionServer extends HasThread implements
       LOG.info("stopping server " + this.serverName);
     }
 
-    // so callers waiting for meta without timeout can stop
-    if (this.metaTableLocator != null) this.metaTableLocator.stop();
     if (this.clusterConnection != null && !clusterConnection.isClosed()) {
       try {
         this.clusterConnection.close();
@@ -2174,11 +2163,6 @@ public class HRegionServer extends HasThread implements
   }
 
   @Override
-  public MetaTableLocator getMetaTableLocator() {
-    return this.metaTableLocator;
-  }
-
-  @Override
   public void stop(final String msg) {
     stop(msg, false, RpcServer.getRequestUser().orElse(null));
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
index 62068fd..c7bccb3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSyncUp.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -130,11 +129,6 @@ public class ReplicationSyncUp extends Configured implements Tool {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ServerName getServerName() {
       return ServerName.valueOf(hostname, 1234, 1L);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index b43262d..14706c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -134,7 +134,6 @@ import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
@@ -144,20 +143,20 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
-import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.apache.zookeeper.KeeperException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hbase.thirdparty.com.google.common.base.Joiner;
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
 import org.apache.hbase.thirdparty.com.google.common.collect.Multimap;
 import org.apache.hbase.thirdparty.com.google.common.collect.Ordering;
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 import org.apache.hbase.thirdparty.com.google.common.collect.TreeMultimap;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -2002,11 +2001,6 @@ public class HBaseFsck extends Configured implements Closeable {
     });
   }
 
-  private ServerName getMetaRegionServerName(int replicaId)
-  throws IOException, KeeperException {
-    return new MetaTableLocator().getMetaRegionLocation(zkw, replicaId);
-  }
-
   /**
    * Contacts each regionserver and fetches metadata about regions.
    * @param regionServerList - the list of region servers to connect to

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 372e5ca..5b996eb396 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -77,8 +77,6 @@
 <%
   HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
   Configuration conf = master.getConfiguration();
-
-  MetaTableLocator metaTableLocator = new MetaTableLocator();
   String fqtn = request.getParameter("name");
   final String escaped_fqtn = StringEscapeUtils.escapeHtml4(fqtn);
   Table table;
@@ -202,7 +200,7 @@ if ( fqtn != null ) {
   for (int j = 0; j < numMetaReplicas; j++) {
     RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
                             RegionInfoBuilder.FIRST_META_REGIONINFO, j);
-    ServerName metaLocation = metaTableLocator.waitMetaRegionLocation(master.getZooKeeper(), j, 1);
+    ServerName metaLocation = MetaTableLocator.waitMetaRegionLocation(master.getZooKeeper(), j, 1);
     for (int i = 0; i < 1; i++) {
       String hostAndPort = "";
       String readReq = "N/A";

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index c0a2a8c..ff0a88c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -55,7 +55,6 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -162,11 +161,6 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public MetaTableLocator getMetaTableLocator() {
-    return null;
-  }
-
-  @Override
   public ZKWatcher getZooKeeper() {
     return zkw;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
index 0d21fb8..5582178 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableAccessor.java
@@ -247,13 +247,11 @@ public class TestMetaTableAccessor {
     abstract void metaTask() throws Throwable;
   }
 
-  @Test public void testGetRegionsFromMetaTable()
-  throws IOException, InterruptedException {
-    List<RegionInfo> regions =
-      new MetaTableLocator().getMetaRegions(UTIL.getZooKeeperWatcher());
+  @Test
+  public void testGetRegionsFromMetaTable() throws IOException, InterruptedException {
+    List<RegionInfo> regions = MetaTableLocator.getMetaRegions(UTIL.getZooKeeperWatcher());
     assertTrue(regions.size() >= 1);
-    assertTrue(new MetaTableLocator().getMetaRegionsAndLocations(
-      UTIL.getZooKeeperWatcher()).size() >= 1);
+    assertTrue(MetaTableLocator.getMetaRegionsAndLocations(UTIL.getZooKeeperWatcher()).size() >= 1);
   }
 
   @Test public void testTableExists() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
index db93c0c..9274fa0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestMetaTableLocator.java
@@ -18,18 +18,10 @@
 package org.apache.hadoop.hbase;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
-import java.net.ConnectException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
@@ -51,8 +43,6 @@ import org.slf4j.LoggerFactory;
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
 import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;
 
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;
@@ -60,31 +50,34 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRespons
 /**
  * Test {@link org.apache.hadoop.hbase.zookeeper.MetaTableLocator}
  */
-@Category({MiscTests.class, MediumTests.class})
+@Category({ MiscTests.class, MediumTests.class })
 public class TestMetaTableLocator {
 
   @ClassRule
   public static final HBaseClassTestRule CLASS_RULE =
-      HBaseClassTestRule.forClass(TestMetaTableLocator.class);
+    HBaseClassTestRule.forClass(TestMetaTableLocator.class);
 
   private static final Logger LOG = LoggerFactory.getLogger(TestMetaTableLocator.class);
   private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
   private static final ServerName SN =
-      ServerName.valueOf("example.org", 1234, System.currentTimeMillis());
+    ServerName.valueOf("example.org", 1234, System.currentTimeMillis());
   private ZKWatcher watcher;
   private Abortable abortable;
 
-  @BeforeClass public static void beforeClass() throws Exception {
+  @BeforeClass
+  public static void beforeClass() throws Exception {
     // Set this down so tests run quicker
     UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 3);
     UTIL.startMiniZKCluster();
   }
 
-  @AfterClass public static void afterClass() throws IOException {
+  @AfterClass
+  public static void afterClass() throws IOException {
     UTIL.getZkCluster().shutdown();
   }
 
-  @Before public void before() throws IOException {
+  @Before
+  public void before() throws IOException {
     this.abortable = new Abortable() {
       @Override
       public void abort(String why, Throwable e) {
@@ -92,19 +85,20 @@ public class TestMetaTableLocator {
       }
 
       @Override
-      public boolean isAborted()  {
+      public boolean isAborted() {
         return false;
       }
     };
-    this.watcher = new ZKWatcher(UTIL.getConfiguration(),
-      this.getClass().getSimpleName(), this.abortable, true);
+    this.watcher =
+      new ZKWatcher(UTIL.getConfiguration(), this.getClass().getSimpleName(), this.abortable, true);
   }
 
-  @After public void after() {
+  @After
+  public void after() {
     try {
       // Clean out meta location or later tests will be confused... they presume
       // start fresh in zk.
-      new MetaTableLocator().deleteMetaLocation(this.watcher);
+      MetaTableLocator.deleteMetaLocation(this.watcher);
     } catch (KeeperException e) {
       LOG.warn("Unable to delete hbase:meta location", e);
     }
@@ -115,178 +109,47 @@ public class TestMetaTableLocator {
   /**
    * Test normal operations
    */
-  @Test public void testMetaLookup()
-          throws IOException, InterruptedException, ServiceException, KeeperException {
+  @Test
+  public void testMetaLookup()
+      throws IOException, InterruptedException, ServiceException, KeeperException {
     final ClientProtos.ClientService.BlockingInterface client =
-            Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
+      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
 
-    Mockito.when(client.get((RpcController)Mockito.any(), (GetRequest)Mockito.any())).
-            thenReturn(GetResponse.newBuilder().build());
+    Mockito.when(client.get((RpcController) Mockito.any(), (GetRequest) Mockito.any()))
+      .thenReturn(GetResponse.newBuilder().build());
 
-    final MetaTableLocator mtl = new MetaTableLocator();
-    assertNull(mtl.getMetaRegionLocation(this.watcher));
+    assertNull(MetaTableLocator.getMetaRegionLocation(this.watcher));
     for (RegionState.State state : RegionState.State.values()) {
-      if (state.equals(RegionState.State.OPEN))
+      if (state.equals(RegionState.State.OPEN)) {
         continue;
+      }
       MetaTableLocator.setMetaLocation(this.watcher, SN, state);
-      assertNull(mtl.getMetaRegionLocation(this.watcher));
+      assertNull(MetaTableLocator.getMetaRegionLocation(this.watcher));
       assertEquals(state, MetaTableLocator.getMetaRegionState(this.watcher).getState());
     }
     MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPEN);
-    assertEquals(SN, mtl.getMetaRegionLocation(this.watcher));
+    assertEquals(SN, MetaTableLocator.getMetaRegionLocation(this.watcher));
     assertEquals(RegionState.State.OPEN,
       MetaTableLocator.getMetaRegionState(this.watcher).getState());
 
-    mtl.deleteMetaLocation(this.watcher);
+    MetaTableLocator.deleteMetaLocation(this.watcher);
     assertNull(MetaTableLocator.getMetaRegionState(this.watcher).getServerName());
     assertEquals(RegionState.State.OFFLINE,
-        MetaTableLocator.getMetaRegionState(this.watcher).getState());
-    assertNull(mtl.getMetaRegionLocation(this.watcher));
-  }
-
-
-  /**
-   * Test interruptable while blocking wait on meta.
-   * @throws IOException
-   * @throws ServiceException
-   * @throws InterruptedException
-   */
-  @Test public void testInterruptWaitOnMeta()
-  throws IOException, InterruptedException, ServiceException {
-    final ClientProtos.ClientService.BlockingInterface client =
-      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
-
-    Mockito.when(client.get((RpcController)Mockito.any(), (GetRequest)Mockito.any())).
-    thenReturn(GetResponse.newBuilder().build());
-
-    final MetaTableLocator mtl = new MetaTableLocator();
-    ServerName meta = new MetaTableLocator().getMetaRegionLocation(this.watcher);
-    assertNull(meta);
-    Thread t = new Thread() {
-      @Override
-      public void run() {
-        try {
-          mtl.waitMetaRegionLocation(watcher);
-        } catch (InterruptedException e) {
-          throw new RuntimeException("Interrupted", e);
-        }
-      }
-    };
-    t.start();
-    while (!t.isAlive())
-      Threads.sleep(1);
-    Threads.sleep(1);
-    assertTrue(t.isAlive());
-    mtl.stop();
-    // Join the thread... should exit shortly.
-    t.join();
-  }
-
-  private void testVerifyMetaRegionLocationWithException(Exception ex)
-  throws IOException, InterruptedException, KeeperException, ServiceException {
-    // Mock an ClientProtocol.
-    final ClientProtos.ClientService.BlockingInterface implementation =
-      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
-
-    ClusterConnection connection = mockConnection(null, implementation);
-
-    // If a 'get' is called on mocked interface, throw connection refused.
-    Mockito.when(implementation.get((RpcController) Mockito.any(), (GetRequest) Mockito.any())).
-      thenThrow(new ServiceException(ex));
-
-    long timeout = UTIL.getConfiguration().
-            getLong("hbase.catalog.verification.timeout", 1000);
-    MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPENING);
-    assertFalse(new MetaTableLocator().verifyMetaRegionLocation(
-      connection, watcher, timeout));
-
-    MetaTableLocator.setMetaLocation(this.watcher, SN, RegionState.State.OPEN);
-    assertFalse(new MetaTableLocator().verifyMetaRegionLocation(
-            connection, watcher, timeout));
-  }
-
-  /**
-   * Test we survive a connection refused {@link ConnectException}
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws KeeperException
-   * @throws ServiceException
-   */
-  @Test
-  public void testGetMetaServerConnectionFails()
-  throws IOException, InterruptedException, KeeperException, ServiceException {
-    testVerifyMetaRegionLocationWithException(new ConnectException("Connection refused"));
-  }
-
-  /**
-   * Test that verifyMetaRegionLocation properly handles getting a
-   * ServerNotRunningException. See HBASE-4470.
-   * Note this doesn't check the exact exception thrown in the
-   * HBASE-4470 as there it is thrown from getHConnection() and
-   * here it is thrown from get() -- but those are both called
-   * from the same function anyway, and this way is less invasive than
-   * throwing from getHConnection would be.
-   *
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws KeeperException
-   * @throws ServiceException
-   */
-  @Test
-  public void testVerifyMetaRegionServerNotRunning()
-  throws IOException, InterruptedException, KeeperException, ServiceException {
-    testVerifyMetaRegionLocationWithException(new ServerNotRunningYetException("mock"));
-  }
-
-  /**
-   * Test get of meta region fails properly if nothing to connect to.
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws KeeperException
-   * @throws ServiceException
-   */
-  @Test
-  public void testVerifyMetaRegionLocationFails()
-  throws IOException, InterruptedException, KeeperException, ServiceException {
-    ClusterConnection connection = Mockito.mock(ClusterConnection.class);
-    ServiceException connectException =
-      new ServiceException(new ConnectException("Connection refused"));
-    final AdminProtos.AdminService.BlockingInterface implementation =
-      Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
-    Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(),
-      (GetRegionInfoRequest)Mockito.any())).thenThrow(connectException);
-    Mockito.when(connection.getAdmin(Mockito.any())).
-      thenReturn(implementation);
-        RpcControllerFactory controllerFactory = Mockito.mock(RpcControllerFactory.class);
-        Mockito.when(controllerFactory.newController()).thenReturn(
-          Mockito.mock(HBaseRpcController.class));
-        Mockito.when(connection.getRpcControllerFactory()).thenReturn(controllerFactory);
-
-    ServerName sn = ServerName.valueOf("example.com", 1234, System.currentTimeMillis());
-    MetaTableLocator.setMetaLocation(this.watcher,
-            sn,
-            RegionState.State.OPENING);
-    assertFalse(new MetaTableLocator().verifyMetaRegionLocation(connection, watcher, 100));
-    MetaTableLocator.setMetaLocation(this.watcher, sn, RegionState.State.OPEN);
-    assertFalse(new MetaTableLocator().verifyMetaRegionLocation(connection, watcher, 100));
+      MetaTableLocator.getMetaRegionState(this.watcher).getState());
+    assertNull(MetaTableLocator.getMetaRegionLocation(this.watcher));
   }
 
-  @Test (expected = NotAllMetaRegionsOnlineException.class)
-  public void testTimeoutWaitForMeta()
-  throws IOException, InterruptedException {
-    new MetaTableLocator().waitMetaRegionLocation(watcher, 100);
+  @Test(expected = NotAllMetaRegionsOnlineException.class)
+  public void testTimeoutWaitForMeta() throws IOException, InterruptedException {
+    MetaTableLocator.waitMetaRegionLocation(watcher, 100);
   }
 
   /**
   * Test waiting on meta w/ no timeout specified.
-   * @throws IOException
-   * @throws InterruptedException
-   * @throws KeeperException
    */
-  @Test public void testNoTimeoutWaitForMeta()
-  throws IOException, InterruptedException, KeeperException {
-    final MetaTableLocator mtl = new MetaTableLocator();
-    ServerName hsa = mtl.getMetaRegionLocation(watcher);
+  @Test
+  public void testNoTimeoutWaitForMeta() throws IOException, InterruptedException, KeeperException {
+    ServerName hsa = MetaTableLocator.getMetaRegionLocation(watcher);
     assertNull(hsa);
 
     // Now test waiting on meta location getting set.
@@ -298,54 +161,12 @@ public class TestMetaTableLocator {
     // Join the thread... should exit shortly.
     t.join();
     // Now meta is available.
-    assertTrue(mtl.getMetaRegionLocation(watcher).equals(hsa));
-  }
-
-  /**
-   * @param admin An {@link AdminProtos.AdminService.BlockingInterface} instance; you'll likely
-   * want to pass a mocked HRS; can be null.
-   * @param client A mocked ClientProtocol instance, can be null
-   * @return Mock up a connection that returns a {@link Configuration} when
-   * {@link org.apache.hadoop.hbase.client.ClusterConnection#getConfiguration()} is called, a 'location' when
-   * {@link org.apache.hadoop.hbase.client.RegionLocator#getRegionLocation(byte[], boolean)} is called,
-   * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
-   * {@link org.apache.hadoop.hbase.client.ClusterConnection#getAdmin(ServerName)} is called, returns the passed
-   * {@link ClientProtos.ClientService.BlockingInterface} instance when
-   * {@link org.apache.hadoop.hbase.client.ClusterConnection#getClient(ServerName)} is called.
-   * @throws IOException
-   */
-  private ClusterConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin,
-      final ClientProtos.ClientService.BlockingInterface client)
-  throws IOException {
-    ClusterConnection connection =
-      HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
-    Mockito.doNothing().when(connection).close();
-    // Make it so we return any old location when asked.
-    final HRegionLocation anyLocation = new HRegionLocation(HRegionInfo.FIRST_META_REGIONINFO, SN);
-    Mockito.when(connection.getRegionLocation((TableName) Mockito.any(),
-        (byte[]) Mockito.any(), Mockito.anyBoolean())).
-      thenReturn(anyLocation);
-    Mockito.when(connection.locateRegion((TableName) Mockito.any(),
-        (byte[]) Mockito.any())).
-      thenReturn(anyLocation);
-    if (admin != null) {
-      // If a call to getHRegionConnection, return this implementation.
-      Mockito.when(connection.getAdmin(Mockito.any())).
-        thenReturn(admin);
-    }
-    if (client != null) {
-      // If a call to getClient, return this implementation.
-      Mockito.when(connection.getClient(Mockito.any())).
-        thenReturn(client);
-    }
-    return connection;
+    assertTrue(MetaTableLocator.getMetaRegionLocation(watcher).equals(hsa));
   }
 
   private void startWaitAliveThenWaitItLives(final Thread t, final int ms) {
     t.start();
-    while(!t.isAlive()) {
-      // Wait
-    }
+    UTIL.waitFor(2000, t::isAlive);
     // Wait one second.
     Threads.sleep(ms);
     assertTrue("Assert " + t.getName() + " still waiting", t.isAlive());
@@ -372,9 +193,13 @@ public class TestMetaTableLocator {
 
     void doWaiting() throws InterruptedException {
       try {
-        while (new MetaTableLocator().waitMetaRegionLocation(watcher, 10000) == null);
+        for (;;) {
+          if (MetaTableLocator.waitMetaRegionLocation(watcher, 10000) != null) {
+            break;
+          }
+        }
       } catch (NotAllMetaRegionsOnlineException e) {
-        //Ignore
+        // Ignore
       }
     }
   }
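
[Editorial note: one behavioral improvement hides among the formatting changes above. The old startWaitAliveThenWaitItLives spun in an unbounded busy loop until the thread reported alive, while the new code polls through the test utility with a deadline. A commented restatement of that hunk:

    // Before: unbounded busy-wait; burns CPU and can hang the test forever.
    while (!t.isAlive()) {
      // Wait
    }

    // After: poll t::isAlive for up to 2000 ms; the utility fails the test
    // if the condition never becomes true within the timeout.
    UTIL.waitFor(2000, t::isAlive);
]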

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index bac588a..3b14b7f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
 import org.apache.hadoop.hbase.util.HBaseFsckRepair;
 import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
@@ -100,8 +101,7 @@ public class TestMetaWithReplicas {
     AssignmentManager am = TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager();
     Set<ServerName> sns = new HashSet<ServerName>();
     ServerName hbaseMetaServerName =
-        TEST_UTIL.getMiniHBaseCluster().getMaster().getMetaTableLocator().
-            getMetaRegionLocation(TEST_UTIL.getZooKeeperWatcher());
+      MetaTableLocator.getMetaRegionLocation(TEST_UTIL.getZooKeeperWatcher());
     LOG.info("HBASE:META DEPLOY: on " + hbaseMetaServerName);
     sns.add(hbaseMetaServerName);
     for (int replicaId = 1; replicaId < 3; replicaId++) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index ac20dbd..9c55f57 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.SyncReplicationState;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 
 public class MockNoopMasterServices implements MasterServices {
@@ -161,11 +160,6 @@ public class MockNoopMasterServices implements MasterServices {
   }
 
   @Override
-  public MetaTableLocator getMetaTableLocator() {
-    return null;
-  }
-
-  @Override
   public ClusterConnection getConnection() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index f4c2a33..844b705 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -68,7 +68,6 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 
 import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
@@ -305,11 +304,6 @@ class MockRegionServer implements AdminProtos.AdminService.BlockingInterface,
   }
 
   @Override
-  public MetaTableLocator getMetaTableLocator() {
-    return null;
-  }
-
-  @Override
   public ServerName getServerName() {
     return this.sn;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
index 5851dea..2300f54 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.zookeeper.ClusterStatusTracker;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKListener;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -317,11 +316,6 @@ public class TestActiveMasterManager {
       return null;
     }
 
-    @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
     public ClusterStatusTracker getClusterStatusTracker() {
       return clusterStatusTracker;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
index 9da4df4..5c8db3e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileCleaner.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.EnvironmentEdge;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -233,11 +232,6 @@ public class TestHFileCleaner {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ServerName getServerName() {
       return ServerName.valueOf("regionserver,60020,000000");
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
index c011ea8..119194b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestHFileLinkCleaner.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.junit.ClassRule;
 import org.junit.Rule;
@@ -169,11 +168,6 @@ public class TestHFileLinkCleaner {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ServerName getServerName() {
       return ServerName.valueOf("regionserver,60020,000000");
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index 882ea9d..247ed01 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -30,7 +30,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -55,7 +54,6 @@ import org.apache.hadoop.hbase.replication.ReplicationStorageFactory;
 import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -367,11 +365,6 @@ public class TestLogsCleaner {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ServerName getServerName() {
       return ServerName.valueOf("regionserver,60020,000000");
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index 24b930c..d162bf3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -55,7 +55,6 @@ import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -257,11 +256,6 @@ public class TestReplicationHFileCleaner {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ServerName getServerName() {
       return ServerName.valueOf("regionserver,60020,000000");
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
index 9f05a73..8c9ce75 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHeapMemoryManager.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerContext;
 import org.apache.hadoop.hbase.regionserver.HeapMemoryManager.TunerResult;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.junit.ClassRule;
 import org.junit.Test;
@@ -834,11 +833,6 @@ public class TestHeapMemoryManager {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ServerName getServerName() {
       return ServerName.valueOf("server1",4000,12345);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
index af2861f..b9f89b7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
@@ -111,8 +111,7 @@ public class TestRegionServerNoMaster {
     HRegionServer hrs = HTU.getHBaseCluster()
       .getLiveRegionServerThreads().get(0).getRegionServer();
     ZKWatcher zkw = hrs.getZooKeeper();
-    MetaTableLocator mtl = new MetaTableLocator();
-    ServerName sn = mtl.getMetaRegionLocation(zkw);
+    ServerName sn = MetaTableLocator.getMetaRegionLocation(zkw);
     if (sn != null && !masterAddr.equals(sn)) {
       return;
     }
@@ -120,7 +119,7 @@ public class TestRegionServerNoMaster {
     ProtobufUtil.openRegion(null, hrs.getRSRpcServices(),
       hrs.getServerName(), HRegionInfo.FIRST_META_REGIONINFO);
     while (true) {
-      sn = mtl.getMetaRegionLocation(zkw);
+      sn = MetaTableLocator.getMetaRegionLocation(zkw);
       if (sn != null && sn.equals(hrs.getServerName())
           && hrs.onlineRegions.containsKey(
               HRegionInfo.FIRST_META_REGIONINFO.getEncodedName())) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
index 0c38ee3..cbf932c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitLogWorker.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.executor.ExecutorType;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -136,11 +135,6 @@ public class TestSplitLogWorker {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ChoreService getChoreService() {
       return null;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
index 84b8d6c..0e20252 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWALLockup.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.hadoop.hbase.wal.WALProvider.Writer;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.junit.After;
 import org.junit.Assert;
@@ -475,11 +474,6 @@ public class TestWALLockup {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ServerName getServerName() {
       return ServerName.valueOf(this.serverName);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
index 2419095..863d558 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationTrackerZKImpl.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -214,11 +213,6 @@ public class TestReplicationTrackerZKImpl {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ServerName getServerName() {
       return ServerName.valueOf(this.serverName);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 0872ea7..86bbb09 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -88,7 +88,6 @@ import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
@@ -858,11 +857,6 @@ public abstract class TestReplicationSourceManager {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ServerName getServerName() {
       return ServerName.valueOf(hostname, 1234, 1L);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index afde037..e4780f1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -70,7 +70,6 @@ import org.apache.hadoop.hbase.util.Sleeper;
 import org.apache.hadoop.hbase.util.Strings;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Writables;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.net.DNS;
@@ -216,11 +215,6 @@ public class TestTokenAuthentication {
     }
 
     @Override
-    public MetaTableLocator getMetaTableLocator() {
-      return null;
-    }
-
-    @Override
     public ZKWatcher getZooKeeper() {
       return zookeeper;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8bf966c8/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
index 551b940..c25db01 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/MockServer.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.ChoreService;
@@ -30,7 +29,6 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.log.HBaseMarkers;
-import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -110,11 +108,6 @@ public class MockServer implements Server {
   }
 
   @Override
-  public MetaTableLocator getMetaTableLocator() {
-    return null;
-  }
-
-  @Override
   public ServerName getServerName() {
     return NAME;
   }
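
[Editorial note: the long run of test-file hunks above is the other half of the cleanup. With MetaTableLocator reduced to a static utility, the getMetaTableLocator() accessor disappears from the server-facing interfaces (the removed methods all carried @Override), so every mock that stubbed it with null simply drops the override, and HRegionServer loses its long-lived locator field. A self-contained toy of the shape of that refactor; all names here are illustrative, not HBase's:

    // A stateless helper exposed through static methods needs no accessor
    // on the interfaces that used to hand out instances of it.
    interface ServerBefore {
      Locator getLocator(); // every implementation, mocks included, had to stub this
    }

    interface ServerAfter {
      // accessor removed: callers invoke Locator's static methods directly
    }

    final class Locator {
      private Locator() {} // no instances required any more
      static String locate(String zkQuorum) {
        return "meta@" + zkQuorum;
      }
    }
]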


[05/51] [abbrv] hbase git commit: HBASE-21485 Add more debug logs for remote procedure execution

Posted by el...@apache.org.
HBASE-21485 Add more debug logs for remote procedure execution


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/43a10df7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/43a10df7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/43a10df7

Branch: refs/heads/HBASE-20952
Commit: 43a10df70fcf36999ce28e1ba0ac06e0d62ccaf1
Parents: a819875
Author: zhangduo <zh...@apache.org>
Authored: Fri Nov 16 11:18:58 2018 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Fri Nov 16 14:30:59 2018 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/master/HMaster.java     |  2 ++
 .../apache/hadoop/hbase/regionserver/RSRpcServices.java  |  3 +++
 .../regionserver/RemoteProcedureResultReporter.java      |  2 ++
 .../hbase/regionserver/handler/RSProcedureHandler.java   | 11 ++++++-----
 4 files changed, 13 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/43a10df7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index df744b6..31dc208 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3888,6 +3888,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   public void remoteProcedureCompleted(long procId) {
+    LOG.debug("Remote procedure done, pid={}", procId);
     RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
     if (procedure != null) {
       procedure.remoteOperationCompleted(procedureExecutor.getEnvironment());
@@ -3895,6 +3896,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   public void remoteProcedureFailed(long procId, RemoteProcedureException error) {
+    LOG.debug("Remote procedure failed, pid={}", procId, error);
     RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
     if (procedure != null) {
       procedure.remoteOperationFailed(procedureExecutor.getEnvironment(), error);

http://git-wip-us.apache.org/repos/asf/hbase/blob/43a10df7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 17c582d..df84dcf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -3734,10 +3734,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       callable = Class.forName(request.getProcClass()).asSubclass(RSProcedureCallable.class)
         .getDeclaredConstructor().newInstance();
     } catch (Exception e) {
+      LOG.warn("Failed to instantiating remote procedure {}, pid={}", request.getProcClass(),
+        request.getProcId(), e);
       regionServer.remoteProcedureComplete(request.getProcId(), e);
       return;
     }
     callable.init(request.getProcData().toByteArray(), regionServer);
+    LOG.debug("Executing remote procedure {}, pid={}", callable.getClass(), request.getProcId());
     regionServer.executeProcedure(request.getProcId(), callable);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/43a10df7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java
index ac3e95a..efb044a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RemoteProcedureResultReporter.java
@@ -57,9 +57,11 @@ class RemoteProcedureResultReporter extends Thread {
   public void complete(long procId, Throwable error) {
     RemoteProcedureResult.Builder builder = RemoteProcedureResult.newBuilder().setProcId(procId);
     if (error != null) {
+      LOG.debug("Failed to complete execution of proc pid={}", procId, error);
       builder.setStatus(RemoteProcedureResult.Status.ERROR).setError(
         ForeignExceptionUtil.toProtoForeignException(server.getServerName().toString(), error));
     } else {
+      LOG.debug("Successfully complete execution of proc pid={}", procId);
       builder.setStatus(RemoteProcedureResult.Status.SUCCESS);
     }
     results.add(builder.build());

http://git-wip-us.apache.org/repos/asf/hbase/blob/43a10df7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java
index d2175d0..ddff13f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java
@@ -42,13 +42,14 @@ public class RSProcedureHandler extends EventHandler {
 
   @Override
   public void process() {
-    Exception error = null;
+    Throwable error = null;
     try {
       callable.call();
-    } catch (Exception e) {
-      LOG.error("Catch exception when call RSProcedureCallable: ", e);
-      error = e;
+    } catch (Throwable t) {
+      LOG.error("Error when call RSProcedureCallable: ", t);
+      error = t;
+    } finally {
+      ((HRegionServer) server).remoteProcedureComplete(procId, error);
     }
-    ((HRegionServer) server).remoteProcedureComplete(procId, error);
   }
 }

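The rewrite above makes two guarantees explicit: catching Throwable instead of Exception means even Errors (for example a NoClassDefFoundError from the callable) are captured, and the finally block ensures remoteProcedureComplete always runs, so the master is never left waiting. A simplified sketch of the resulting control flow, with stand-in types in place of the HBase ones:

  public final class AlwaysReportSketch {
    interface Reporter {
      void complete(long procId, Throwable error);
    }

    static void process(long procId, Runnable callable, Reporter reporter) {
      Throwable error = null;
      try {
        callable.run();
      } catch (Throwable t) {
        // Throwable, not Exception: also captures Errors thrown by the task.
        error = t;
      } finally {
        // Runs on success and on failure alike; error == null means success.
        reporter.complete(procId, error);
      }
    }
  }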

[51/51] [abbrv] hbase git commit: HBASE-20952 run the extended tests weekly until branch activity picks up.

Posted by el...@apache.org.
HBASE-20952 run the extended tests weekly until branch activity picks up.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fb59426b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fb59426b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fb59426b

Branch: refs/heads/HBASE-20952
Commit: fb59426b793e91220e90384b401b14276ebb339f
Parents: 8d7061a
Author: Sean Busbey <bu...@apache.org>
Authored: Fri Nov 16 07:51:08 2018 -0600
Committer: Josh Elser <el...@apache.org>
Committed: Fri Dec 7 17:28:23 2018 -0500

----------------------------------------------------------------------
 dev-support/Jenkinsfile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fb59426b/dev-support/Jenkinsfile
----------------------------------------------------------------------
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index b333afb..bea425a 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -21,7 +21,7 @@ pipeline {
     }
   }
   triggers {
-    cron('@daily')
+    cron('@weekly')
   }
   options {
     buildDiscarder(logRotator(numToKeepStr: '30'))


[28/51] [abbrv] hbase git commit: HBASE-21517 Move the getTableRegionForRow method from HMaster to TestMaster

Posted by el...@apache.org.
HBASE-21517 Move the getTableRegionForRow method from HMaster to TestMaster


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d6e1d18b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d6e1d18b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d6e1d18b

Branch: refs/heads/HBASE-20952
Commit: d6e1d18be901bd969be976ad8d20359e876e6cd0
Parents: 7877e09
Author: Duo Zhang <zh...@apache.org>
Authored: Tue Nov 27 16:14:44 2018 +0800
Committer: Duo Zhang <zh...@apache.org>
Committed: Wed Nov 28 09:40:27 2018 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/master/HMaster.java | 35 --------------------
 .../apache/hadoop/hbase/master/TestMaster.java  | 34 +++++++++++++++++--
 2 files changed, 32 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d6e1d18b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 428030d..132e271 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -47,7 +47,6 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Function;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
@@ -84,7 +83,6 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
-import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
@@ -2626,39 +2624,6 @@ public class HMaster extends HRegionServer implements MasterServices {
     });
   }
 
-  /**
-   * Return the region and current deployment for the region containing
-   * the given row. If the region cannot be found, returns null. If it
-   * is found, but not currently deployed, the second element of the pair
-   * may be null.
-   */
-  @VisibleForTesting // Used by TestMaster.
-  Pair<RegionInfo, ServerName> getTableRegionForRow(
-      final TableName tableName, final byte [] rowKey)
-  throws IOException {
-    final AtomicReference<Pair<RegionInfo, ServerName>> result = new AtomicReference<>(null);
-
-    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
-        @Override
-        public boolean visit(Result data) throws IOException {
-          if (data == null || data.size() <= 0) {
-            return true;
-          }
-          Pair<RegionInfo, ServerName> pair =
-              new Pair<>(MetaTableAccessor.getRegionInfo(data),
-                  MetaTableAccessor.getServerName(data,0));
-          if (!pair.getFirst().getTable().equals(tableName)) {
-            return false;
-          }
-          result.set(pair);
-          return true;
-        }
-    };
-
-    MetaTableAccessor.scanMeta(clusterConnection, visitor, tableName, rowKey, 1);
-    return result.get();
-  }
-
   private long modifyTable(final TableName tableName,
       final TableDescriptorGetter newDescriptorGetter, final long nonceGroup, final long nonce)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d6e1d18b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
index af89cbc..7fdf601 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMaster.java
@@ -26,6 +26,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -94,6 +96,35 @@ public class TestMaster {
     TEST_UTIL.shutdownMiniCluster();
   }
 
+  /**
+   * Return the region and current deployment for the region containing the given row. If the region
+   * cannot be found, returns null. If it is found, but not currently deployed, the second element
+   * of the pair may be null.
+   */
+  private Pair<RegionInfo, ServerName> getTableRegionForRow(HMaster master, TableName tableName,
+      byte[] rowKey) throws IOException {
+    final AtomicReference<Pair<RegionInfo, ServerName>> result = new AtomicReference<>(null);
+
+    MetaTableAccessor.Visitor visitor = new MetaTableAccessor.Visitor() {
+      @Override
+      public boolean visit(Result data) throws IOException {
+        if (data == null || data.size() <= 0) {
+          return true;
+        }
+        Pair<RegionInfo, ServerName> pair = new Pair<>(MetaTableAccessor.getRegionInfo(data),
+          MetaTableAccessor.getServerName(data, 0));
+        if (!pair.getFirst().getTable().equals(tableName)) {
+          return false;
+        }
+        result.set(pair);
+        return true;
+      }
+    };
+
+    MetaTableAccessor.scanMeta(master.getConnection(), visitor, tableName, rowKey, 1);
+    return result.get();
+  }
+
   @Test
   @SuppressWarnings("deprecation")
   public void testMasterOpsWhileSplitting() throws Exception {
@@ -128,8 +159,7 @@ public class TestMaster {
     // We have three regions because one is split-in-progress
     assertEquals(3, tableRegions.size());
     LOG.info("Making sure we can call getTableRegionClosest while opening");
-    Pair<RegionInfo, ServerName> pair =
-        m.getTableRegionForRow(TABLENAME, Bytes.toBytes("cde"));
+    Pair<RegionInfo, ServerName> pair = getTableRegionForRow(m, TABLENAME, Bytes.toBytes("cde"));
     LOG.info("Result is: " + pair);
     Pair<RegionInfo, ServerName> tableRegionFromName =
         MetaTableAccessor.getRegion(m.getConnection(),


[46/51] [abbrv] hbase git commit: HBASE-21414 : StoreFileSize growth rate metric

Posted by el...@apache.org.
HBASE-21414 : StoreFileSize growth rate metric

Signed-off-by: Sergey Shelukhin <se...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/170df27b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/170df27b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/170df27b

Branch: refs/heads/HBASE-20952
Commit: 170df27b8858c93ea22a38f28f653c347cb8ce7f
Parents: 12e75a8
Author: Tommy Li <To...@microsoft.com>
Authored: Thu Dec 6 13:43:13 2018 -0800
Committer: Sergey Shelukhin <se...@microsoft.com>
Committed: Thu Dec 6 14:43:17 2018 -0800

----------------------------------------------------------------------
 .../regionserver/MetricsRegionServerSource.java  |  3 +++
 .../regionserver/MetricsRegionServerWrapper.java |  5 +++++
 .../MetricsRegionServerSourceImpl.java           |  2 ++
 .../MetricsRegionServerWrapperImpl.java          | 19 ++++++++++++++-----
 .../MetricsRegionServerWrapperStub.java          |  7 ++++++-
 .../regionserver/TestMetricsRegionServer.java    |  1 +
 6 files changed, 31 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/170df27b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index b94ee2d..6d9ce54 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -234,6 +234,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
   String MEMSTORE_SIZE = "memStoreSize";
   String MEMSTORE_SIZE_DESC = "Size of the memstore";
   String STOREFILE_SIZE = "storeFileSize";
+  String STOREFILE_SIZE_GROWTH_RATE = "storeFileSizeGrowthRate";
   String MAX_STORE_FILE_AGE = "maxStoreFileAge";
   String MIN_STORE_FILE_AGE = "minStoreFileAge";
   String AVG_STORE_FILE_AGE = "avgStoreFileAge";
@@ -243,6 +244,8 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
   String AVG_STORE_FILE_AGE_DESC = "Average age of store files hosted on this RegionServer";
   String NUM_REFERENCE_FILES_DESC = "Number of reference file on this RegionServer";
   String STOREFILE_SIZE_DESC = "Size of storefiles being served.";
+  String STOREFILE_SIZE_GROWTH_RATE_DESC =
+        "Bytes per second by which the size of storefiles being served grows.";
   String TOTAL_REQUEST_COUNT = "totalRequestCount";
   String TOTAL_REQUEST_COUNT_DESC =
       "Total number of requests this RegionServer has answered; increments the count once for " +

http://git-wip-us.apache.org/repos/asf/hbase/blob/170df27b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index b3a8dac..03ebc4c 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -101,6 +101,11 @@ public interface MetricsRegionServerWrapper {
   long getStoreFileSize();
 
   /**
+   * Get the growth rate of the total size of the store files this region server is serving from.
+   */
+  double getStoreFileSizeGrowthRate();
+
+  /**
    * @return Max age of store files hosted on this region server
    */
   long getMaxStoreFileAge();

http://git-wip-us.apache.org/repos/asf/hbase/blob/170df27b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index 44dbc79..58c42a5 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -482,6 +482,8 @@ public class MetricsRegionServerSourceImpl
                     rsWrap.getNumStoreFiles())
             .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemStoreSize())
             .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize())
+            .addGauge(Interns.info(STOREFILE_SIZE_GROWTH_RATE, STOREFILE_SIZE_GROWTH_RATE_DESC),
+                    rsWrap.getStoreFileSizeGrowthRate())
             .addGauge(Interns.info(MAX_STORE_FILE_AGE, MAX_STORE_FILE_AGE_DESC),
                     rsWrap.getMaxStoreFileAge())
             .addGauge(Interns.info(MIN_STORE_FILE_AGE, MIN_STORE_FILE_AGE_DESC),

http://git-wip-us.apache.org/repos/asf/hbase/blob/170df27b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index 04a32c8..b38c3e0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -68,6 +68,7 @@ class MetricsRegionServerWrapperImpl
   private volatile long numStoreFiles = 0;
   private volatile long memstoreSize = 0;
   private volatile long storeFileSize = 0;
+  private volatile double storeFileSizeGrowthRate = 0;
   private volatile long maxStoreFileAge = 0;
   private volatile long minStoreFileAge = 0;
   private volatile long avgStoreFileAge = 0;
@@ -512,6 +513,11 @@ class MetricsRegionServerWrapperImpl
     return storeFileSize;
   }
 
+  @Override
+  public double getStoreFileSizeGrowthRate() {
+    return storeFileSizeGrowthRate;
+  }
+
   @Override public double getRequestsPerSecond() {
     return requestsPerSecond;
   }
@@ -730,6 +736,7 @@ class MetricsRegionServerWrapperImpl
     private long lastRequestCount = 0;
     private long lastReadRequestsCount = 0;
     private long lastWriteRequestsCount = 0;
+    private long lastStoreFileSize = 0;
 
     @Override
     synchronized public void run() {
@@ -870,18 +877,20 @@ class MetricsRegionServerWrapperImpl
           long intervalReadRequestsCount = tempReadRequestsCount - lastReadRequestsCount;
           long intervalWriteRequestsCount = tempWriteRequestsCount - lastWriteRequestsCount;
 
-          double readRequestsRatePerMilliSecond = ((double)intervalReadRequestsCount/
-              (double)period);
-          double writeRequestsRatePerMilliSecond = ((double)intervalWriteRequestsCount/
-              (double)period);
+          double readRequestsRatePerMilliSecond = (double)intervalReadRequestsCount / period;
+          double writeRequestsRatePerMilliSecond = (double)intervalWriteRequestsCount / period;
 
           readRequestsRatePerSecond = readRequestsRatePerMilliSecond * 1000.0;
           writeRequestsRatePerSecond = writeRequestsRatePerMilliSecond * 1000.0;
 
+          long intervalStoreFileSize = tempStoreFileSize - lastStoreFileSize;
+          storeFileSizeGrowthRate = (double)intervalStoreFileSize * 1000.0 / period;
+
           lastReadRequestsCount = tempReadRequestsCount;
           lastWriteRequestsCount = tempWriteRequestsCount;
-
+          lastStoreFileSize = tempStoreFileSize;
         }
+
         lastRan = currentTime;
 
         WALProvider provider = regionServer.walFactory.getWALProvider();

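The rate computation introduced above is a straightforward unit conversion: the sampling runnable fires every period milliseconds, so the per-interval byte delta is multiplied by 1000.0 to yield bytes per second. A worked example with assumed numbers (the sizes and period are illustrative only):

  public final class GrowthRateSketch {
    public static void main(String[] args) {
      long period = 5000L;                   // sampling period, in milliseconds
      long lastStoreFileSize = 100_000_000L; // size at the previous sample
      long tempStoreFileSize = 105_000_000L; // size at the current sample

      long intervalStoreFileSize = tempStoreFileSize - lastStoreFileSize;
      // 5,000,000 bytes over 5,000 ms => 1,000,000 bytes per second
      double storeFileSizeGrowthRate = (double) intervalStoreFileSize * 1000.0 / period;

      System.out.println(storeFileSizeGrowthRate); // prints 1000000.0
    }
  }

Note that the delta can go negative, for instance after compactions shrink the on-disk footprint, so the gauge is a signed rate rather than a monotonically increasing value.
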
http://git-wip-us.apache.org/repos/asf/hbase/blob/170df27b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
index 314d69c..b003b44 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
@@ -71,6 +71,11 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
   }
 
   @Override
+  public double getStoreFileSizeGrowthRate() {
+    return 50.0;
+  }
+
+  @Override
   public long getMaxStoreFileAge() {
     return 2;
   }
@@ -222,7 +227,7 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
 
   @Override
   public long getMemStoreLimit() {
-	  return 419;
+    return 419;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/170df27b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
index fa8ea46..8215ea1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
@@ -78,6 +78,7 @@ public class TestMetricsRegionServer {
     HELPER.assertGauge("storeFileCount", 300, serverSource);
     HELPER.assertGauge("memstoreSize", 1025, serverSource);
     HELPER.assertGauge("storeFileSize", 1900, serverSource);
+    HELPER.assertGauge("storeFileSizeGrowthRate", 50.0, serverSource);
     HELPER.assertCounter("totalRequestCount", 899, serverSource);
     HELPER.assertCounter("totalRowActionRequestCount",
       HELPER.getCounter("readRequestCount", serverSource)