Posted to commits@hbase.apache.org by ap...@apache.org on 2015/08/26 22:07:16 UTC

[1/2] hbase git commit: HBASE-14312 Forward port some fixes from hbase-6721-0.98 to hbase-6721 (Francis Liu)

Repository: hbase
Updated Branches:
  refs/heads/hbase-6721 16f65badc -> fade887a3


http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 7a3b01f..05d2fb3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -62,12 +62,15 @@ public class MasterCoprocessorHost
   static class MasterEnvironment extends CoprocessorHost.Environment
       implements MasterCoprocessorEnvironment {
     private MasterServices masterServices;
+    final boolean supportGroupCPs;
 
     public MasterEnvironment(final Class<?> implClass, final Coprocessor impl,
         final int priority, final int seq, final Configuration conf,
         final MasterServices services) {
       super(impl, priority, seq, conf);
       this.masterServices = services;
+      supportGroupCPs = !useLegacyMethod(impl.getClass(),
+          "preBalanceGroup", ObserverContext.class, String.class);
     }
 
     public MasterServices getMasterServices() {
@@ -1110,7 +1113,9 @@ public class MasterCoprocessorHost
       @Override
       public void call(MasterObserver oserver,
           ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
-        oserver.preMoveServers(ctx, servers, targetGroup);
+        if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+          oserver.preMoveServers(ctx, servers, targetGroup);
+        }
       }
     });
   }
@@ -1121,7 +1126,9 @@ public class MasterCoprocessorHost
       @Override
       public void call(MasterObserver oserver,
           ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
-        oserver.postMoveServers(ctx, servers, targetGroup);
+        if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postMoveServers(ctx, servers, targetGroup);
+        }
       }
     });
   }
@@ -1132,7 +1139,9 @@ public class MasterCoprocessorHost
       @Override
       public void call(MasterObserver oserver,
           ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
-        oserver.preMoveTables(ctx, tables, targetGroup);
+        if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+          oserver.preMoveTables(ctx, tables, targetGroup);
+        }
       }
     });
   }
@@ -1143,7 +1152,9 @@ public class MasterCoprocessorHost
       @Override
       public void call(MasterObserver oserver,
           ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
-        oserver.postMoveTables(ctx, tables, targetGroup);
+        if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postMoveTables(ctx, tables, targetGroup);
+        }
       }
     });
   }
@@ -1154,7 +1165,9 @@ public class MasterCoprocessorHost
       @Override
       public void call(MasterObserver oserver,
           ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
-        oserver.preAddGroup(ctx, name);
+        if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+          oserver.preAddGroup(ctx, name);
+        }
       }
     });
   }
@@ -1165,7 +1178,9 @@ public class MasterCoprocessorHost
       @Override
       public void call(MasterObserver oserver,
           ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
-        oserver.postAddGroup(ctx, name);
+        if (((MasterEnvironment) ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postAddGroup(ctx, name);
+        }
       }
     });
   }
@@ -1176,7 +1191,9 @@ public class MasterCoprocessorHost
       @Override
       public void call(MasterObserver oserver,
           ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
-        oserver.preRemoveGroup(ctx, name);
+        if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+          oserver.preRemoveGroup(ctx, name);
+        }
       }
     });
   }
@@ -1187,7 +1204,9 @@ public class MasterCoprocessorHost
       @Override
       public void call(MasterObserver oserver,
           ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
-        oserver.postRemoveGroup(ctx, name);
+        if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postRemoveGroup(ctx, name);
+        }
       }
     });
   }
@@ -1198,7 +1217,9 @@ public class MasterCoprocessorHost
       @Override
       public void call(MasterObserver oserver,
           ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
-        oserver.preBalanceGroup(ctx, name);
+        if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+          oserver.preBalanceGroup(ctx, name);
+        }
       }
     });
   }
@@ -1209,7 +1230,9 @@ public class MasterCoprocessorHost
       @Override
       public void call(MasterObserver oserver,
           ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
-        oserver.postBalanceGroup(ctx, name, balanceRan);
+        if(((MasterEnvironment)ctx.getEnvironment()).supportGroupCPs) {
+          oserver.postBalanceGroup(ctx, name, balanceRan);
+        }
       }
     });
   }
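
The guard pattern above depends on knowing, per loaded coprocessor, whether
its binary was compiled against an observer interface that includes the new
group hooks. A minimal reflection probe of that kind, a sketch that assumes
nothing about the real useLegacyMethod() internals (HookProbe and
overridesHook are illustrative names), looks like:

  final class HookProbe {
    // Walk the class hierarchy looking for a concrete declaration of the
    // hook. A coprocessor compiled before the hook existed declares it
    // nowhere, so the host can skip the call instead of risking an
    // AbstractMethodError at invocation time.
    static boolean overridesHook(Class<?> implClass, String name, Class<?>... sig) {
      for (Class<?> c = implClass; c != null && c != Object.class; c = c.getSuperclass()) {
        try {
          c.getDeclaredMethod(name, sig);
          return true;
        } catch (NoSuchMethodException ignored) {
          // not declared at this level; keep walking up
        }
      }
      return false;
    }
  }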

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 999da30..a7b94c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1331,6 +1331,14 @@ public class MasterRpcServices extends RSRpcServices
       }
       Pair<HRegionInfo, ServerName> pair =
         MetaTableAccessor.getRegion(master.getConnection(), regionName);
+      if (Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(),regionName)) {
+        pair = new Pair<HRegionInfo, ServerName>(HRegionInfo.FIRST_META_REGIONINFO,
+            master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper()));
+      }
+      if (pair == null) {
+        throw new UnknownRegionException(Bytes.toString(regionName));
+      }
+
       if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
       HRegionInfo hri = pair.getFirst();
       if (master.cpHost != null) {
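
The hunk above special-cases hbase:meta because the meta region cannot be
found by scanning the meta table itself; its location comes from ZooKeeper.
Note that the added braced null check duplicates the pre-existing one-line
check kept just below it, so the test runs twice. Consolidated, and reordered
so the meta case skips the scan entirely, the lookup reads as follows (a
sketch built only from identifiers in the hunk; locate() is an illustrative
wrapper):

  Pair<HRegionInfo, ServerName> locate(byte[] regionName) throws IOException {
    // hbase:meta is not listed in hbase:meta; resolve it from ZooKeeper.
    if (Bytes.equals(HRegionInfo.FIRST_META_REGIONINFO.getRegionName(), regionName)) {
      return new Pair<HRegionInfo, ServerName>(HRegionInfo.FIRST_META_REGIONINFO,
          master.getMetaTableLocator().getMetaRegionLocation(master.getZooKeeper()));
    }
    Pair<HRegionInfo, ServerName> pair =
        MetaTableAccessor.getRegion(master.getConnection(), regionName);
    if (pair == null) {
      throw new UnknownRegionException(Bytes.toString(regionName));
    }
    return pair;
  }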

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java
index aa7ef1f..673c48e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroups.java
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase.group;
 
+import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
 import org.apache.commons.logging.Log;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
@@ -46,15 +48,20 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -62,8 +69,6 @@ import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.verify;
 
 @Category({MediumTests.class})
 public class TestGroups extends TestGroupsBase {
@@ -139,12 +144,12 @@ public class TestGroups extends TestGroupsBase {
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        LOG.info("Waiting for cleanup to finish "+groupAdmin.listGroups());
+        LOG.info("Waiting for cleanup to finish " + groupAdmin.listGroups());
         //Might be greater since moving servers back to default
         //is after starting a server
 
         return groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP).getServers().size()
-           == NUM_SLAVES_BASE;
+            == NUM_SLAVES_BASE;
       }
     });
   }
@@ -156,12 +161,37 @@ public class TestGroups extends TestGroupsBase {
     //verify it was loaded properly
     assertEquals("hadoop:name=Group,service=Group", it.next().getCanonicalName());
 
-    final MXBeanImpl info = MXBeanImpl.init(groupAdmin, master);
+    final AtomicReference<HostPort> deadServer = new AtomicReference<HostPort>(null);
+
+    //We use mocks to simulate offline servers to avoid
+    //the complexity and overhead of killing servers
+    MasterServices mockMaster = Mockito.mock(MasterServices.class);
+    final ServerManager mockServerManager = Mockito.mock(ServerManager.class);
+    Mockito.when(mockMaster.getServerManager()).thenReturn(mockServerManager);
+    Mockito.when(mockServerManager.getOnlineServersList()).then(new Answer<List<ServerName>>() {
+      @Override
+      public List<ServerName> answer(InvocationOnMock invocation) throws Throwable {
+        GroupInfo groupInfo = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP);
+        List<ServerName> finalList = Lists.newArrayList();
+        HostPort lastServer = groupInfo.getServers().last();
+        for (ServerName server: master.getServerManager().getOnlineServersList()) {
+          if (!server.getHostPort().equals(lastServer)) {
+            finalList.add(server);
+          }
+        }
+        deadServer.set(lastServer);
+        return finalList;
+      }
+    });
+    MXBean info = new MXBeanImpl(groupAdmin, mockMaster);
+
     GroupInfo defaultGroup = groupAdmin.getGroupInfo(GroupInfo.DEFAULT_GROUP);
     assertEquals(2, info.getGroups().size());
     assertEquals(defaultGroup.getName(), info.getGroups().get(0).getName());
     assertEquals(defaultGroup.getServers(), Sets.newTreeSet(info.getGroups().get(0).getServers()));
-    assertEquals(defaultGroup.getServers(), Sets.newTreeSet(info.getServersByGroup().get(GroupInfo.DEFAULT_GROUP)));
+    assertEquals(defaultGroup.getServers().headSet(deadServer.get()),
+        Sets.newTreeSet(info.getServersByGroup().get(GroupInfo.DEFAULT_GROUP)));
+
 
     GroupInfo barGroup = addGroup(groupAdmin, "bar", 3);
     TableName tableName1 = TableName.valueOf(tablePrefix+"_testJmx1");
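
Two details in the mock setup above are worth calling out. First, stubbing
ServerManager.getOnlineServersList() lets the test present one server as
dead without the cost of killing a JVM. Second, the AtomicReference exists
because the anonymous Answer can only capture (effectively) final locals, so
the identity of the "dead" server is handed back to the enclosing test
through a final mutable holder. That holder pattern in isolation, with
illustrative names, is:

  // Return a value out of an anonymous callback via a final holder.
  final AtomicReference<String> holder = new AtomicReference<String>(null);
  Runnable task = new Runnable() {
    @Override
    public void run() {
      holder.set("computed-inside-callback");  // visible to the enclosing scope
    }
  };
  task.run();
  assert "computed-inside-callback".equals(holder.get());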

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java
index f2834ce..ac53de5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsBase.java
@@ -272,7 +272,7 @@ public abstract class TestGroupsBase {
   }
 
   @Test
-  public void testTableMoveAndDrop() throws Exception {
+  public void testTableMoveTruncateAndDrop() throws Exception {
     LOG.info("testTableMove");
 
     final TableName tableName = TableName.valueOf(tablePrefix + "_testTableMoveAndDrop");
@@ -318,6 +318,12 @@ public abstract class TestGroupsBase {
       }
     });
 
+    //test truncate
+    admin.disableTable(tableName);
+    admin.truncateTable(tableName, true);
+    assertEquals(1, groupAdmin.getGroupInfo(newGroup.getName()).getTables().size());
+    assertEquals(tableName, groupAdmin.getGroupInfo(newGroup.getName()).getTables().first());
+
     //verify removed table is removed from group
     TEST_UTIL.deleteTable(tableName);
     assertEquals(0, groupAdmin.getGroupInfo(newGroup.getName()).getTables().size());
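
The added assertions pin down the contract behind the test's rename:
truncateTable() drops and recreates the table internally, and group
membership must survive the recreate instead of silently reverting to the
default group. Restated as standalone steps (admin, groupAdmin, tableName
and newGroup as in the test above):

  // Truncation must not reset group membership: a table that was in
  // newGroup before the truncate is still its group's only table after.
  admin.disableTable(tableName);
  admin.truncateTable(tableName, true);  // true = preserve region splits
  assertEquals(tableName,
      groupAdmin.getGroupInfo(newGroup.getName()).getTables().first());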

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsOfflineMode.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsOfflineMode.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsOfflineMode.java
new file mode 100644
index 0000000..d5da85d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/group/TestGroupsOfflineMode.java
@@ -0,0 +1,181 @@
+/**
+ * Copyright The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.group;
+
+import com.google.common.collect.Sets;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseCluster;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
+
+//This tests that the GroupBasedLoadBalancer will use the group data in
+//ZooKeeper to do balancing during master startup.
+//This does not test retained assignment.
+@Category(MediumTests.class)
+public class TestGroupsOfflineMode {
+  private static final org.apache.commons.logging.Log LOG = LogFactory.getLog(TestGroupsOfflineMode.class);
+  private static HMaster master;
+  private static HBaseAdmin hbaseAdmin;
+  private static HBaseTestingUtility TEST_UTIL;
+  private static HBaseCluster cluster;
+  public final static long WAIT_TIMEOUT = 60000*5;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL = new HBaseTestingUtility();
+    TEST_UTIL.getConfiguration().set(
+        HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
+        GroupBasedLoadBalancer.class.getName());
+    TEST_UTIL.getConfiguration().set(
+        ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART,
+        "1");
+    TEST_UTIL.startMiniCluster(2, 3);
+    cluster = TEST_UTIL.getHBaseCluster();
+    master = ((MiniHBaseCluster)cluster).getMaster();
+    master.balanceSwitch(false);
+    hbaseAdmin = new HBaseAdmin(TEST_UTIL.getConfiguration());
+    //wait till the balancer is in online mode
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return master.isInitialized() &&
+            ((GroupBasedLoadBalancer) master.getLoadBalancer()).isOnline() &&
+            master.getServerManager().getOnlineServersList().size() >= 3;
+      }
+    });
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testOffline() throws Exception, InterruptedException {
+    //the table name should sort after the group table name
+    //so that it gets assigned later
+    final TableName failoverTable = TableName.valueOf("testOffline");
+    TEST_UTIL.createTable(failoverTable, Bytes.toBytes("f"));
+
+    GroupAdminClient groupAdmin = new GroupAdminClient(TEST_UTIL.getConfiguration());
+
+    final HRegionServer killRS = ((MiniHBaseCluster)cluster).getRegionServer(0);
+    final HRegionServer groupRS = ((MiniHBaseCluster)cluster).getRegionServer(1);
+    final HRegionServer failoverRS = ((MiniHBaseCluster)cluster).getRegionServer(2);
+
+    String newGroup =  "my_group";
+    groupAdmin.addGroup(newGroup);
+    if(master.getAssignmentManager().getRegionStates().getRegionAssignments()
+        .containsValue(failoverRS.getServerName())) {
+      for(HRegionInfo regionInfo: hbaseAdmin.getOnlineRegions(failoverRS.getServerName())) {
+        hbaseAdmin.move(regionInfo.getEncodedNameAsBytes(),
+            Bytes.toBytes(failoverRS.getServerName().getServerName()));
+      }
+      LOG.info("Waiting for region unassignments on failover RS...");
+      TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+        @Override
+        public boolean evaluate() throws Exception {
+          return master.getServerManager().getLoad(failoverRS.getServerName())
+              .getRegionsLoad().size() > 0;
+        }
+      });
+    }
+
+    //move a server to the new group and wait until its regions are reassigned
+    groupAdmin.moveServers(Sets.newHashSet(groupRS.getServerName().getHostPort()), newGroup);
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return groupRS.getNumberOfOnlineRegions() < 1 &&
+            master.getAssignmentManager().getRegionStates().getRegionsInTransition().size() < 1;
+      }
+    });
+    //move table to group and wait
+    groupAdmin.moveTables(Sets.newHashSet(GroupInfoManager.GROUP_TABLE_NAME), newGroup);
+    LOG.info("Waiting for move table...");
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return groupRS.getNumberOfOnlineRegions() == 1;
+      }
+    });
+
+    groupRS.stop("die");
+    //race condition here
+    TEST_UTIL.getHBaseCluster().getMaster().stopMaster();
+    LOG.info("Waiting for offline mode...");
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return TEST_UTIL.getHBaseCluster().getMaster() != null &&
+            TEST_UTIL.getHBaseCluster().getMaster().isActiveMaster() &&
+            TEST_UTIL.getHBaseCluster().getMaster().isInitialized() &&
+            TEST_UTIL.getHBaseCluster().getMaster().getServerManager().getOnlineServers().size()
+                <= 3;
+      }
+    });
+
+
+    GroupInfoManager groupMgr =
+        ((GroupBasedLoadBalancer)TEST_UTIL.getHBaseCluster().getMaster().getLoadBalancer())
+        .getGroupInfoManager();
+    //make sure the balancer is in offline mode, since that is what we're testing
+    assertFalse(groupMgr.isOnline());
+    //verify that group affiliation is loaded from ZK instead of from the group table
+    assertEquals(newGroup,
+        groupMgr.getGroupOfTable(GroupInfoManager.GROUP_TABLE_NAME));
+    assertEquals(GroupInfo.DEFAULT_GROUP, groupMgr.getGroupOfTable(failoverTable));
+
+    //kill the final region server to verify that failover happens for all tables
+    //except the GROUP table, since its group does not have any online RS
+    killRS.stop("die");
+    master = TEST_UTIL.getHBaseCluster().getMaster();
+    LOG.info("Waiting for new table assignment...");
+    TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
+      @Override
+      public boolean evaluate() throws Exception {
+        return failoverRS.getOnlineRegions(failoverTable).size() >= 1;
+      }
+    });
+    assertEquals(0, failoverRS.getOnlineRegions(GroupInfoManager.GROUP_TABLE_NAME).size());
+
+    //needed for the minicluster to shut down cleanly
+    master.stopMaster();
+  }
+}
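
The new test drives all of its synchronization through
TEST_UTIL.waitFor(timeout, predicate). For readers without the HBase test
harness at hand, a self-contained equivalent of that polling helper (a
sketch, not the HBaseTestingUtility implementation) is:

  static void waitFor(long timeoutMs, java.util.concurrent.Callable<Boolean> cond)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!cond.call()) {              // re-evaluate the predicate...
      if (System.currentTimeMillis() > deadline) {
        throw new AssertionError("condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(100);                // ...at a fixed polling interval
    }
  }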

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 405c5a9..d49a57e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2640,19 +2640,9 @@ public class TestAccessController extends SecureTestUtil {
         return null;
       }
     };
-    AccessTestAction action2 = new AccessTestAction() {
-      @Override
-      public Object run() throws Exception {
-        ACCESS_CONTROLLER.postMoveServers(ObserverContext.createAndPrepare(CP_ENV, null),
-            null, null);
-        return null;
-      }
-    };
 
     verifyAllowed(action1, SUPERUSER, USER_ADMIN);
-    verifyAllowed(action2, SUPERUSER, USER_ADMIN);
     verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
-    verifyDenied(action2, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
 
   @Test
@@ -2665,19 +2655,9 @@ public class TestAccessController extends SecureTestUtil {
         return null;
       }
     };
-    AccessTestAction action2 = new AccessTestAction() {
-      @Override
-      public Object run() throws Exception {
-        ACCESS_CONTROLLER.postMoveTables(ObserverContext.createAndPrepare(CP_ENV, null),
-            null, null);
-        return null;
-      }
-    };
 
     verifyAllowed(action1, SUPERUSER, USER_ADMIN);
-    verifyAllowed(action2, SUPERUSER, USER_ADMIN);
     verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
-    verifyDenied(action2, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
 
   @Test
@@ -2690,19 +2670,9 @@ public class TestAccessController extends SecureTestUtil {
         return null;
       }
     };
-    AccessTestAction action2 = new AccessTestAction() {
-      @Override
-      public Object run() throws Exception {
-        ACCESS_CONTROLLER.postAddGroup(ObserverContext.createAndPrepare(CP_ENV, null),
-            null);
-        return null;
-      }
-    };
 
     verifyAllowed(action1, SUPERUSER, USER_ADMIN);
-    verifyAllowed(action2, SUPERUSER, USER_ADMIN);
     verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
-    verifyDenied(action2, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
 
   @Test
@@ -2715,19 +2685,9 @@ public class TestAccessController extends SecureTestUtil {
         return null;
       }
     };
-    AccessTestAction action2 = new AccessTestAction() {
-      @Override
-      public Object run() throws Exception {
-        ACCESS_CONTROLLER.postRemoveGroup(ObserverContext.createAndPrepare(CP_ENV, null),
-            null);
-        return null;
-      }
-    };
 
     verifyAllowed(action1, SUPERUSER, USER_ADMIN);
-    verifyAllowed(action2, SUPERUSER, USER_ADMIN);
     verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
-    verifyDenied(action2, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
 
   @Test
@@ -2740,18 +2700,8 @@ public class TestAccessController extends SecureTestUtil {
         return null;
       }
     };
-    AccessTestAction action2 = new AccessTestAction() {
-      @Override
-      public Object run() throws Exception {
-        ACCESS_CONTROLLER.postBalanceGroup(ObserverContext.createAndPrepare(CP_ENV, null),
-            null, false);
-        return null;
-      }
-    };
 
     verifyAllowed(action1, SUPERUSER, USER_ADMIN);
-    verifyAllowed(action2, SUPERUSER, USER_ADMIN);
     verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
-    verifyDenied(action2, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index d6a05a7..8aa2983 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -415,26 +415,6 @@ module Hbase
 
       table_description = @admin.getTableDescriptor(TableName.valueOf(table_name))
 
-      #clone group
-      if(groups_available?(conf))
-      	group_admin =  org.apache.hadoop.hbase.group.GroupAdminClient.new(@conf)
-      	group_info = group_admin.getGroupInfoOfTable(table_name)
-        exp_group = group_info.getName
-        if(exp_group == "default")
-          exp_group = nil;
-        end
-        ns =
-            @admin.getNamespaceDescriptor(
-                org.apache.hadoop.hbase.TableName.valueOf(table_name).getNamespaceAsString)
-        ns_group =
-          ns.getValue(org.apache.hadoop.hbase.group.GroupInfo::NAMESPACEDESC_PROP_GROUP)
-        if(!exp_group.nil? && ns_group.nil?|| (ns_group != exp_group))
-          yield " - Preserving explicit group assignment to #{exp_group}" if block_given?
-          table_description.setValue(org.apache.hadoop.hbase.group.GroupInfo::TABLEDESC_PROP_GROUP,
-          group_info.getName())
-        end
-      end
-
       yield 'Disabling table...' if block_given?
       disable(table_name)
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-shell/src/main/ruby/hbase/group_admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/group_admin.rb b/hbase-shell/src/main/ruby/hbase/group_admin.rb
index 4532031..bb4cefe 100644
--- a/hbase-shell/src/main/ruby/hbase/group_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/group_admin.rb
@@ -51,9 +51,9 @@ module Hbase
       end
       group.getServers.each do |v|
         if block_given?
-          yield(v)
+          yield(v.toString)
         else
-          res += v
+          res += v.toString
         end
       end
       if block_given?

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 3cc1f06..561a1c1 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -434,7 +434,5 @@ Shell.load_command_group(
     move_group_tables
     get_server_group
     get_table_group
-    list_group_tables
-    list_group_server_transitions
   ]
 )

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-shell/src/main/ruby/shell/commands/list_group_server_transitions.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_group_server_transitions.rb b/hbase-shell/src/main/ruby/shell/commands/list_group_server_transitions.rb
deleted file mode 100644
index 313873f..0000000
--- a/hbase-shell/src/main/ruby/shell/commands/list_group_server_transitions.rb
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-# Copyright The Apache Software Foundation
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#TODO make this command name sho
-module Shell
-  module Commands
-    class ListGroupServerTransitions < Command
-      def help
-        return <<-EOF
-List region servers in transition.
-
-Example:
-
-  hbase> list_group_server_transitions 'default'
-EOF
-      end
-      def command()
-        now = Time.now
-        formatter.header(["Server", "Destination"])
-        count = group_admin.listServersInTransition do |server, dest|
-          formatter.row([ server, dest ])
-        end
-        formatter.footer(now, count)
-      end
-    end
-  end
-end

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-shell/src/main/ruby/shell/commands/list_group_tables.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_group_tables.rb b/hbase-shell/src/main/ruby/shell/commands/list_group_tables.rb
deleted file mode 100644
index ae0862c..0000000
--- a/hbase-shell/src/main/ruby/shell/commands/list_group_tables.rb
+++ /dev/null
@@ -1,45 +0,0 @@
-#
-# Copyright The Apache Software Foundation
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-module Shell
-  module Commands
-    class ListGroupTables < Command
-      def help
-        return <<-EOF
-List member tables of a given region server group in hbase.
-
-Example:
-
-  hbase> list_group_tables 'default'
-EOF
-      end
-
-      def command(group_name)
-        now = Time.now
-        formatter.header([ "TABLES" ])
-        list = group_admin.listTablesOfGroup(group_name)
-        list.each do |table|
-          formatter.row([ table.toString ])
-        end
-        formatter.footer(now, list.size)
-      end
-    end
-  end
-end


[2/2] hbase git commit: HBASE-14312 Forward port some fixes from hbase-6721-0.98 to hbase-6721 (Francis Liu)

Posted by ap...@apache.org.
HBASE-14312 Forward port some fixes from hbase-6721-0.98 to hbase-6721 (Francis Liu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fade887a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fade887a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fade887a

Branch: refs/heads/hbase-6721
Commit: fade887a309e6b12a4e580c5207794b3b05e9b4e
Parents: 16f65ba
Author: Andrew Purtell <ap...@apache.org>
Authored: Wed Aug 26 13:04:29 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Wed Aug 26 13:04:29 2015 -0700

----------------------------------------------------------------------
 .../hbase/group/IntegrationTestGroup.java       |    5 -
 .../hbase/protobuf/generated/MasterProtos.java  | 1348 ++----------------
 hbase-protocol/src/main/protobuf/Master.proto   |    7 -
 .../hadoop/hbase/group/GroupAdminServer.java    |   94 +-
 .../hbase/group/GroupBasedLoadBalancer.java     |   37 +-
 .../hbase/group/GroupInfoManagerImpl.java       |   32 +-
 .../org/apache/hadoop/hbase/group/MXBean.java   |    8 +-
 .../apache/hadoop/hbase/group/MXBeanImpl.java   |   19 +-
 .../hadoop/hbase/master/AssignmentManager.java  |    8 +-
 .../hbase/master/MasterCoprocessorHost.java     |   43 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |    8 +
 .../apache/hadoop/hbase/group/TestGroups.java   |   42 +-
 .../hadoop/hbase/group/TestGroupsBase.java      |    8 +-
 .../hbase/group/TestGroupsOfflineMode.java      |  181 +++
 .../security/access/TestAccessController.java   |   50 -
 hbase-shell/src/main/ruby/hbase/admin.rb        |   20 -
 hbase-shell/src/main/ruby/hbase/group_admin.rb  |    4 +-
 hbase-shell/src/main/ruby/shell.rb              |    2 -
 .../commands/list_group_server_transitions.rb   |   44 -
 .../ruby/shell/commands/list_group_tables.rb    |   45 -
 20 files changed, 498 insertions(+), 1507 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
index 22cddd7..62f4f8a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/group/IntegrationTestGroup.java
@@ -74,11 +74,6 @@ public class IntegrationTestGroup extends TestGroupsBase {
     ((IntegrationTestingUtility)TEST_UTIL).restoreCluster();
     LOG.info("Done restoring the cluster");
 
-    groupAdmin.addGroup("master");
-    groupAdmin.moveServers(
-        Sets.newHashSet(cluster.getInitialClusterStatus().getMaster().getHostPort()),
-        "master");
-
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index beee110..71cc5d4 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -61124,1065 +61124,6 @@ public final class MasterProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.GetGroupInfoOfServerResponse)
   }
 
-  public interface ListServersInTransitionRequestOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-  }
-  /**
-   * Protobuf type {@code hbase.pb.ListServersInTransitionRequest}
-   */
-  public static final class ListServersInTransitionRequest extends
-      com.google.protobuf.GeneratedMessage
-      implements ListServersInTransitionRequestOrBuilder {
-    // Use ListServersInTransitionRequest.newBuilder() to construct.
-    private ListServersInTransitionRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private ListServersInTransitionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final ListServersInTransitionRequest defaultInstance;
-    public static ListServersInTransitionRequest getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public ListServersInTransitionRequest getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private ListServersInTransitionRequest(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListServersInTransitionRequest_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListServersInTransitionRequest_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<ListServersInTransitionRequest> PARSER =
-        new com.google.protobuf.AbstractParser<ListServersInTransitionRequest>() {
-      public ListServersInTransitionRequest parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ListServersInTransitionRequest(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<ListServersInTransitionRequest> getParserForType() {
-      return PARSER;
-    }
-
-    private void initFields() {
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest) obj;
-
-      boolean result = true;
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-
-    private int memoizedHashCode = 0;
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.ListServersInTransitionRequest}
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequestOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListServersInTransitionRequest_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListServersInTransitionRequest_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListServersInTransitionRequest_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest(this);
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest.getDefaultInstance()) return this;
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionRequest) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.ListServersInTransitionRequest)
-    }
-
-    static {
-      defaultInstance = new ListServersInTransitionRequest(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.ListServersInTransitionRequest)
-  }
-
-  public interface ListServersInTransitionResponseOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // repeated .hbase.pb.NameStringPair transitions = 1;
-    /**
-     * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-     */
-    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> 
-        getTransitionsList();
-    /**
-     * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getTransitions(int index);
-    /**
-     * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-     */
-    int getTransitionsCount();
-    /**
-     * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-     */
-    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> 
-        getTransitionsOrBuilderList();
-    /**
-     * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getTransitionsOrBuilder(
-        int index);
-  }
-  /**
-   * Protobuf type {@code hbase.pb.ListServersInTransitionResponse}
-   */
-  public static final class ListServersInTransitionResponse extends
-      com.google.protobuf.GeneratedMessage
-      implements ListServersInTransitionResponseOrBuilder {
-    // Use ListServersInTransitionResponse.newBuilder() to construct.
-    private ListServersInTransitionResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private ListServersInTransitionResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final ListServersInTransitionResponse defaultInstance;
-    public static ListServersInTransitionResponse getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public ListServersInTransitionResponse getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private ListServersInTransitionResponse(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-                transitions_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair>();
-                mutable_bitField0_ |= 0x00000001;
-              }
-              transitions_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.PARSER, extensionRegistry));
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-          transitions_ = java.util.Collections.unmodifiableList(transitions_);
-        }
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListServersInTransitionResponse_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListServersInTransitionResponse_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<ListServersInTransitionResponse> PARSER =
-        new com.google.protobuf.AbstractParser<ListServersInTransitionResponse>() {
-      public ListServersInTransitionResponse parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ListServersInTransitionResponse(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<ListServersInTransitionResponse> getParserForType() {
-      return PARSER;
-    }
-
-    // repeated .hbase.pb.NameStringPair transitions = 1;
-    public static final int TRANSITIONS_FIELD_NUMBER = 1;
-    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> transitions_;
-    /**
-     * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-     */
-    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> getTransitionsList() {
-      return transitions_;
-    }
-    /**
-     * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-     */
-    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> 
-        getTransitionsOrBuilderList() {
-      return transitions_;
-    }
-    /**
-     * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-     */
-    public int getTransitionsCount() {
-      return transitions_.size();
-    }
-    /**
-     * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getTransitions(int index) {
-      return transitions_.get(index);
-    }
-    /**
-     * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getTransitionsOrBuilder(
-        int index) {
-      return transitions_.get(index);
-    }
-
-    private void initFields() {
-      transitions_ = java.util.Collections.emptyList();
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      for (int i = 0; i < getTransitionsCount(); i++) {
-        if (!getTransitions(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      for (int i = 0; i < transitions_.size(); i++) {
-        output.writeMessage(1, transitions_.get(i));
-      }
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      for (int i = 0; i < transitions_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, transitions_.get(i));
-      }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse) obj;
-
-      boolean result = true;
-      result = result && getTransitionsList()
-          .equals(other.getTransitionsList());
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-
-    private int memoizedHashCode = 0;
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (getTransitionsCount() > 0) {
-        hash = (37 * hash) + TRANSITIONS_FIELD_NUMBER;
-        hash = (53 * hash) + getTransitionsList().hashCode();
-      }
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.ListServersInTransitionResponse}
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponseOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListServersInTransitionResponse_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListServersInTransitionResponse_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getTransitionsFieldBuilder();
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        if (transitionsBuilder_ == null) {
-          transitions_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-        } else {
-          transitionsBuilder_.clear();
-        }
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListServersInTransitionResponse_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse(this);
-        int from_bitField0_ = bitField0_;
-        if (transitionsBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001)) {
-            transitions_ = java.util.Collections.unmodifiableList(transitions_);
-            bitField0_ = (bitField0_ & ~0x00000001);
-          }
-          result.transitions_ = transitions_;
-        } else {
-          result.transitions_ = transitionsBuilder_.build();
-        }
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse.getDefaultInstance()) return this;
-        if (transitionsBuilder_ == null) {
-          if (!other.transitions_.isEmpty()) {
-            if (transitions_.isEmpty()) {
-              transitions_ = other.transitions_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-            } else {
-              ensureTransitionsIsMutable();
-              transitions_.addAll(other.transitions_);
-            }
-            onChanged();
-          }
-        } else {
-          if (!other.transitions_.isEmpty()) {
-            if (transitionsBuilder_.isEmpty()) {
-              transitionsBuilder_.dispose();
-              transitionsBuilder_ = null;
-              transitions_ = other.transitions_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-              transitionsBuilder_ = 
-                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
-                   getTransitionsFieldBuilder() : null;
-            } else {
-              transitionsBuilder_.addAllMessages(other.transitions_);
-            }
-          }
-        }
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        for (int i = 0; i < getTransitionsCount(); i++) {
-          if (!getTransitions(i).isInitialized()) {
-            
-            return false;
-          }
-        }
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListServersInTransitionResponse) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      // repeated .hbase.pb.NameStringPair transitions = 1;
-      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> transitions_ =
-        java.util.Collections.emptyList();
-      private void ensureTransitionsIsMutable() {
-        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
-          transitions_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair>(transitions_);
-          bitField0_ |= 0x00000001;
-         }
-      }
-
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> transitionsBuilder_;
-
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> getTransitionsList() {
-        if (transitionsBuilder_ == null) {
-          return java.util.Collections.unmodifiableList(transitions_);
-        } else {
-          return transitionsBuilder_.getMessageList();
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public int getTransitionsCount() {
-        if (transitionsBuilder_ == null) {
-          return transitions_.size();
-        } else {
-          return transitionsBuilder_.getCount();
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair getTransitions(int index) {
-        if (transitionsBuilder_ == null) {
-          return transitions_.get(index);
-        } else {
-          return transitionsBuilder_.getMessage(index);
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public Builder setTransitions(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
-        if (transitionsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureTransitionsIsMutable();
-          transitions_.set(index, value);
-          onChanged();
-        } else {
-          transitionsBuilder_.setMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public Builder setTransitions(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
-        if (transitionsBuilder_ == null) {
-          ensureTransitionsIsMutable();
-          transitions_.set(index, builderForValue.build());
-          onChanged();
-        } else {
-          transitionsBuilder_.setMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public Builder addTransitions(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
-        if (transitionsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureTransitionsIsMutable();
-          transitions_.add(value);
-          onChanged();
-        } else {
-          transitionsBuilder_.addMessage(value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public Builder addTransitions(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair value) {
-        if (transitionsBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureTransitionsIsMutable();
-          transitions_.add(index, value);
-          onChanged();
-        } else {
-          transitionsBuilder_.addMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public Builder addTransitions(
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
-        if (transitionsBuilder_ == null) {
-          ensureTransitionsIsMutable();
-          transitions_.add(builderForValue.build());
-          onChanged();
-        } else {
-          transitionsBuilder_.addMessage(builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public Builder addTransitions(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder builderForValue) {
-        if (transitionsBuilder_ == null) {
-          ensureTransitionsIsMutable();
-          transitions_.add(index, builderForValue.build());
-          onChanged();
-        } else {
-          transitionsBuilder_.addMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public Builder addAllTransitions(
-          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair> values) {
-        if (transitionsBuilder_ == null) {
-          ensureTransitionsIsMutable();
-          super.addAll(values, transitions_);
-          onChanged();
-        } else {
-          transitionsBuilder_.addAllMessages(values);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public Builder clearTransitions() {
-        if (transitionsBuilder_ == null) {
-          transitions_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-          onChanged();
-        } else {
-          transitionsBuilder_.clear();
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public Builder removeTransitions(int index) {
-        if (transitionsBuilder_ == null) {
-          ensureTransitionsIsMutable();
-          transitions_.remove(index);
-          onChanged();
-        } else {
-          transitionsBuilder_.remove(index);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder getTransitionsBuilder(
-          int index) {
-        return getTransitionsFieldBuilder().getBuilder(index);
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder getTransitionsOrBuilder(
-          int index) {
-        if (transitionsBuilder_ == null) {
-          return transitions_.get(index);  } else {
-          return transitionsBuilder_.getMessageOrBuilder(index);
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> 
-           getTransitionsOrBuilderList() {
-        if (transitionsBuilder_ != null) {
-          return transitionsBuilder_.getMessageOrBuilderList();
-        } else {
-          return java.util.Collections.unmodifiableList(transitions_);
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addTransitionsBuilder() {
-        return getTransitionsFieldBuilder().addBuilder(
-            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance());
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder addTransitionsBuilder(
-          int index) {
-        return getTransitionsFieldBuilder().addBuilder(
-            index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.getDefaultInstance());
-      }
-      /**
-       * <code>repeated .hbase.pb.NameStringPair transitions = 1;</code>
-       */
-      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder> 
-           getTransitionsBuilderList() {
-        return getTransitionsFieldBuilder().getBuilderList();
-      }
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder> 
-          getTransitionsFieldBuilder() {
-        if (transitionsBuilder_ == null) {
-          transitionsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPairOrBuilder>(
-                  transitions_,
-                  ((bitField0_ & 0x00000001) == 0x00000001),
-                  getParentForChildren(),
-                  isClean());
-          transitions_ = null;
-        }
-        return transitionsBuilder_;
-      }
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.ListServersInTransitionResponse)
-    }
-
-    static {
-      defaultInstance = new ListServersInTransitionResponse(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.ListServersInTransitionResponse)
-  }
-
   /**
    * Protobuf service {@code hbase.pb.MasterService}
    */
@@ -67465,16 +66406,6 @@ public final class MasterProtos {
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_GetGroupInfoOfServerResponse_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_ListServersInTransitionRequest_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_ListServersInTransitionRequest_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_ListServersInTransitionResponse_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_ListServersInTransitionResponse_fieldAccessorTable;
 
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -67668,141 +66599,138 @@ public final class MasterProtos {
       "A\n\033GetGroupInfoOfServerRequest\022\"\n\006server" +
       "\030\001 \002(\0132\022.hbase.pb.HostPort\"G\n\034GetGroupIn" +
       "foOfServerResponse\022\'\n\ngroup_info\030\001 \001(\0132\023" +
-      ".hbase.pb.GroupInfo\" \n\036ListServersInTran" +
-      "sitionRequest\"P\n\037ListServersInTransition" +
-      "Response\022-\n\013transitions\030\001 \003(\0132\030.hbase.pb" +
-      ".NameStringPair2\271(\n\rMasterService\022e\n\024Get" +
-      "SchemaAlterStatus\022%.hbase.pb.GetSchemaAl" +
-      "terStatusRequest\032&.hbase.pb.GetSchemaAlt",
-      "erStatusResponse\022b\n\023GetTableDescriptors\022" +
-      "$.hbase.pb.GetTableDescriptorsRequest\032%." +
-      "hbase.pb.GetTableDescriptorsResponse\022P\n\r" +
-      "GetTableNames\022\036.hbase.pb.GetTableNamesRe" +
-      "quest\032\037.hbase.pb.GetTableNamesResponse\022Y" +
-      "\n\020GetClusterStatus\022!.hbase.pb.GetCluster" +
-      "StatusRequest\032\".hbase.pb.GetClusterStatu" +
-      "sResponse\022V\n\017IsMasterRunning\022 .hbase.pb." +
-      "IsMasterRunningRequest\032!.hbase.pb.IsMast" +
-      "erRunningResponse\022D\n\tAddColumn\022\032.hbase.p",
-      "b.AddColumnRequest\032\033.hbase.pb.AddColumnR" +
-      "esponse\022M\n\014DeleteColumn\022\035.hbase.pb.Delet" +
-      "eColumnRequest\032\036.hbase.pb.DeleteColumnRe" +
-      "sponse\022M\n\014ModifyColumn\022\035.hbase.pb.Modify" +
-      "ColumnRequest\032\036.hbase.pb.ModifyColumnRes" +
-      "ponse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegio" +
-      "nRequest\032\034.hbase.pb.MoveRegionResponse\022k" +
-      "\n\026DispatchMergingRegions\022\'.hbase.pb.Disp" +
-      "atchMergingRegionsRequest\032(.hbase.pb.Dis" +
-      "patchMergingRegionsResponse\022M\n\014AssignReg",
-      "ion\022\035.hbase.pb.AssignRegionRequest\032\036.hba" +
-      "se.pb.AssignRegionResponse\022S\n\016UnassignRe" +
-      "gion\022\037.hbase.pb.UnassignRegionRequest\032 ." +
-      "hbase.pb.UnassignRegionResponse\022P\n\rOffli" +
-      "neRegion\022\036.hbase.pb.OfflineRegionRequest" +
-      "\032\037.hbase.pb.OfflineRegionResponse\022J\n\013Del" +
-      "eteTable\022\034.hbase.pb.DeleteTableRequest\032\035" +
-      ".hbase.pb.DeleteTableResponse\022P\n\rtruncat" +
-      "eTable\022\036.hbase.pb.TruncateTableRequest\032\037" +
-      ".hbase.pb.TruncateTableResponse\022J\n\013Enabl",
-      "eTable\022\034.hbase.pb.EnableTableRequest\032\035.h" +
-      "base.pb.EnableTableResponse\022M\n\014DisableTa" +
-      "ble\022\035.hbase.pb.DisableTableRequest\032\036.hba" +
-      "se.pb.DisableTableResponse\022J\n\013ModifyTabl" +
-      "e\022\034.hbase.pb.ModifyTableRequest\032\035.hbase." +
-      "pb.ModifyTableResponse\022J\n\013CreateTable\022\034." +
-      "hbase.pb.CreateTableRequest\032\035.hbase.pb.C" +
-      "reateTableResponse\022A\n\010Shutdown\022\031.hbase.p" +
-      "b.ShutdownRequest\032\032.hbase.pb.ShutdownRes" +
-      "ponse\022G\n\nStopMaster\022\033.hbase.pb.StopMaste",
-      "rRequest\032\034.hbase.pb.StopMasterResponse\022>" +
-      "\n\007Balance\022\030.hbase.pb.BalanceRequest\032\031.hb" +
-      "ase.pb.BalanceResponse\022_\n\022SetBalancerRun" +
-      "ning\022#.hbase.pb.SetBalancerRunningReques" +
-      "t\032$.hbase.pb.SetBalancerRunningResponse\022" +
-      "\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBalanc" +
-      "erEnabledRequest\032#.hbase.pb.IsBalancerEn" +
-      "abledResponse\022S\n\016RunCatalogScan\022\037.hbase." +
-      "pb.RunCatalogScanRequest\032 .hbase.pb.RunC" +
-      "atalogScanResponse\022e\n\024EnableCatalogJanit",
-      "or\022%.hbase.pb.EnableCatalogJanitorReques" +
-      "t\032&.hbase.pb.EnableCatalogJanitorRespons" +
-      "e\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb." +
-      "IsCatalogJanitorEnabledRequest\032).hbase.p" +
-      "b.IsCatalogJanitorEnabledResponse\022^\n\021Exe" +
-      "cMasterService\022#.hbase.pb.CoprocessorSer" +
-      "viceRequest\032$.hbase.pb.CoprocessorServic" +
-      "eResponse\022A\n\010Snapshot\022\031.hbase.pb.Snapsho" +
-      "tRequest\032\032.hbase.pb.SnapshotResponse\022h\n\025" +
-      "GetCompletedSnapshots\022&.hbase.pb.GetComp",
-      "letedSnapshotsRequest\032\'.hbase.pb.GetComp" +
-      "letedSnapshotsResponse\022S\n\016DeleteSnapshot" +
-      "\022\037.hbase.pb.DeleteSnapshotRequest\032 .hbas" +
-      "e.pb.DeleteSnapshotResponse\022S\n\016IsSnapsho" +
-      "tDone\022\037.hbase.pb.IsSnapshotDoneRequest\032 " +
-      ".hbase.pb.IsSnapshotDoneResponse\022V\n\017Rest" +
-      "oreSnapshot\022 .hbase.pb.RestoreSnapshotRe" +
-      "quest\032!.hbase.pb.RestoreSnapshotResponse" +
-      "\022h\n\025IsRestoreSnapshotDone\022&.hbase.pb.IsR" +
-      "estoreSnapshotDoneRequest\032\'.hbase.pb.IsR",
-      "estoreSnapshotDoneResponse\022P\n\rExecProced" +
-      "ure\022\036.hbase.pb.ExecProcedureRequest\032\037.hb" +
-      "ase.pb.ExecProcedureResponse\022W\n\024ExecProc" +
-      "edureWithRet\022\036.hbase.pb.ExecProcedureReq" +
-      "uest\032\037.hbase.pb.ExecProcedureResponse\022V\n" +
-      "\017IsProcedureDone\022 .hbase.pb.IsProcedureD" +
-      "oneRequest\032!.hbase.pb.IsProcedureDoneRes" +
-      "ponse\022V\n\017ModifyNamespace\022 .hbase.pb.Modi" +
-      "fyNamespaceRequest\032!.hbase.pb.ModifyName" +
-      "spaceResponse\022V\n\017CreateNamespace\022 .hbase",
-      ".pb.CreateNamespaceRequest\032!.hbase.pb.Cr" +
-      "eateNamespaceResponse\022V\n\017DeleteNamespace" +
-      "\022 .hbase.pb.DeleteNamespaceRequest\032!.hba" +
-      "se.pb.DeleteNamespaceResponse\022k\n\026GetName" +
-      "spaceDescriptor\022\'.hbase.pb.GetNamespaceD" +
-      "escriptorRequest\032(.hbase.pb.GetNamespace" +
-      "DescriptorResponse\022q\n\030ListNamespaceDescr" +
-      "iptors\022).hbase.pb.ListNamespaceDescripto" +
-      "rsRequest\032*.hbase.pb.ListNamespaceDescri" +
-      "ptorsResponse\022\206\001\n\037ListTableDescriptorsBy",
-      "Namespace\0220.hbase.pb.ListTableDescriptor" +
-      "sByNamespaceRequest\0321.hbase.pb.ListTable" +
-      "DescriptorsByNamespaceResponse\022t\n\031ListTa" +
-      "bleNamesByNamespace\022*.hbase.pb.ListTable" +
-      "NamesByNamespaceRequest\032+.hbase.pb.ListT" +
-      "ableNamesByNamespaceResponse\022P\n\rGetTable" +
-      "State\022\036.hbase.pb.GetTableStateRequest\032\037." +
-      "hbase.pb.GetTableStateResponse\022A\n\010SetQuo" +
-      "ta\022\031.hbase.pb.SetQuotaRequest\032\032.hbase.pb" +
-      ".SetQuotaResponse\022x\n\037getLastMajorCompact",
-      "ionTimestamp\022).hbase.pb.MajorCompactionT" +
-      "imestampRequest\032*.hbase.pb.MajorCompacti" +
-      "onTimestampResponse\022\212\001\n(getLastMajorComp" +
-      "actionTimestampForRegion\0222.hbase.pb.Majo" +
-      "rCompactionTimestampForRegionRequest\032*.h" +
-      "base.pb.MajorCompactionTimestampResponse" +
-      "\022_\n\022getProcedureResult\022#.hbase.pb.GetPro" +
-      "cedureResultRequest\032$.hbase.pb.GetProced" +
-      "ureResultResponse\022M\n\014GetGroupInfo\022\035.hbas" +
-      "e.pb.GetGroupInfoRequest\032\036.hbase.pb.GetG",
-      "roupInfoResponse\022b\n\023GetGroupInfoOfTable\022" +
-      "$.hbase.pb.GetGroupInfoOfTableRequest\032%." +
-      "hbase.pb.GetGroupInfoOfTableResponse\022e\n\024" +
-      "GetGroupInfoOfServer\022%.hbase.pb.GetGroup" +
-      "InfoOfServerRequest\032&.hbase.pb.GetGroupI" +
-      "nfoOfServerResponse\022J\n\013MoveServers\022\034.hba" +
-      "se.pb.MoveServersRequest\032\035.hbase.pb.Move" +
-      "ServersResponse\022G\n\nMoveTables\022\033.hbase.pb" +
-      ".MoveTablesRequest\032\034.hbase.pb.MoveTables" +
-      "Response\022A\n\010AddGroup\022\031.hbase.pb.AddGroup",
-      "Request\032\032.hbase.pb.AddGroupResponse\022J\n\013R" +
-      "emoveGroup\022\034.hbase.pb.RemoveGroupRequest" +
-      "\032\035.hbase.pb.RemoveGroupResponse\022M\n\014Balan" +
-      "ceGroup\022\035.hbase.pb.BalanceGroupRequest\032\036" +
-      ".hbase.pb.BalanceGroupResponse\022S\n\016ListGr" +
-      "oupInfos\022\037.hbase.pb.ListGroupInfosReques" +
-      "t\032 .hbase.pb.ListGroupInfosResponseBB\n*o" +
-      "rg.apache.hadoop.hbase.protobuf.generate" +
-      "dB\014MasterProtosH\001\210\001\001\240\001\001"
+      ".hbase.pb.GroupInfo2\271(\n\rMasterService\022e\n" +
+      "\024GetSchemaAlterStatus\022%.hbase.pb.GetSche" +
+      "maAlterStatusRequest\032&.hbase.pb.GetSchem" +
+      "aAlterStatusResponse\022b\n\023GetTableDescript" +
+      "ors\022$.hbase.pb.GetTableDescriptorsReques" +
+      "t\032%.hbase.pb.GetTableDescriptorsResponse",
+      "\022P\n\rGetTableNames\022\036.hbase.pb.GetTableNam" +
+      "esRequest\032\037.hbase.pb.GetTableNamesRespon" +
+      "se\022Y\n\020GetClusterStatus\022!.hbase.pb.GetClu" +
+      "sterStatusRequest\032\".hbase.pb.GetClusterS" +
+      "tatusResponse\022V\n\017IsMasterRunning\022 .hbase" +
+      ".pb.IsMasterRunningRequest\032!.hbase.pb.Is" +
+      "MasterRunningResponse\022D\n\tAddColumn\022\032.hba" +
+      "se.pb.AddColumnRequest\032\033.hbase.pb.AddCol" +
+      "umnResponse\022M\n\014DeleteColumn\022\035.hbase.pb.D" +
+      "eleteColumnRequest\032\036.hbase.pb.DeleteColu",
+      "mnResponse\022M\n\014ModifyColumn\022\035.hbase.pb.Mo" +
+      "difyColumnRequest\032\036.hbase.pb.ModifyColum" +
+      "nResponse\022G\n\nMoveRegion\022\033.hbase.pb.MoveR" +
+      "egionRequest\032\034.hbase.pb.MoveRegionRespon" +
+      "se\022k\n\026DispatchMergingRegions\022\'.hbase.pb." +
+      "DispatchMergingRegionsRequest\032(.hbase.pb" +
+      ".DispatchMergingRegionsResponse\022M\n\014Assig" +
+      "nRegion\022\035.hbase.pb.AssignRegionRequest\032\036" +
+      ".hbase.pb.AssignRegionResponse\022S\n\016Unassi" +
+      "gnRegion\022\037.hbase.pb.UnassignRegionReques",
+      "t\032 .hbase.pb.UnassignRegionResponse\022P\n\rO" +
+      "fflineRegion\022\036.hbase.pb.OfflineRegionReq" +
+      "uest\032\037.hbase.pb.OfflineRegionResponse\022J\n" +
+      "\013DeleteTable\022\034.hbase.pb.DeleteTableReque" +
+      "st\032\035.hbase.pb.DeleteTableResponse\022P\n\rtru" +
+      "ncateTable\022\036.hbase.pb.TruncateTableReque" +
+      "st\032\037.hbase.pb.TruncateTableResponse\022J\n\013E" +
+      "nableTable\022\034.hbase.pb.EnableTableRequest" +
+      "\032\035.hbase.pb.EnableTableResponse\022M\n\014Disab" +
+      "leTable\022\035.hbase.pb.DisableTableRequest\032\036",
+      ".hbase.pb.DisableTableResponse\022J\n\013Modify" +
+      "Table\022\034.hbase.pb.ModifyTableRequest\032\035.hb" +
+      "ase.pb.ModifyTableResponse\022J\n\013CreateTabl" +
+      "e\022\034.hbase.pb.CreateTableRequest\032\035.hbase." +
+      "pb.CreateTableResponse\022A\n\010Shutdown\022\031.hba" +
+      "se.pb.ShutdownRequest\032\032.hbase.pb.Shutdow" +
+      "nResponse\022G\n\nStopMaster\022\033.hbase.pb.StopM" +
+      "asterRequest\032\034.hbase.pb.StopMasterRespon" +
+      "se\022>\n\007Balance\022\030.hbase.pb.BalanceRequest\032" +
+      "\031.hbase.pb.BalanceResponse\022_\n\022SetBalance",
+      "rRunning\022#.hbase.pb.SetBalancerRunningRe" +
+      "quest\032$.hbase.pb.SetBalancerRunningRespo" +
+      "nse\022\\\n\021IsBalancerEnabled\022\".hbase.pb.IsBa" +
+      "lancerEnabledRequest\032#.hbase.pb.IsBalanc" +
+      "erEnabledResponse\022S\n\016RunCatalogScan\022\037.hb" +
+      "ase.pb.RunCatalogScanRequest\032 .hbase.pb." +
+      "RunCatalogScanResponse\022e\n\024EnableCatalogJ" +
+      "anitor\022%.hbase.pb.EnableCatalogJanitorRe" +
+      "quest\032&.hbase.pb.EnableCatalogJanitorRes" +
+      "ponse\022n\n\027IsCatalogJanitorEnabled\022(.hbase",
+      ".pb.IsCatalogJanitorEnabledRequest\032).hba" +
+      "se.pb.IsCatalogJanitorEnabledResponse\022^\n" +
+      "\021ExecMasterService\022#.hbase.pb.Coprocesso" +
+      "rServiceRequest\032$.hbase.pb.CoprocessorSe" +
+      "rviceResponse\022A\n\010Snapshot\022\031.hbase.pb.Sna" +
+      "pshotRequest\032\032.hbase.pb.SnapshotResponse" +
+      "\022h\n\025GetCompletedSnapshots\022&.hbase.pb.Get" +
+      "CompletedSnapshotsRequest\032\'.hbase.pb.Get" +
+      "CompletedSnapshotsResponse\022S\n\016DeleteSnap" +
+      "shot\022\037.hbase.pb.DeleteSnapshotRequest\032 .",
+      "hbase.pb.DeleteSnapshotResponse\022S\n\016IsSna" +
+      "pshotDone\022\037.hbase.pb.IsSnapshotDoneReque" +
+      "st\032 .hbase.pb.IsSnapshotDoneResponse\022V\n\017" +
+      "RestoreSnapshot\022 .hbase.pb.RestoreSnapsh" +
+      "otRequest\032!.hbase.pb.RestoreSnapshotResp" +
+      "onse\022h\n\025IsRestoreSnapshotDone\022&.hbase.pb" +
+      ".IsRestoreSnapshotDoneRequest\032\'.hbase.pb" +
+      ".IsRestoreSnapshotDoneResponse\022P\n\rExecPr" +
+      "ocedure\022\036.hbase.pb.ExecProcedureRequest\032" +
+      "\037.hbase.pb.ExecProcedureResponse\022W\n\024Exec",
+      "ProcedureWithRet\022\036.hbase.pb.ExecProcedur" +
+      "eRequest\032\037.hbase.pb.ExecProcedureRespons" +
+      "e\022V\n\017IsProcedureDone\022 .hbase.pb.IsProced" +
+      "ureDoneRequest\032!.hbase.pb.IsProcedureDon" +
+      "eResponse\022V\n\017ModifyNamespace\022 .hbase.pb." +
+      "ModifyNamespaceRequest\032!.hbase.pb.Modify" +
+      "NamespaceResponse\022V\n\017CreateNamespace\022 .h" +
+      "base.pb.CreateNamespaceRequest\032!.hbase.p" +
+      "b.CreateNamespaceResponse\022V\n\017DeleteNames" +
+      "pace\022 .hbase.pb.DeleteNamespaceRequest\032!",
+      ".hbase.pb.DeleteNamespaceResponse\022k\n\026Get" +
+      "NamespaceDescriptor\022\'.hbase.pb.GetNamesp" +
+      "aceDescriptorRequest\032(.hbase.pb.GetNames" +
+      "paceDescriptorResponse\022q\n\030ListNamespaceD" +
+      "escriptors\022).hbase.pb.ListNamespaceDescr" +
+      "iptorsRequest\032*.hbase.pb.ListNamespaceDe" +
+      "scriptorsResponse\022\206\001\n\037ListTableDescripto" +
+      "rsByNamespace\0220.hbase.pb.ListTableDescri" +
+      "ptorsByNamespaceRequest\0321.hbase.pb.ListT" +
+      "ableDescriptorsByNamespaceResponse\022t\n\031Li",
+      "stTableNamesByNamespace\022*.hbase.pb.ListT" +
+      "ableNamesByNamespaceRequest\032+.hbase.pb.L" +
+      "istTableNamesByNamespaceResponse\022P\n\rGetT" +
+      "ableState\022\036.hbase.pb.GetTableStateReques" +
+      "t\032\037.hbase.pb.GetTableStateResponse\022A\n\010Se" +
+      "tQuota\022\031.hbase.pb.SetQuotaRequest\032\032.hbas" +
+      "e.pb.SetQuotaResponse\022x\n\037getLastMajorCom" +
+      "pactionTimestamp\022).hbase.pb.MajorCompact" +
+      "ionTimestampRequest\032*.hbase.pb.MajorComp" +
+      "actionTimestampResponse\022\212\001\n(getLastMajor",
+      "CompactionTimestampForRegion\0222.hbase.pb." +
+      "MajorCompactionTimestampForRegionRequest" +
+      "\032*.hbase.pb.MajorCompactionTimestampResp" +
+      "onse\022_\n\022getProcedureResult\022#.hbase.pb.Ge" +
+      "tProcedureResultRequest\032$.hbase.pb.GetPr" +
+      "ocedureResultResponse\022M\n\014GetGroupInfo\022\035." +
+      "hbase.pb.GetGroupInfoRequest\032\036.hbase.pb." +
+      "GetGroupInfoResponse\022b\n\023GetGroupInfoOfTa" +
+      "ble\022$.hbase.pb.GetGroupInfoOfTableReques" +
+      "t\032%.hbase.pb.GetGroupInfoOfTableResponse",
+      "\022e\n\024GetGroupInfoOfServer\022%.hbase.pb.GetG" +
+      "roupInfoOfServerRequest\032&.hbase.pb.GetGr" +
+      "oupInfoOfServerResponse\022J\n\013MoveServers\022\034" +
+      ".hbase.pb.MoveServersRequest\032\035.hbase.pb." +
+      "MoveServersResponse\022G\n\nMoveTables\022\033.hbas" +
+      "e.pb.MoveTablesRequest\032\034.hbase.pb.MoveTa" +
+      "blesResponse\022A\n\010AddGroup\022\031.hbase.pb.AddG" +
+      "roupRequest\032\032.hbase.pb.AddGroupResponse\022" +
+      "J\n\013RemoveGroup\022\034.hbase.pb.RemoveGroupReq" +
+      "uest\032\035.hbase.pb.RemoveGroupResponse\022M\n\014B",
+      "alanceGroup\022\035.hbase.pb.BalanceGroupReque" +
+      "st\032\036.hbase.pb.BalanceGroupResponse\022S\n\016Li" +
+      "stGroupInfos\022\037.hbase.pb.ListGroupInfosRe" +
+      "quest\032 .hbase.pb.ListGroupInfosResponseB" +
+      "B\n*org.apache.hadoop.hbase.protobuf.gene" +
+      "ratedB\014MasterProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -68487,18 +67415,6 @@ public final class MasterProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_GetGroupInfoOfServerResponse_descriptor,
               new java.lang.String[] { "GroupInfo", });
-          internal_static_hbase_pb_ListServersInTransitionRequest_descriptor =
-            getDescriptor().getMessageTypes().get(113);
-          internal_static_hbase_pb_ListServersInTransitionRequest_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_ListServersInTransitionRequest_descriptor,
-              new java.lang.String[] { });
-          internal_static_hbase_pb_ListServersInTransitionResponse_descriptor =
-            getDescriptor().getMessageTypes().get(114);
-          internal_static_hbase_pb_ListServersInTransitionResponse_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_ListServersInTransitionResponse_descriptor,
-              new java.lang.String[] { "Transitions", });
           return null;
         }
       };

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 3e58492..4c12b0b 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -530,13 +530,6 @@ message GetGroupInfoOfServerResponse {
   optional GroupInfo group_info = 1;
 }
 
-message ListServersInTransitionRequest {
-}
-
-message ListServersInTransitionResponse {
-  repeated NameStringPair transitions = 1;
-}
-
 service MasterService {
   /** Used by the client to get the number of regions that have received the updated schema */
   rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
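
For reference, the two messages deleted here are what produced the large block of generated Java removed from MasterProtos.java above: a repeated field such as "transitions" expands into the getTransitionsList/getTransitionsCount/getTransitions accessors visible in that hunk. A minimal sketch of how a caller would have consumed the now-removed response type, using only accessors shown in the deleted code (it compiles only against the pre-patch generated sources, since the patch removes ListServersInTransitionResponse):

    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
    import org.apache.hadoop.hbase.protobuf.generated.MasterProtos;

    // Sketch against the pre-patch generated sources only; after this patch
    // the ListServersInTransitionResponse type no longer exists.
    final class TransitionsReader {
      static void dump(MasterProtos.ListServersInTransitionResponse resp) {
        for (int i = 0; i < resp.getTransitionsCount(); i++) {
          HBaseProtos.NameStringPair pair = resp.getTransitions(i);
          System.out.println(pair.getName() + " = " + pair.getValue());
        }
      }
    }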

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java
index a0ab98f..c64a749 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupAdminServer.java
@@ -164,59 +164,65 @@ public class GroupAdminServer implements GroupAdmin {
             "Target group is the same as source group: "+targetGroupName);
       }
 
-      //update the servers as in transition
-      for(HostPort server: servers) {
-        serversInTransition.put(server, targetGroupName);
-      }
+      try {
+        //update the servers as in transition
+        for (HostPort server : servers) {
+          serversInTransition.put(server, targetGroupName);
+        }
 
-      getGroupInfoManager().moveServers(servers, sourceGroupName, targetGroupName);
-      boolean found;
-      List<HostPort> tmpServers = Lists.newArrayList(servers);
-      do {
-        found = false;
-        for(Iterator<HostPort> iter = tmpServers.iterator();
-            iter.hasNext(); ) {
-          HostPort rs = iter.next();
-          //get online regions
-          List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
-          for(Map.Entry<HRegionInfo, ServerName> el:
-              master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
-            if (el.getValue().getHostPort().equals(rs)) {
-              regions.add(el.getKey());
+        getGroupInfoManager().moveServers(servers, sourceGroupName, targetGroupName);
+        boolean found;
+        List<HostPort> tmpServers = Lists.newArrayList(servers);
+        do {
+          found = false;
+          for (Iterator<HostPort> iter = tmpServers.iterator();
+               iter.hasNext(); ) {
+            HostPort rs = iter.next();
+            //get online regions
+            List<HRegionInfo> regions = new LinkedList<HRegionInfo>();
+            for (Map.Entry<HRegionInfo, ServerName> el :
+                master.getAssignmentManager().getRegionStates().getRegionAssignments().entrySet()) {
+              if (el.getValue().getHostPort().equals(rs)) {
+                regions.add(el.getKey());
+              }
             }
-          }
-          for(RegionState state :
-              master.getAssignmentManager().getRegionStates().getRegionsInTransition().values()) {
-            if (state.getServerName().getHostPort().equals(rs)) {
-              regions.add(state.getRegion());
+            for (RegionState state :
+                master.getAssignmentManager().getRegionStates().getRegionsInTransition().values()) {
+              if (state.getServerName().getHostPort().equals(rs)) {
+                regions.add(state.getRegion());
+              }
             }
-          }
 
-          //unassign regions for a server
-          LOG.info("Unassigning "+regions.size()+
-              " regions from server "+rs+" for move to "+targetGroupName);
-          if(regions.size() > 0) {
-            //TODO bulk unassign or throttled unassign?
-            for(HRegionInfo region: regions) {
-              //regions might get assigned from tables of target group
-              //so we need to filter
-              if(!targetGrp.containsTable(region.getTable())) {
-                master.getAssignmentManager().unassign(region);
-                found = true;
+            //unassign regions for a server
+            LOG.info("Unassigning " + regions.size() +
+                " regions from server " + rs + " for move to " + targetGroupName);
+            if (regions.size() > 0) {
+              //TODO bulk unassign or throttled unassign?
+              for (HRegionInfo region : regions) {
+                //regions might get assigned from tables of target group
+                //so we need to filter
+                if (!targetGrp.containsTable(region.getTable())) {
+                  master.getAssignmentManager().unassign(region);
+                  found = true;
+                }
               }
             }
+            if (!found) {
+              iter.remove();
+            }
           }
-          if(!found) {
-            iter.remove();
-            serversInTransition.remove(rs);
+          try {
+            Thread.sleep(1000);
+          } catch (InterruptedException e) {
+            LOG.warn("Sleep interrupted", e);
           }
+        } while (found);
+      } finally {
+        //remove from transition
+        for (HostPort server : servers) {
+          serversInTransition.remove(server);
         }
-        try {
-          Thread.sleep(1000);
-        } catch (InterruptedException e) {
-          LOG.warn("Sleep interrupted", e);
-        }
-      } while(found);
+      }
 
       LOG.info("Move server done: "+sourceGroupName+"->"+targetGroupName);
       if (master.getMasterCoprocessorHost() != null) {
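
The reshaped hunk above is the substantive fix in GroupAdminServer: servers are marked in serversInTransition before any regions move, and the new finally block clears those markers even when moveServers or unassign throws, whereas the old code only removed a server once its regions had fully drained. The shape in isolation, as a minimal self-contained sketch (illustrative names, plain Strings standing in for HostPort, the actual region work elided):

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    // Minimal sketch of the register/act/deregister shape used in moveServers.
    final class TransitionTracker {
      private final Map<String, String> serversInTransition = new ConcurrentHashMap<>();

      void moveServers(List<String> servers, String targetGroup) {
        try {
          for (String server : servers) {
            serversInTransition.put(server, targetGroup);  // mark as in transition
          }
          // ... unassign regions and retry until drained (elided) ...
        } finally {
          // Always clear the markers, even if the move fails part-way through.
          for (String server : servers) {
            serversInTransition.remove(server);
          }
        }
      }
    }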

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java
index ae5fcac..67b6e90 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupBasedLoadBalancer.java
@@ -34,12 +34,10 @@ import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HostPort;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.balancer.StochasticLoadBalancer;
-import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.util.ReflectionUtils;
 
 import java.io.IOException;
@@ -62,7 +60,7 @@ import java.util.TreeMap;
  * table is online and Offline - when it is unavailable.
  *
  * During Offline, assignments are assigned based on cached information in zookeeper.
- * If unavailable (ie bootstrap) then regions are assigned randombly.
+ * If unavailable (ie bootstrap) then regions are assigned randomly.
  *
  * Once the GROUP table has been assigned, the balancer switches to Online and will then
  * start providing appropriate assignments for user tables.
@@ -150,8 +148,8 @@ public class GroupBasedLoadBalancer implements GroupableBalancer, LoadBalancer {
   public Map<ServerName, List<HRegionInfo>> roundRobinAssignment (
       List<HRegionInfo> regions, List<ServerName> servers) throws HBaseIOException {
     Map<ServerName, List<HRegionInfo>> assignments = Maps.newHashMap();
-    ListMultimap<String,HRegionInfo> regionMap = LinkedListMultimap.create();
-    ListMultimap<String,ServerName> serverMap = LinkedListMultimap.create();
+    ListMultimap<String,HRegionInfo> regionMap = ArrayListMultimap.create();
+    ListMultimap<String,ServerName> serverMap = ArrayListMultimap.create();
     generateGroupMaps(regions, servers, regionMap, serverMap);
     for(String groupKey : regionMap.keySet()) {
       if (regionMap.get(groupKey).size() > 0) {
@@ -170,27 +168,10 @@ public class GroupBasedLoadBalancer implements GroupableBalancer, LoadBalancer {
   @Override
   public Map<ServerName, List<HRegionInfo>> retainAssignment(
       Map<HRegionInfo, ServerName> regions, List<ServerName> servers) throws HBaseIOException {
-    if (!isOnline()) {
-      return offlineRetainAssignment(regions, servers);
-    }
-    return onlineRetainAssignment(regions, servers);
-  }
-
-  public Map<ServerName, List<HRegionInfo>> offlineRetainAssignment(
-      Map<HRegionInfo, ServerName> regions, List<ServerName> servers) throws HBaseIOException {
-      //We will just keep assignments even if they are incorrect.
-      //Chances are most will be assigned correctly.
-      //Then we just use balance to correct the misplaced few.
-      //we need to correct catalog and group table assignment anyway.
-      return internalBalancer.retainAssignment(regions, servers);
-  }
-
-  public Map<ServerName, List<HRegionInfo>> onlineRetainAssignment(
-      Map<HRegionInfo, ServerName> regions, List<ServerName> servers) throws HBaseIOException {
     try {
       Map<ServerName, List<HRegionInfo>> assignments = new TreeMap<ServerName, List<HRegionInfo>>();
       ListMultimap<String, HRegionInfo> groupToRegion = ArrayListMultimap.create();
-      List<HRegionInfo> misplacedRegions = getMisplacedRegions(regions);
+      Set<HRegionInfo> misplacedRegions = getMisplacedRegions(regions);
       for (HRegionInfo region : regions.keySet()) {
         if (!misplacedRegions.contains(region)) {
           String groupName = groupManager.getGroupOfTable(region.getTable());
@@ -207,8 +188,10 @@ public class GroupBasedLoadBalancer implements GroupableBalancer, LoadBalancer {
         for (HRegionInfo region : regionList) {
           currentAssignmentMap.put(region, regions.get(region));
         }
-        assignments.putAll(this.internalBalancer.retainAssignment(
-            currentAssignmentMap, candidateList));
+        if(candidateList.size() > 0) {
+          assignments.putAll(this.internalBalancer.retainAssignment(
+              currentAssignmentMap, candidateList));
+        }
       }
 
       for (HRegionInfo region : misplacedRegions) {
@@ -332,9 +315,9 @@ public class GroupBasedLoadBalancer implements GroupableBalancer, LoadBalancer {
     return regionGroup;
   }
 
-  private List<HRegionInfo> getMisplacedRegions(
+  private Set<HRegionInfo> getMisplacedRegions(
       Map<HRegionInfo, ServerName> regions) throws IOException {
-    List<HRegionInfo> misplacedRegions = new ArrayList<HRegionInfo>();
+    Set<HRegionInfo> misplacedRegions = new HashSet<HRegionInfo>();
     for (HRegionInfo region : regions.keySet()) {
       ServerName assignedServer = regions.get(region);
       GroupInfo info = groupManager.getGroup(groupManager.getGroupOfTable(region.getTable()));
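
Three small changes above are easy to miss: LinkedListMultimap becomes ArrayListMultimap (array-backed value lists), getMisplacedRegions now returns a Set so the contains check in retainAssignment is a hashed lookup rather than a linear scan, and retainAssignment skips groups whose candidate server list is empty instead of handing the internal balancer nothing to assign to. The membership difference, reduced to a toy with illustrative types:

    import java.util.ArrayList;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    // Toy illustration of the List -> Set change for misplaced-region lookups:
    // one contains() per region is O(n^2) overall with a List, O(n) with a HashSet.
    final class MembershipDemo {
      public static void main(String[] args) {
        List<Integer> asList = new ArrayList<>();
        Set<Integer> asSet = new HashSet<>();
        for (int i = 0; i < 100_000; i++) { asList.add(i); asSet.add(i); }
        System.out.println(asList.contains(99_999)); // linear scan per call
        System.out.println(asSet.contains(99_999));  // hashed lookup per call
      }
    }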

http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java
index a0df353..453fa8c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/GroupInfoManagerImpl.java
@@ -23,7 +23,6 @@ package org.apache.hadoop.hbase.group;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
-import com.google.protobuf.ByteString;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -57,6 +56,7 @@ import org.apache.hadoop.hbase.master.TableStateManager;
 import org.apache.hadoop.hbase.master.handler.CreateTableHandler;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
@@ -575,18 +575,11 @@ public class GroupInfoManagerImpl implements GroupInfoManager, ServerListener {
                       found.set(false);
                     } else if (tsm.isTableState(GROUP_TABLE_NAME, TableState.State.ENABLED)) {
                       try {
-                        HBaseProtos.RegionSpecifier regionSpecifier =
-                            HBaseProtos.RegionSpecifier.newBuilder()
-                                .setValue(ByteString.copyFrom(row.getRow()))
-                                .setType(
-                                    HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
-                                .build();
                         ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
-                        ClientProtos.GetRequest req =
-                            ClientProtos.GetRequest.newBuilder()
-                                .setRegion(regionSpecifier)
-                                .setGet(ProtobufUtil.toGet(new Get(ROW_KEY))).build();
-                        rs.get(null, req);
+                        ClientProtos.GetRequest request =
+                            RequestConverter.buildGetRequest(info.getRegionName(),
+                                new Get(ROW_KEY));
+                        rs.get(null, request);
                         assignedRegions.add(info);
                       } catch(Exception ex) {
                         LOG.debug("Caught exception while verifying group region", ex);
@@ -604,18 +597,11 @@ public class GroupInfoManagerImpl implements GroupInfoManager, ServerListener {
                     if (tsm.isTableState(TableName.NAMESPACE_TABLE_NAME,
                         TableState.State.ENABLED)) {
                       try {
-                        HBaseProtos.RegionSpecifier regionSpecifier =
-                            HBaseProtos.RegionSpecifier.newBuilder()
-                                .setValue(ByteString.copyFrom(row.getRow()))
-                                .setType(
-                                    HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
-                                .build();
                         ClientProtos.ClientService.BlockingInterface rs = conn.getClient(sn);
-                        ClientProtos.GetRequest req =
-                            ClientProtos.GetRequest.newBuilder()
-                                .setRegion(regionSpecifier)
-                                .setGet(ProtobufUtil.toGet(new Get(ROW_KEY))).build();
-                        rs.get(null, req);
+                        ClientProtos.GetRequest request =
+                            RequestConverter.buildGetRequest(info.getRegionName(),
+                                new Get(ROW_KEY));
+                        rs.get(null, request);
                         nsFound.set(true);
                       } catch(Exception ex) {
                         LOG.debug("Caught exception while verifying group region", ex);

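Both hunks make the same simplification: the hand-assembled RegionSpecifier
and GetRequest are replaced by a single RequestConverter.buildGetRequest call,
which produces the equivalent protobuf (a REGION_NAME specifier plus the
converted client Get) from a region name and a row key. A minimal sketch of
the new call shape; regionName and rowKey are assumed to come from the
surrounding meta-scan loop, as in the diff:

    import java.io.IOException;

    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.protobuf.RequestConverter;
    import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

    final class GetRequestSketch {
      // Builds the same request the removed code constructed by hand.
      static ClientProtos.GetRequest probe(byte[] regionName, byte[] rowKey)
          throws IOException {
        return RequestConverter.buildGetRequest(regionName, new Get(rowKey));
      }
    }

The request is then issued against the region server exactly as before, via
ClientProtos.ClientService.BlockingInterface.get(null, request).
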
http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java
index a19b24e..6ccd0ab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBean.java
@@ -39,10 +39,12 @@ public interface MXBean {
     private String name;
     private List<HostPort> servers;
     private List<TableName> tables;
+    private List<HostPort> offlineServers;
 
     //Need this to convert NavigableSet to List
-    public GroupInfoBean(GroupInfo groupInfo) {
+    public GroupInfoBean(GroupInfo groupInfo, List<HostPort> offlineServers) {
       this.name = groupInfo.getName();
+      this.offlineServers = offlineServers;
       this.servers = new LinkedList<HostPort>();
       this.servers.addAll(groupInfo.getServers());
       this.tables = new LinkedList<TableName>();
@@ -57,6 +59,10 @@ public interface MXBean {
       return servers;
     }
 
+    public List<HostPort> getOfflineServers() {
+      return offlineServers;
+    }
+
     public List<TableName> getTables() {
       return tables;
     }

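GroupInfoBean now carries the offline-server list alongside the group's
configured servers and tables, so a JMX client can see which group members are
currently down without computing the difference itself. A small usage sketch,
assuming the same org.apache.hadoop.hbase.group package as the diff; the
'offline' argument is expected to be computed as in MXBeanImpl below:

    import java.util.List;

    import org.apache.hadoop.hbase.HostPort;

    final class BeanSketch {
      static MXBean.GroupInfoBean toBean(GroupInfo group, List<HostPort> offline) {
        // Every offline server is, by construction, also in getServers().
        return new MXBean.GroupInfoBean(group, offline);
      }
    }
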
http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java
index b0894eb..5836d2d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/group/MXBeanImpl.java
@@ -20,6 +20,8 @@
 
 package org.apache.hadoop.hbase.group;
 
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HostPort;
@@ -31,6 +33,7 @@ import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 public class MXBeanImpl implements MXBean {
   private static final Log LOG = LogFactory.getLog(MXBeanImpl.class);
@@ -72,9 +75,19 @@ public class MXBeanImpl implements MXBean {
 
   @Override
   public List<GroupInfoBean> getGroups() throws IOException {
-    LinkedList list = new LinkedList();
-    for(GroupInfo group: groupAdmin.listGroups()) {
-      list.add(new GroupInfoBean(group));
+    Set<HostPort> onlineServers = Sets.newHashSet();
+    for (ServerName entry: master.getServerManager().getOnlineServersList()) {
+      onlineServers.add(new HostPort(entry.getHostname(), entry.getPort()));
+    }
+    List<GroupInfoBean> list = Lists.newArrayList();
+    for (GroupInfo group: groupAdmin.listGroups()) {
+      List<HostPort> offlineServers = Lists.newArrayList();
+      for (HostPort server: group.getServers()) {
+        if (!onlineServers.contains(server)) {
+          offlineServers.add(server);
+        }
+      }
+      list.add(new GroupInfoBean(group, offlineServers));
     }
     return list;
   }

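The loop in getGroups() is effectively a set difference: every server
configured for the group that is missing from the master's online-server
snapshot is reported as offline. With Guava already on the classpath, the same
computation can be expressed directly; a sketch (assuming GroupInfo.getServers()
returns a Set<HostPort>, as the NavigableSet comment in MXBean indicates):

    import java.util.List;
    import java.util.Set;

    import com.google.common.collect.Lists;
    import com.google.common.collect.Sets;

    import org.apache.hadoop.hbase.HostPort;

    final class OfflineSketch {
      static List<HostPort> offline(Set<HostPort> groupServers,
          Set<HostPort> onlineServers) {
        // Sets.difference returns a live view; copy it so the bean owns
        // its own list.
        return Lists.newArrayList(Sets.difference(groupServers, onlineServers));
      }
    }
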
http://git-wip-us.apache.org/repos/asf/hbase/blob/fade887a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 248aafc..9b6a601 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -1222,7 +1222,13 @@ public class AssignmentManager {
           || existingPlan.getDestination() == null
           || !destServers.contains(existingPlan.getDestination())) {
         newPlan = true;
-        randomPlan = new RegionPlan(region, null, balancer.randomAssignment(region, destServers));
+        try {
+          randomPlan = new RegionPlan(region, null,
+              balancer.randomAssignment(region, destServers));
+        } catch (IOException ex) {
+          LOG.warn("Failed to create new plan.", ex);
+          return null;
+        }
         if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) {
           List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
           regions.add(region);
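
With the group-aware balancer in place, randomAssignment can fail with an
IOException (for instance, when group information cannot be retrieved), so the
hunk above converts that failure into a null plan instead of letting the
exception escape region-plan creation. A sketch of the defensive shape; class
and method names outside the hunk are illustrative:

    import java.io.IOException;
    import java.util.List;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.master.LoadBalancer;
    import org.apache.hadoop.hbase.master.RegionPlan;

    final class PlanSketch {
      private static final Log LOG = LogFactory.getLog(PlanSketch.class);

      // A balancer failure becomes "no plan"; the caller treats null as
      // "cannot assign right now" rather than as a fatal error.
      static RegionPlan tryRandomPlan(LoadBalancer balancer, HRegionInfo region,
          List<ServerName> destServers) {
        try {
          return new RegionPlan(region, null,
              balancer.randomAssignment(region, destServers));
        } catch (IOException ex) {
          LOG.warn("Failed to create new plan for "
              + region.getRegionNameAsString(), ex);
          return null;
        }
      }
    }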