You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ch...@apache.org on 2017/11/05 02:00:57 UTC

[1/3] hbase git commit: HBASE-19131 Add the ClusterStatus hook and cleanup other hooks which can be replaced by ClusterStatus hook

Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 4d7c40af9 -> 9ad69e446


http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 03d5123..bb0e63b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.Coprocessor;
@@ -674,7 +675,7 @@ public interface MasterObserver extends Coprocessor {
    */
   void postListSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final SnapshotDescription snapshot) throws IOException;
-  
+
   /**
    * Called before a snapshot is cloned.
    * Called as part of restoreSnapshot RPC call.
@@ -921,7 +922,7 @@ public interface MasterObserver extends Coprocessor {
    */
   void postTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final TableName tableName) throws IOException;
-  
+
   /**
    * Called before the quota for the user is stored.
    * @param ctx the environment to interact with the framework and master
@@ -1027,7 +1028,7 @@ public interface MasterObserver extends Coprocessor {
       final String namespace, final Quotas quotas) throws IOException;
 
   /**
-   * Called before dispatching region merge request. 
+   * Called before dispatching region merge request.
    * It can't bypass the default action, e.g., ctx.bypass() won't have effect.
    * @param ctx coprocessor environment
    * @param regionA first region to be merged
@@ -1036,7 +1037,7 @@ public interface MasterObserver extends Coprocessor {
    */
   void preDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException;
-  
+
   /**
    * called after dispatching the region merge request.
    * @param c coprocessor environment
@@ -1048,14 +1049,15 @@ public interface MasterObserver extends Coprocessor {
       final HRegionInfo regionA, final HRegionInfo regionB) throws IOException;
 
   /**
-   * Called before list dead region servers.
+   * Called before getting the cluster status.
    */
-  void preListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException;
+  void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException;
 
   /**
-   * Called after list dead region servers.
+   * Called after getting the cluster status.
    */
-  void postListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException;
+  void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      ClusterStatus status) throws IOException;
 
   /**
    * Called before clear dead region servers.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 9b41bbf..558d303 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -828,7 +828,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
     //initialize load balancer
     this.balancer.setMasterServices(this);
-    this.balancer.setClusterStatus(getClusterStatus());
+    this.balancer.setClusterStatus(getClusterStatusWithoutCoprocessor());
     this.balancer.initialize();
 
     // Check if master is shutting down because of some issue
@@ -860,7 +860,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     this.assignmentManager.joinCluster();
 
     // set cluster status again after user regions are assigned
-    this.balancer.setClusterStatus(getClusterStatus());
+    this.balancer.setClusterStatus(getClusterStatusWithoutCoprocessor());
 
     // Start balancer and meta catalog janitor after meta and regions have been assigned.
     status.setStatus("Starting balancer and catalog janitor");
@@ -1488,7 +1488,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
       List<RegionPlan> plans = new ArrayList<RegionPlan>();
 
       //Give the balancer the current cluster state.
-      this.balancer.setClusterStatus(getClusterStatus());
+      this.balancer.setClusterStatus(getClusterStatusWithoutCoprocessor());
       for (Entry<TableName, Map<ServerName, List<HRegionInfo>>> e : assignmentsByTable.entrySet()) {
         List<RegionPlan> partialPlans = this.balancer.balanceCluster(e.getKey(), e.getValue());
         if (partialPlans != null) plans.addAll(partialPlans);
@@ -2404,10 +2404,22 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     }
   }
 
+  public ClusterStatus getClusterStatus() throws IOException {
+    if (cpHost != null) {
+      cpHost.preGetClusterStatus();
+    }
+    ClusterStatus status = getClusterStatusWithoutCoprocessor();
+    LOG.info(getClientIdAuditPrefix() + " get ClusterStatus, status=" + status);
+    if (cpHost != null) {
+      cpHost.postGetClusterStatus(status);
+    }
+    return status;
+  }
+
   /**
    * @return cluster status
    */
-  public ClusterStatus getClusterStatus() throws InterruptedIOException {
+  public ClusterStatus getClusterStatusWithoutCoprocessor() throws InterruptedIOException {
     // Build Set of backup masters from ZK nodes
     List<String> backupMasterStrings;
     try {
@@ -3154,12 +3166,12 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
   @Override
   public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
-    return getClusterStatus().getLastMajorCompactionTsForTable(table);
+    return getClusterStatusWithoutCoprocessor().getLastMajorCompactionTsForTable(table);
   }
 
   @Override
   public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
-    return getClusterStatus().getLastMajorCompactionTsForRegion(regionName);
+    return getClusterStatusWithoutCoprocessor().getLastMajorCompactionTsForRegion(regionName);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 78c7925..c62057a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -27,6 +27,7 @@ import org.apache.commons.lang.ClassUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -919,7 +920,7 @@ public class MasterCoprocessorHost
       }
     });
   }
-  
+
   public void preCloneSnapshot(final SnapshotDescription snapshot,
       final HTableDescriptor hTableDescriptor) throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
@@ -1071,7 +1072,7 @@ public class MasterCoprocessorHost
       }
     });
   }
-  
+
   public void preSetUserQuota(final String user, final Quotas quotas) throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
       @Override
@@ -1166,7 +1167,7 @@ public class MasterCoprocessorHost
     });
   }
 
-  public void postSetNamespaceQuota(final String namespace, final Quotas quotas) 
+  public void postSetNamespaceQuota(final String namespace, final Quotas quotas)
       throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
       @Override
@@ -1177,22 +1178,22 @@ public class MasterCoprocessorHost
     });
   }
 
-  public void preListDeadServers() throws IOException {
+  public void preGetClusterStatus() throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
       @Override
       public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
               throws IOException {
-        oserver.preListDeadServers(ctx);
+        oserver.preGetClusterStatus(ctx);
       }
     });
   }
 
-  public void postListDeadServers() throws IOException {
+  public void postGetClusterStatus(final ClusterStatus status) throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
       @Override
       public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
               throws IOException {
-        oserver.postListDeadServers(ctx);
+        oserver.postGetClusterStatus(ctx, status);
       }
     });
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 738e8fb..8cc2c03 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -125,8 +125,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshot
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest;
@@ -1141,33 +1139,6 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
-  public ListDeadServersResponse listDeadServers(RpcController controller,
-      ListDeadServersRequest request) throws ServiceException {
-
-    LOG.debug(master.getClientIdAuditPrefix() + " list dead region servers.");
-    ListDeadServersResponse.Builder response = ListDeadServersResponse.newBuilder();
-    try {
-      master.checkInitialized();
-      if (master.cpHost != null) {
-        master.cpHost.preListDeadServers();
-      }
-
-      Set<ServerName> servers = master.getServerManager().getDeadServers().copyServerNames();
-      for (ServerName server : servers) {
-        response.addServerName(ProtobufUtil.toServerName(server));
-      }
-
-      if (master.cpHost != null) {
-        master.cpHost.postListDeadServers();
-      }
-    } catch (IOException io) {
-      throw new ServiceException(io);
-    }
-
-    return response.build();
-  }
-
-  @Override
   public ClearDeadServersResponse clearDeadServers(RpcController controller,
       ClearDeadServersRequest request) throws ServiceException {
     LOG.debug(master.getClientIdAuditPrefix() + " clear dead region servers.");

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java
index 58e5808..8f3e523 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterStatusChore.java
@@ -46,7 +46,7 @@ public class ClusterStatusChore extends ScheduledChore {
   @Override
   protected void chore() {
     try {
-      balancer.setClusterStatus(master.getClusterStatus());
+      balancer.setClusterStatus(master.getClusterStatusWithoutCoprocessor());
     } catch (InterruptedIOException e) {
       LOG.warn("Ignoring interruption", e);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 550b98e..17da4a0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -770,7 +771,7 @@ public class AccessController extends BaseMasterAndRegionObserver
           }
         }
       } else if (entry.getValue() == null) {
-        get.addFamily(col);        
+        get.addFamily(col);
       } else {
         throw new RuntimeException("Unhandled collection type " +
           entry.getValue().getClass().getName());
@@ -1338,7 +1339,7 @@ public class AccessController extends BaseMasterAndRegionObserver
       requirePermission("listSnapshot " + snapshot.getName(), Action.ADMIN);
     }
   }
-  
+
   @Override
   public void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)
@@ -1412,7 +1413,7 @@ public class AccessController extends BaseMasterAndRegionObserver
   @Override
   public void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
       NamespaceDescriptor ns) throws IOException {
-    // We require only global permission so that 
+    // We require only global permission so that
     // a user with NS admin cannot altering namespace configurations. i.e. namespace quota
     requireGlobalPermission("modifyNamespace", Action.ADMIN, ns.getName());
   }
@@ -2606,14 +2607,6 @@ public class AccessController extends BaseMasterAndRegionObserver
   }
 
   @Override
-  public void preListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
-      throws IOException { }
-
-  @Override
-  public void postListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
-      throws IOException { }
-
-  @Override
   public void preClearDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
       throws IOException {
     requirePermission("clearDeadServers", Action.ADMIN);
@@ -2676,7 +2669,7 @@ public class AccessController extends BaseMasterAndRegionObserver
   public void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
       List<WALEntry> entries, CellScanner cells) throws IOException {
   }
-  
+
   @Override
   public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final String userName, final Quotas quotas) throws IOException {
@@ -2742,4 +2735,10 @@ public class AccessController extends BaseMasterAndRegionObserver
       String groupName) throws IOException {
     requirePermission("balanceRSGroup", Action.ADMIN);
   }
+
+  @Override
+  public void preGetClusterStatus(final ObserverContext<MasterCoprocessorEnvironment> ctx)
+      throws IOException {
+    requirePermission("getClusterStatus", Action.ADMIN);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientClusterStatus.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientClusterStatus.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientClusterStatus.java
new file mode 100644
index 0000000..197b0d9
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientClusterStatus.java
@@ -0,0 +1,188 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test the ClusterStatus.
+ */
+@Category(SmallTests.class)
+public class TestClientClusterStatus {
+  private static HBaseTestingUtility UTIL;
+  private static HBaseAdmin ADMIN;
+  private final static int SLAVES = 5;
+  private final static int MASTERS = 3;
+  private static MiniHBaseCluster CLUSTER;
+  private static HRegionServer DEAD;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, MyObserver.class.getName());
+    UTIL = new HBaseTestingUtility(conf);
+    UTIL.startMiniCluster(MASTERS, SLAVES);
+    CLUSTER = UTIL.getHBaseCluster();
+    CLUSTER.waitForActiveAndReadyMaster();
+    ADMIN = UTIL.getHBaseAdmin();
+    // Kill one region server
+    List<RegionServerThread> rsts = CLUSTER.getLiveRegionServerThreads();
+    RegionServerThread rst = rsts.get(rsts.size() - 1);
+    DEAD = rst.getRegionServer();
+    DEAD.stop("Test dead servers status");
+    while (rst.isAlive()) {
+      Thread.sleep(500);
+    }
+  }
+
+  @Test
+  public void testDefaults() throws Exception {
+    ClusterStatus origin = ADMIN.getClusterStatus();
+    ClusterStatus defaults = ADMIN.getClusterStatus();
+    Assert.assertEquals(origin.getHBaseVersion(), defaults.getHBaseVersion());
+    Assert.assertEquals(origin.getClusterId(), defaults.getClusterId());
+    Assert.assertTrue(origin.getAverageLoad() == defaults.getAverageLoad());
+    Assert.assertTrue(origin.getBackupMastersSize() == defaults.getBackupMastersSize());
+    Assert.assertTrue(origin.getDeadServers() == defaults.getDeadServers());
+    Assert.assertTrue(origin.getRegionsCount() == defaults.getRegionsCount());
+    Assert.assertTrue(origin.getServersSize() == defaults.getServersSize());
+    Assert.assertTrue(origin.equals(defaults));
+  }
+
+
+  @Test
+  public void testLiveAndDeadServersStatus() throws Exception {
+    List<RegionServerThread> regionserverThreads = CLUSTER.getLiveRegionServerThreads();
+    int numRs = 0;
+    int len = regionserverThreads.size();
+    for (int i = 0; i < len; i++) {
+      if (regionserverThreads.get(i).isAlive()) {
+        numRs++;
+      }
+    }
+    // Retrieve live servers and dead servers info.
+    ClusterStatus status = ADMIN.getClusterStatus();
+    Assert.assertNotNull(status);
+    Assert.assertNotNull(status.getServers());
+    // exclude a dead region server
+    Assert.assertEquals(SLAVES -1, numRs);
+    // live servers = nums of regionservers
+    // By default, HMaster doesn't carry any regions, so it won't report its load.
+    // Hence, it won't be in the server list.
+    Assert.assertEquals(status.getServers().size(), numRs);
+    Assert.assertTrue(status.getRegionsCount() > 0);
+    Assert.assertNotNull(status.getDeadServerNames());
+    Assert.assertEquals(1, status.getDeadServers());
+    ServerName deadServerName = status.getDeadServerNames().iterator().next();
+    Assert.assertEquals(DEAD.getServerName(), deadServerName);
+  }
+
+  @Test
+  public void testMasterAndBackupMastersStatus() throws Exception {
+    // get all the master threads
+    List<MasterThread> masterThreads = CLUSTER.getMasterThreads();
+    int numActive = 0;
+    int activeIndex = 0;
+    ServerName activeName = null;
+    HMaster active = null;
+    for (int i = 0; i < masterThreads.size(); i++) {
+      if (masterThreads.get(i).getMaster().isActiveMaster()) {
+        numActive++;
+        activeIndex = i;
+        active = masterThreads.get(activeIndex).getMaster();
+        activeName = active.getServerName();
+      }
+    }
+    Assert.assertNotNull(active);
+    Assert.assertEquals(1, numActive);
+    Assert.assertEquals(MASTERS, masterThreads.size());
+    // Retrieve master and backup masters infos only.
+    ClusterStatus status = ADMIN.getClusterStatus();
+    Assert.assertTrue(status.getMaster().equals(activeName));
+    Assert.assertEquals(MASTERS - 1, status.getBackupMastersSize());
+  }
+
+  @Test
+  public void testOtherStatusInfos() throws Exception {
+    ClusterStatus status = ADMIN.getClusterStatus();
+    Assert.assertTrue(status.getMasterCoprocessors().length == 1);
+    Assert.assertNotNull(status.getHBaseVersion());
+    Assert.assertNotNull(status.getClusterId());
+    Assert.assertNotNull(status.getBalancerOn());
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    if (ADMIN != null) ADMIN.close();
+    UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testObserver() throws IOException {
+    int preCount = MyObserver.PRE_COUNT.get();
+    int postCount = MyObserver.POST_COUNT.get();
+    boolean find = false;
+    for (String s : ADMIN.getClusterStatus().getMasterCoprocessors()) {
+      if (s.equals(MyObserver.class.getSimpleName())) {
+        find = true;
+      }
+    }
+    Assert.assertTrue(find);
+    Assert.assertEquals(preCount + 1, MyObserver.PRE_COUNT.get());
+    Assert.assertEquals(postCount + 1, MyObserver.POST_COUNT.get());
+  }
+
+  public static class MyObserver extends BaseMasterObserver {
+    private static final AtomicInteger PRE_COUNT = new AtomicInteger(0);
+    private static final AtomicInteger POST_COUNT = new AtomicInteger(0);
+
+
+    @Override
+    public void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
+      throws IOException {
+      PRE_COUNT.incrementAndGet();
+    }
+
+    @Override public void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      ClusterStatus status) throws IOException {
+      POST_COUNT.incrementAndGet();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index b20d7bc..eb62a35 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -36,6 +36,7 @@ import java.util.concurrent.CountDownLatch;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -272,13 +273,13 @@ public class TestMasterObserver {
     }
 
     @Override
-    public void preListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
+    public void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
         throws IOException {
     }
 
     @Override
-    public void postListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
-        throws IOException {
+    public void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        ClusterStatus status) throws IOException {
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 38960f7..daeba9a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -1568,7 +1568,7 @@ public class TestSplitTransactionOnCluster {
     }
   }
 
-  private void waitUntilRegionServerDead() throws InterruptedException, InterruptedIOException {
+  private void waitUntilRegionServerDead() throws InterruptedException, IOException {
     // Wait until the master processes the RS shutdown
     for (int i=0; cluster.getMaster().getClusterStatus().
         getServers().size() > NB_SERVERS && i<100; i++) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 5aedbf8..5770a41 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -3040,4 +3040,18 @@ public class TestAccessController extends SecureTestUtil {
     verifyAllowed(action1, SUPERUSER, USER_ADMIN);
     verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
+
+  @Test
+  public void testGetClusterStatus() throws Exception {
+    AccessTestAction action = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+      ACCESS_CONTROLLER.preGetClusterStatus(ObserverContext.createAndPrepare(CP_ENV, null));
+      return null;
+      }
+    };
+
+    verifyAllowed(action, SUPERUSER, USER_ADMIN);
+    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
 }


[2/3] hbase git commit: HBASE-19131 Add the ClusterStatus hook and cleanup other hooks which can be replaced by ClusterStatus hook

Posted by ch...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index da111a4..8a39f28 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -60631,1065 +60631,6 @@ public final class MasterProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.SecurityCapabilitiesResponse)
   }
 
-  public interface ListDeadServersRequestOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-  }
-  /**
-   * Protobuf type {@code hbase.pb.ListDeadServersRequest}
-   */
-  public static final class ListDeadServersRequest extends
-      com.google.protobuf.GeneratedMessage
-      implements ListDeadServersRequestOrBuilder {
-    // Use ListDeadServersRequest.newBuilder() to construct.
-    private ListDeadServersRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private ListDeadServersRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final ListDeadServersRequest defaultInstance;
-    public static ListDeadServersRequest getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public ListDeadServersRequest getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private ListDeadServersRequest(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDeadServersRequest_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDeadServersRequest_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<ListDeadServersRequest> PARSER =
-        new com.google.protobuf.AbstractParser<ListDeadServersRequest>() {
-      public ListDeadServersRequest parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ListDeadServersRequest(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<ListDeadServersRequest> getParserForType() {
-      return PARSER;
-    }
-
-    private void initFields() {
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest) obj;
-
-      boolean result = true;
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-
-    private int memoizedHashCode = 0;
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.ListDeadServersRequest}
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequestOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDeadServersRequest_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDeadServersRequest_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDeadServersRequest_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest(this);
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest.getDefaultInstance()) return this;
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.ListDeadServersRequest)
-    }
-
-    static {
-      defaultInstance = new ListDeadServersRequest(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.ListDeadServersRequest)
-  }
-
-  public interface ListDeadServersResponseOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // repeated .hbase.pb.ServerName server_name = 1;
-    /**
-     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-     */
-    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
-        getServerNameList();
-    /**
-     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index);
-    /**
-     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-     */
-    int getServerNameCount();
-    /**
-     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-     */
-    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
-        getServerNameOrBuilderList();
-    /**
-     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
-        int index);
-  }
-  /**
-   * Protobuf type {@code hbase.pb.ListDeadServersResponse}
-   */
-  public static final class ListDeadServersResponse extends
-      com.google.protobuf.GeneratedMessage
-      implements ListDeadServersResponseOrBuilder {
-    // Use ListDeadServersResponse.newBuilder() to construct.
-    private ListDeadServersResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private ListDeadServersResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final ListDeadServersResponse defaultInstance;
-    public static ListDeadServersResponse getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public ListDeadServersResponse getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private ListDeadServersResponse(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-                serverName_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>();
-                mutable_bitField0_ |= 0x00000001;
-              }
-              serverName_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-          serverName_ = java.util.Collections.unmodifiableList(serverName_);
-        }
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDeadServersResponse_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDeadServersResponse_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<ListDeadServersResponse> PARSER =
-        new com.google.protobuf.AbstractParser<ListDeadServersResponse>() {
-      public ListDeadServersResponse parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ListDeadServersResponse(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<ListDeadServersResponse> getParserForType() {
-      return PARSER;
-    }
-
-    // repeated .hbase.pb.ServerName server_name = 1;
-    public static final int SERVER_NAME_FIELD_NUMBER = 1;
-    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> serverName_;
-    /**
-     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-     */
-    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServerNameList() {
-      return serverName_;
-    }
-    /**
-     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-     */
-    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
-        getServerNameOrBuilderList() {
-      return serverName_;
-    }
-    /**
-     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-     */
-    public int getServerNameCount() {
-      return serverName_.size();
-    }
-    /**
-     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) {
-      return serverName_.get(index);
-    }
-    /**
-     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
-        int index) {
-      return serverName_.get(index);
-    }
-
-    private void initFields() {
-      serverName_ = java.util.Collections.emptyList();
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      for (int i = 0; i < getServerNameCount(); i++) {
-        if (!getServerName(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      for (int i = 0; i < serverName_.size(); i++) {
-        output.writeMessage(1, serverName_.get(i));
-      }
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      for (int i = 0; i < serverName_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, serverName_.get(i));
-      }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse) obj;
-
-      boolean result = true;
-      result = result && getServerNameList()
-          .equals(other.getServerNameList());
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-
-    private int memoizedHashCode = 0;
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (getServerNameCount() > 0) {
-        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getServerNameList().hashCode();
-      }
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.ListDeadServersResponse}
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponseOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDeadServersResponse_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDeadServersResponse_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getServerNameFieldBuilder();
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        if (serverNameBuilder_ == null) {
-          serverName_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-        } else {
-          serverNameBuilder_.clear();
-        }
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDeadServersResponse_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse build() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse(this);
-        int from_bitField0_ = bitField0_;
-        if (serverNameBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001)) {
-            serverName_ = java.util.Collections.unmodifiableList(serverName_);
-            bitField0_ = (bitField0_ & ~0x00000001);
-          }
-          result.serverName_ = serverName_;
-        } else {
-          result.serverName_ = serverNameBuilder_.build();
-        }
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.getDefaultInstance()) return this;
-        if (serverNameBuilder_ == null) {
-          if (!other.serverName_.isEmpty()) {
-            if (serverName_.isEmpty()) {
-              serverName_ = other.serverName_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-            } else {
-              ensureServerNameIsMutable();
-              serverName_.addAll(other.serverName_);
-            }
-            onChanged();
-          }
-        } else {
-          if (!other.serverName_.isEmpty()) {
-            if (serverNameBuilder_.isEmpty()) {
-              serverNameBuilder_.dispose();
-              serverNameBuilder_ = null;
-              serverName_ = other.serverName_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-              serverNameBuilder_ =
-                com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
-                   getServerNameFieldBuilder() : null;
-            } else {
-              serverNameBuilder_.addAllMessages(other.serverName_);
-            }
-          }
-        }
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        for (int i = 0; i < getServerNameCount(); i++) {
-          if (!getServerName(i).isInitialized()) {
-
-            return false;
-          }
-        }
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      // repeated .hbase.pb.ServerName server_name = 1;
-      private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> serverName_ =
-        java.util.Collections.emptyList();
-      private void ensureServerNameIsMutable() {
-        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
-          serverName_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(serverName_);
-          bitField0_ |= 0x00000001;
-         }
-      }
-
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
-
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getServerNameList() {
-        if (serverNameBuilder_ == null) {
-          return java.util.Collections.unmodifiableList(serverName_);
-        } else {
-          return serverNameBuilder_.getMessageList();
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public int getServerNameCount() {
-        if (serverNameBuilder_ == null) {
-          return serverName_.size();
-        } else {
-          return serverNameBuilder_.getCount();
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName(int index) {
-        if (serverNameBuilder_ == null) {
-          return serverName_.get(index);
-        } else {
-          return serverNameBuilder_.getMessage(index);
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public Builder setServerName(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (serverNameBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureServerNameIsMutable();
-          serverName_.set(index, value);
-          onChanged();
-        } else {
-          serverNameBuilder_.setMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public Builder setServerName(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-        if (serverNameBuilder_ == null) {
-          ensureServerNameIsMutable();
-          serverName_.set(index, builderForValue.build());
-          onChanged();
-        } else {
-          serverNameBuilder_.setMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public Builder addServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (serverNameBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureServerNameIsMutable();
-          serverName_.add(value);
-          onChanged();
-        } else {
-          serverNameBuilder_.addMessage(value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public Builder addServerName(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (serverNameBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureServerNameIsMutable();
-          serverName_.add(index, value);
-          onChanged();
-        } else {
-          serverNameBuilder_.addMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public Builder addServerName(
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-        if (serverNameBuilder_ == null) {
-          ensureServerNameIsMutable();
-          serverName_.add(builderForValue.build());
-          onChanged();
-        } else {
-          serverNameBuilder_.addMessage(builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public Builder addServerName(
-          int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-        if (serverNameBuilder_ == null) {
-          ensureServerNameIsMutable();
-          serverName_.add(index, builderForValue.build());
-          onChanged();
-        } else {
-          serverNameBuilder_.addMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public Builder addAllServerName(
-          java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
-        if (serverNameBuilder_ == null) {
-          ensureServerNameIsMutable();
-          super.addAll(values, serverName_);
-          onChanged();
-        } else {
-          serverNameBuilder_.addAllMessages(values);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public Builder clearServerName() {
-        if (serverNameBuilder_ == null) {
-          serverName_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-          onChanged();
-        } else {
-          serverNameBuilder_.clear();
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public Builder removeServerName(int index) {
-        if (serverNameBuilder_ == null) {
-          ensureServerNameIsMutable();
-          serverName_.remove(index);
-          onChanged();
-        } else {
-          serverNameBuilder_.remove(index);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder(
-          int index) {
-        return getServerNameFieldBuilder().getBuilder(index);
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
-          int index) {
-        if (serverNameBuilder_ == null) {
-          return serverName_.get(index);  } else {
-          return serverNameBuilder_.getMessageOrBuilder(index);
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
-           getServerNameOrBuilderList() {
-        if (serverNameBuilder_ != null) {
-          return serverNameBuilder_.getMessageOrBuilderList();
-        } else {
-          return java.util.Collections.unmodifiableList(serverName_);
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() {
-        return getServerNameFieldBuilder().addBuilder(
-            org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder(
-          int index) {
-        return getServerNameFieldBuilder().addBuilder(
-            index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
-      }
-      /**
-       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
-       */
-      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
-           getServerNameBuilderList() {
-        return getServerNameFieldBuilder().getBuilderList();
-      }
-      private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
-          getServerNameFieldBuilder() {
-        if (serverNameBuilder_ == null) {
-          serverNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
-                  serverName_,
-                  ((bitField0_ & 0x00000001) == 0x00000001),
-                  getParentForChildren(),
-                  isClean());
-          serverName_ = null;
-        }
-        return serverNameBuilder_;
-      }
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.ListDeadServersResponse)
-    }
-
-    static {
-      defaultInstance = new ListDeadServersResponse(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.ListDeadServersResponse)
-  }
-
   public interface ClearDeadServersRequestOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
@@ -61697,7 +60638,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> 
         getServerNameList();
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
@@ -61710,7 +60651,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
         getServerNameOrBuilderList();
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
@@ -61831,7 +60772,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
         getServerNameOrBuilderList() {
       return serverName_;
     }
@@ -62119,7 +61060,7 @@ public final class MasterProtos {
               serverNameBuilder_ = null;
               serverName_ = other.serverName_;
               bitField0_ = (bitField0_ & ~0x00000001);
-              serverNameBuilder_ =
+              serverNameBuilder_ = 
                 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                    getServerNameFieldBuilder() : null;
             } else {
@@ -62134,7 +61075,7 @@ public final class MasterProtos {
       public final boolean isInitialized() {
         for (int i = 0; i < getServerNameCount(); i++) {
           if (!getServerName(i).isInitialized()) {
-
+            
             return false;
           }
         }
@@ -62355,7 +61296,7 @@ public final class MasterProtos {
       /**
        * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
        */
-      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
            getServerNameOrBuilderList() {
         if (serverNameBuilder_ != null) {
           return serverNameBuilder_.getMessageOrBuilderList();
@@ -62381,12 +61322,12 @@ public final class MasterProtos {
       /**
        * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
        */
-      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder> 
            getServerNameBuilderList() {
         return getServerNameFieldBuilder().getBuilderList();
       }
       private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
           getServerNameFieldBuilder() {
         if (serverNameBuilder_ == null) {
           serverNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
@@ -62418,7 +61359,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
+    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> 
         getServerNameList();
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
@@ -62431,7 +61372,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
         getServerNameOrBuilderList();
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
@@ -62552,7 +61493,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
         getServerNameOrBuilderList() {
       return serverName_;
     }
@@ -62840,7 +61781,7 @@ public final class MasterProtos {
               serverNameBuilder_ = null;
               serverName_ = other.serverName_;
               bitField0_ = (bitField0_ & ~0x00000001);
-              serverNameBuilder_ =
+              serverNameBuilder_ = 
                 com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
                    getServerNameFieldBuilder() : null;
             } else {
@@ -62855,7 +61796,7 @@ public final class MasterProtos {
       public final boolean isInitialized() {
         for (int i = 0; i < getServerNameCount(); i++) {
           if (!getServerName(i).isInitialized()) {
-
+            
             return false;
           }
         }
@@ -63076,7 +62017,7 @@ public final class MasterProtos {
       /**
        * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
        */
-      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+      public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
            getServerNameOrBuilderList() {
         if (serverNameBuilder_ != null) {
           return serverNameBuilder_.getMessageOrBuilderList();
@@ -63102,12 +62043,12 @@ public final class MasterProtos {
       /**
        * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
        */
-      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
+      public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder> 
            getServerNameBuilderList() {
         return getServerNameFieldBuilder().getBuilderList();
       }
       private com.google.protobuf.RepeatedFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
           getServerNameFieldBuilder() {
         if (serverNameBuilder_ == null) {
           serverNameBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
@@ -63904,18 +62845,6 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request,
           com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse> done);
 
-      /**
-       * <code>rpc ListDeadServers(.hbase.pb.ListDeadServersRequest) returns (.hbase.pb.ListDeadServersResponse);</code>
-       *
-       * <pre>
-       ** Returns a list of Dead Servers.
-       * </pre>
-       */
-      public abstract void listDeadServers(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest request,
-          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse> done);
-
     }
 
     public static com.google.protobuf.Service newReflectiveService(
@@ -64409,14 +63338,6 @@ public final class MasterProtos {
           impl.clearDeadServers(controller, request, done);
         }
 
-        @java.lang.Override
-        public  void listDeadServers(
-            com.google.protobuf.RpcController controller,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest request,
-            com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse> done) {
-          impl.listDeadServers(controller, request, done);
-        }
-
       };
     }
 
@@ -64561,8 +63482,6 @@ public final class MasterProtos {
               return impl.listProcedures(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest)request);
             case 60:
               return impl.clearDeadServers(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest)request);
-            case 61:
-              return impl.listDeadServers(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -64699,8 +63618,6 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
             case 60:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.getDefaultInstance();
-            case 61:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -64837,8 +63754,6 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
             case 60:
               return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance();
-            case 61:
-              return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -65611,18 +64526,6 @@ public final class MasterProtos {
         org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request,
         com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse> done);
 
-    /**
-     * <code>rpc ListDeadServers(.hbase.pb.ListDeadServersRequest) returns (.hbase.pb.ListDeadServersResponse);</code>
-     *
-     * <pre>
-     ** Returns a list of Dead Servers.
-     * </pre>
-     */
-    public abstract void listDeadServers(
-        com.google.protobuf.RpcController controller,
-        org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest request,
-        com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse> done);
-
     public static final
         com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -65950,11 +64853,6 @@ public final class MasterProtos {
             com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse>specializeCallback(
               done));
           return;
-        case 61:
-          this.listDeadServers(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest)request,
-            com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse>specializeCallback(
-              done));
-          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -66091,8 +64989,6 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
         case 60:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest.getDefaultInstance();
-        case 61:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -66229,8 +65125,6 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
         case 60:
           return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance();
-        case 61:
-          return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -67166,21 +66060,6 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.class,
             org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance()));
       }
-
-      public  void listDeadServers(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest request,
-          com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse> done) {
-        channel.callMethod(
-          getDescriptor().getMethods().get(61),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.getDefaultInstance(),
-          com.google.protobuf.RpcUtil.generalizeCallback(
-            done,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.class,
-            org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.getDefaultInstance()));
-      }
     }
 
     public static BlockingInterface newBlockingStub(
@@ -67493,11 +66372,6 @@ public final class MasterProtos {
           com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersRequest request)
           throws com.google.protobuf.ServiceException;
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse listDeadServers(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest request)
-          throws com.google.protobuf.ServiceException;
     }
 
     private static final class BlockingStub implements BlockingInterface {
@@ -68238,18 +67112,6 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ClearDeadServersResponse.getDefaultInstance());
       }
 
-
-      public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse listDeadServers(
-          com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest request)
-          throws com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(61),
-          controller,
-          request,
-          org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersResponse.getDefaultInstance());
-      }
-
     }
 
     // @@protoc_insertion_point(class_scope:hbase.pb.MasterService)
@@ -68831,16 +67693,6 @@ public final class MasterProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_ListDeadServersRequest_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_ListDeadServersRequest_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_ListDeadServersResponse_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_ListDeadServersResponse_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_ClearDeadServersRequest_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -69053,159 +67905,154 @@ public final class MasterProtos {
       "nse.Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_" +
       "AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATIO" +
       "N\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZA" +
-      "TION\020\003\022\023\n\017CELL_VISIBILITY\020\004\"\030\n\026ListDeadS" +
-      "erversRequest\"D\n\027ListDeadServersResponse" +
-      "\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerN" +
-      "ame\"D\n\027ClearDeadServersRequest\022)\n\013server" +
-      "_name\030\001 \003(\0132\024.hbase.pb.ServerName\"E\n\030Cle" +
-      "arDeadServersResponse\022)\n\013server_name\030\001 \003",
-      "(\0132\024.hbase.pb.ServerName*(\n\020MasterSwitch" +
-      "Type\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\315,\n\rMasterSer" +
-      "vice\022e\n\024GetSchemaAlterStatus\022%.hbase.pb." +
-      "GetSchemaAlterStatusRequest\032&.hbase.pb.G" +
-      "etSchemaAlterStatusResponse\022b\n\023GetTableD" +
-      "escriptors\022$.hbase.pb.GetTableDescriptor" +
-      "sRequest\032%.hbase.pb.GetTableDescriptorsR" +
-      "esponse\022P\n\rGetTableNames\022\036.hbase.pb.GetT" +
-      "ableNamesRequest\032\037.hbase.pb.GetTableName" +
-      "sResponse\022Y\n\020GetClusterStatus\022!.hbase.pb",
-      ".GetClusterStatusRequest\032\".hbase.pb.GetC" +
-      "lusterStatusResponse\022V\n\017IsMasterRunning\022" +
-      " .hbase.pb.IsMasterRunningRequest\032!.hbas" +
-      "e.pb.IsMasterRunningResponse\022D\n\tAddColum" +
-      "n\022\032.hbase.pb.AddColumnRequest\032\033.hbase.pb" +
-      ".AddColumnResponse\022M\n\014DeleteColumn\022\035.hba" +
-      "se.pb.DeleteColumnRequest\032\036.hbase.pb.Del" +
-      "eteColumnResponse\022M\n\014ModifyColumn\022\035.hbas" +
-      "e.pb.ModifyColumnRequest\032\036.hbase.pb.Modi" +
-      "fyColumnResponse\022G\n\nMoveRegion\022\033.hbase.p",
-      "b.MoveRegionRequest\032\034.hbase.pb.MoveRegio" +
-      "nResponse\022k\n\026DispatchMergingRegions\022\'.hb" +
-      "ase.pb.DispatchMergingRegionsRequest\032(.h" +
-      "base.pb.DispatchMergingRegionsResponse\022M" +
-      "\n\014AssignRegion\022\035.hbase.pb.AssignRegionRe" +
-      "quest\032\036.hbase.pb.AssignRegionResponse\022S\n" +
-      "\016UnassignRegion\022\037.hbase.pb.UnassignRegio" +
-      "nRequest\032 .hbase.pb.UnassignRegionRespon" +
-      "se\022P\n\rOfflineRegion\022\036.hbase.pb.OfflineRe" +
-      "gionRequest\032\037.hbase.pb.OfflineRegionResp",
-      "onse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteTab" +
-      "leRequest\032\035.hbase.pb.DeleteTableResponse" +
-      "\022P\n\rtruncateTable\022\036.hbase.pb.TruncateTab" +
-      "leRequest\032\037.hbase.pb.TruncateTableRespon" +
-      "se\022J\n\013EnableTable\022\034.hbase.pb.EnableTable" +
-      "Request\032\035.hbase.pb.EnableTableResponse\022M" +
-      "\n\014DisableTable\022\035.hbase.pb.DisableTableRe" +
-      "quest\032\036.hbase.pb.DisableTableResponse\022J\n" +
-      "\013ModifyTable\022\034.hbase.pb.ModifyTableReque" +
-      "st\032\035.hbase.pb.ModifyTableResponse\022J\n\013Cre",
-      "ateTable\022\034.hbase.pb.CreateTableRequest\032\035" +
-      ".hbase.pb.CreateTableResponse\022A\n\010Shutdow" +
-      "n\022\031.hbase.pb.ShutdownRequest\032\032.hbase.pb." +
-      "ShutdownResponse\022G\n\nStopMaster\022\033.hbase.p" +
-      "b.StopMasterRequest\032\034.hbase.pb.StopMaste" +
-      "rResponse\022h\n\031IsMasterInMaintenanceMode\022$" +
-      ".hbase.pb.IsInMaintenanceModeRequest\032%.h" +
-      "base.pb.IsInMaintenanceModeResponse\022>\n\007B" +
-      "alance\022\030.hbase.pb.BalanceRequest\032\031.hbase" +
-      ".pb.BalanceResponse\022_\n\022SetBalancerRunnin",
-      "g\022#.hbase.pb.SetBalancerRunningRequest\032$" +
-      ".hbase.pb.SetBalancerRunningResponse\022\\\n\021" +
-      "IsBalancerEnabled\022\".hbase.pb.IsBalancerE" +
-      "nabledRequest\032#.hbase.pb.IsBalancerEnabl" +
-      "edResponse\022k\n\026SetSplitOrMergeEnabled\022\'.h" +
-      "base.pb.SetSplitOrMergeEnabledRequest\032(." +
-      "hbase.pb.SetSplitOrMergeEnabledResponse\022" +
-      "h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.IsSp" +
-      "litOrMergeEnabledRequest\032\'.hbase.pb.IsSp" +
-      "litOrMergeEnabledResponse\022D\n\tNormalize\022\032",
-      ".hbase.pb.NormalizeRequest\032\033.hbase.pb.No" +
-      "rmalizeResponse\022e\n\024SetNormalizerRunning\022" +
-      "%.hbase.pb.SetNormalizerRunningRequest\032&" +
-      ".hbase.pb.SetNormalizerRunningResponse\022b" +
-      "\n\023IsNormalizerEnabled\022$.hbase.pb.IsNorma" +
-      "lizerEnabledRequest\032%.hbase.pb.IsNormali" +
-      "zerEnabledResponse\022S\n\016RunCatalogScan\022\037.h" +
-      "base.pb.RunCatalogScanRequest\032 .hbase.pb" +
-      ".RunCatalogScanResponse\022e\n\024EnableCatalog" +
-      "Janitor\022%.hbase.pb.EnableCatalogJanitorR",
-      "equest\032&.hbase.pb.EnableCatalogJanitorRe" +
-      "sponse\022n\n\027IsCatalogJanitorEnabled\022(.hbas" +
-      "e.pb.IsCatalogJanitorEnabledRequest\032).hb" +
-      "ase.pb.IsCatalogJanitorEnabledResponse\022V" +
-      "\n\017RunCleanerChore\022 .hbase.pb.RunCleanerC" +
-      "horeRequest\032!.hbase.pb.RunCleanerChoreRe" +
-      "sponse\022k\n\026SetCleanerChoreRunning\022\'.hbase" +
-      ".pb.SetCleanerChoreRunningRequest\032(.hbas" +
-      "e.pb.SetCleanerChoreRunningResponse\022h\n\025I" +
-      "sCleanerChoreEnabled\022&.hbase.pb.IsCleane",
-      "rChoreEnabledRequest\032\'.hbase.pb.IsCleane" +
-      "rChoreEnabledResponse\022^\n\021ExecMasterServi" +
-      "ce\022#.hbase.pb.CoprocessorServiceRequest\032" +
-      "$.hbase.pb.CoprocessorServiceResponse\022A\n" +
-      "\010Snapshot\022\031.hbase.pb.SnapshotRequest\032\032.h" +
-      "base.pb.SnapshotResponse\022h\n\025GetCompleted" +
-      "Snapshots\022&.hbase.pb.GetCompletedSnapsho" +
-      "tsRequest\032\'.hbase.pb.GetCompletedSnapsho" +
-      "tsResponse\022S\n\016DeleteSnapshot\022\037.hbase.pb." +
-      "DeleteSnapshotRequest\032 .hbase.pb.DeleteS",
-      "napshotResponse\022S\n\016IsSnapshotDone\022\037.hbas" +
-      "e.pb.IsSnapshotDoneRequest\032 .hbase.pb.Is" +
-      "SnapshotDoneResponse\022V\n\017RestoreSnapshot\022" +
-      " .hbase.pb.RestoreSnapshotRequest\032!.hbas" +
-      "e.pb.RestoreSnapshotResponse\022h\n\025IsRestor" +
-      "eSnapshotDone\022&.hbase.pb.IsRestoreSnapsh" +
-      "otDoneRequest\032\'.hbase.pb.IsRestoreSnapsh" +
-      "otDoneResponse\022P\n\rExecProcedure\022\036.hbase." +
-      "pb.ExecProcedureRequest\032\037.hbase.pb.ExecP" +
-      "rocedureResponse\022W\n\024ExecProcedureWithRet",
-      "\022\036.hbase.pb.ExecProcedureRequest\032\037.hbase" +
-      ".pb.ExecProcedureResponse\022V\n\017IsProcedure" +
-      "Done\022 .hbase.pb.IsProcedureDoneRequest\032!" +
-      ".hbase.pb.IsProcedureDoneResponse\022V\n\017Mod" +
-      "ifyNamespace\022 .hbase.pb.ModifyNamespaceR" +
-      "equest\032!.hbase.pb.ModifyNamespaceRespons" +
-      "e\022V\n\017CreateNamespace\022 .hbase.pb.CreateNa" +
-      "mespaceRequest\032!.hbase.pb.CreateNamespac" +
-      "eResponse\022V\n\017DeleteNamespace\022 .hbase.pb." +
-      "DeleteNamespaceRequest\032!.hbase.pb.Delete",
-      "NamespaceResponse\022k\n\026GetNamespaceDescrip" +
-      "tor\022\'.hbase.pb.GetNamespaceDescriptorReq" +
-      "uest\032(.hbase.pb.GetNamespaceDescriptorRe" +
-      "sponse\022q\n\030ListNamespaceDescriptors\022).hba" +
-      "se.pb.ListNamespaceDescriptorsRequest\032*." +
-      "hbase.pb.ListNamespaceDescriptorsRespons" +
-      "e\022\206\001\n\037ListTableDescriptorsByNamespace\0220." +
-      "hbase.pb.ListTableDescriptorsByNamespace" +
-      "Request\0321.hbase.pb.ListTableDescriptorsB" +
-      "yNamespaceResponse\022t\n\031ListTableNamesByNa",
-      "mespace\022*.hbase.pb.ListTableNamesByNames" +
-      "paceRequest\032+.hbase.pb.ListTableNamesByN" +
-      "amespaceResponse\022A\n\010SetQuota\022\031.hbase.pb." +
-      "SetQuotaRequest\032\032.hbase.pb.SetQuotaRespo" +
-      "nse\022x\n\037getLastMajorCompactionTimestamp\022)" +
-      ".hbase.pb.MajorCompactionTimestampReques" +
-      "t\032*.hbase.pb.MajorCompactionTimestampRes" +
-      "ponse\022\212\001\n(getLastMajorCompactionTimestam" +
-      "pForRegion\0222.hbase.pb.MajorCompactionTim" +
-      "estampForRegionRequest\032*.hbase.pb.MajorC",
-      "ompactionTimestampResponse\022_\n\022getProcedu" +
-      "reResult\022#.hbase.pb.GetProcedureResultRe" +
-      "quest\032$.hbase.pb.GetProcedureResultRespo" +
-      "nse\022h\n\027getSecurityCapabilities\022%.hbase.p" +
-      "b.SecurityCapabilitiesRequest\032&.hbase.pb" +
-      ".SecurityCapabilitiesResponse\022S\n\016AbortPr" +
-      "ocedure\022\037.hbase.pb.AbortProcedureRequest" +
-      "\032 .hbase.pb.AbortProcedureResponse\022S\n\016Li" +
-      "stProcedures\022\037.hbase.pb.ListProceduresRe" +
-      "quest\032 .hbase.pb.ListProceduresResponse\022",
-      "Y\n\020ClearDeadServers\022!.hbase.pb.ClearDead" +
-      "ServersRequest\032\".hbase.pb.ClearDeadServe" +
-      "rsResponse\022V\n\017ListDeadServers\022 .hbase.pb" +
-      ".ListDeadServersRequest\032!.hbase.pb.ListD" +
-      "eadServersResponseBB\n*org.apache.hadoop." +
-      "hbase.protobuf.generatedB\014MasterProtosH\001" +
-      "\210\001\001\240\001\001"
+      "TION\020\003\022\023\n\017CELL_VISIBILITY\020\004\"D\n\027ClearDead" +
+      "ServersRequest\022)\n\013server_name\030\001 \003(\0132\024.hb" +
+      "ase.pb.ServerName\"E\n\030ClearDeadServersRes" +
+      "ponse\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb.Se" +
+      "rverName*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022" +
+      "\t\n\005MERGE\020\0012\365+\n\rMasterService\022e\n\024GetSchem",
+      "aAlterStatus\022%.hbase.pb.GetSchemaAlterSt" +
+      "atusRequest\032&.hbase.pb.GetSchemaAlterSta" +
+      "tusResponse\022b\n\023GetTableDescriptors\022$.hba" +
+      "se.pb.GetTableDescriptorsRequest\032%.hbase" +
+      ".pb.GetTableDescriptorsResponse\022P\n\rGetTa" +
+      "bleNames\022\036.hbase.pb.GetTableNamesRequest" +
+      "\032\037.hbase.pb.GetTableNamesResponse\022Y\n\020Get" +
+      "ClusterStatus\022!.hbase.pb.GetClusterStatu" +
+      "sRequest\032\".hbase.pb.GetClusterStatusResp" +
+      "onse\022V\n\017IsMasterRunning\022 .hbase.pb.IsMas",
+      "terRunningRequest\032!.hbase.pb.IsMasterRun" +
+      "ningResponse\022D\n\tAddColumn\022\032.hbase.pb.Add" +
+      "ColumnRequest\032\033.hbase.pb.AddColumnRespon" +
+      "se\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteColu" +
+      "mnRequest\032\036.hbase.pb.DeleteColumnRespons" +
+      "e\022M\n\014ModifyColumn\022\035.hbase.pb.ModifyColum" +
+      "nRequest\032\036.hbase.pb.ModifyColumnResponse" +
+      "\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegionRequ" +
+      "est\032\034.hbase.pb.MoveRegionResponse\022k\n\026Dis" +
+      "patchMergingRegions\022\'.hbase.pb.DispatchM",
+      "ergingRegionsRequest\032(.hbase.pb.Dispatch" +
+      "MergingRegionsResponse\022M\n\014AssignRegion\022\035" +
+      ".hbase.pb.AssignRegionRequest\032\036.hbase.pb" +
+      ".AssignRegionResponse\022S\n\016UnassignRegion\022" +
+      "\037.hbase.pb.UnassignRegionRequest\032 .hbase" +
+      ".pb.UnassignRegionResponse\022P\n\rOfflineReg" +
+      "ion\022\036.hbase.pb.OfflineRegionRequest\032\037.hb" +
+      "ase.pb.OfflineRegionResponse\022J\n\013DeleteTa" +
+      "ble\022\034.hbase.pb.DeleteTableRequest\032\035.hbas" +
+      "e.pb.DeleteTableResponse\022P\n\rtruncateTabl",
+      "e\022\036.hbase.pb.TruncateTableRequest\032\037.hbas" +
+      "e.pb.TruncateTableResponse\022J\n\013EnableTabl" +
+      "e\022\034.hbase.pb.EnableTableRequest\032\035.hbase." +
+      "pb.EnableTableResponse\022M\n\014DisableTable\022\035" +
+      ".hbase.pb.DisableTableRequest\032\036.hbase.pb" +
+      ".DisableTableResponse\022J\n\013ModifyTable\022\034.h" +
+      "base.pb.ModifyTableRequest\032\035.hbase.pb.Mo" +
+      "difyTableResponse\022J\n\013CreateTable\022\034.hbase" +
+      ".pb.CreateTableRequest\032\035.hbase.pb.Create" +
+      "TableResponse\022A\n\010Shutdown\022\031.hbase.pb.Shu",
+      "tdownRequest\032\032.hbase.pb.ShutdownResponse" +
+      "\022G\n\nStopMaster\022\033.hbase.pb.StopMasterRequ" +
+      "est\032\034.hbase.pb.StopMasterResponse\022h\n\031IsM" +
+      "asterInMaintenanceMode\022$.hbase.pb.IsInMa" +
+      "intenanceModeRequest\032%.hbase.pb.IsInMain" +
+      "tenanceModeResponse\022>\n\007Balance\022\030.hbase.p" +
+      "b.BalanceRequest\032\031.hbase.pb.BalanceRespo" +
+      "nse\022_\n\022SetBalancerRunning\022#.hbase.pb.Set" +
+      "BalancerRunningRequest\032$.hbase.pb.SetBal" +
+      "ancerRunningResponse\022\\\n\021IsBalancerEnable",
+      "d\022\".hbase.pb.IsBalancerEnabledRequest\032#." +
+      "hbase.pb.IsBalancerEnabledResponse\022k\n\026Se" +
+      "tSplitOrMergeEnabled\022\'.hbase.pb.SetSplit" +
+      "OrMergeEnabledRequest\032(.hbase.pb.SetSpli" +
+      "tOrMergeEnabledResponse\022h\n\025IsSplitOrMerg" +
+      "eEnabled\022&.hbase.pb.IsSplitOrMergeEnable" +
+      "dRequest\032\'.hbase.pb.IsSplitOrMergeEnable" +
+      "dResponse\022D\n\tNormalize\022\032.hbase.pb.Normal" +
+      "izeRequest\032\033.hbase.pb.NormalizeResponse\022" +
+      "e\n\024SetNormalizerRunning\022%.hbase.pb.SetNo",
+      "rmalizerRunningRequest\032&.hbase.pb.SetNor" +
+      "malizerRunningResponse\022b\n\023IsNormalizerEn" +
+      "abled\022$.hbase.pb.IsNormalizerEnabledRequ" +
+      "est\032%.hbase.pb.IsNormalizerEnabledRespon" +
+      "se\022S\n\016RunCatalogScan\022\037.hbase.pb.RunCatal" +
+      "ogScanRequest\032 .hbase.pb.RunCatalogScanR" +
+      "esponse\022e\n\024EnableCatalogJanitor\022%.hbase." +
+      "pb.EnableCatalogJanitorRequest\032&.hbase.p" +
+      "b.EnableCatalogJanitorResponse\022n\n\027IsCata" +
+      "logJanitorEnabled\022(.hbase.pb.IsCatalogJa",
+      "nitorEnabledRequest\032).hbase.pb.IsCatalog" +
+      "JanitorEnabledResponse\022V\n\017RunCleanerChor" +
+      "e\022 .hbase.pb.RunCleanerChoreRequest\032!.hb" +
+      "ase.pb.RunCleanerChoreResponse\022k\n\026SetCle" +
+      "anerChoreRunning\022\'.hbase.pb.SetCleanerCh" +
+      "oreRunningRequest\032(.hbase.pb.SetCleanerC" +
+      "horeRunningResponse\022h\n\025IsCleanerChoreEna" +
+      "bled\022&.hbase.pb.IsCleanerChoreEnabledReq" +
+      "uest\032\'.hbase.pb.IsCleanerChoreEnabledRes" +
+      "ponse\022^\n\021ExecMasterService\022#.hbase.pb.Co",
+      "processorServiceRequest\032$.hbase.pb.Copro" +
+      "cessorServiceResponse\022A\n\010Snapshot\022\031.hbas" +
+      "e.pb.SnapshotRequest\032\032.hbase.pb.Snapshot" +
+      "Response\022h\n\025GetCompletedSnapshots\022&.hbas" +
+      "e.pb.GetCompletedSnapshotsRequest\032\'.hbas" +
+      "e.pb.GetCompletedSnapshotsResponse\022S\n\016De" +
+      "leteSnapshot\022\037.hbase.pb.DeleteSnapshotRe" +
+      "quest\032 .hbase.pb.DeleteSnapshotResponse\022" +
+      "S\n\016IsSnapshotDone\022\037.hbase.pb.IsSnapshotD" +
+      "oneRequest\032 .hbase.pb.IsSnapshotDoneResp",
+      "onse\022V\n\017RestoreSnapshot\022 .hbase.pb.Resto" +
+      "reSnapshotRequest\032!.hbase.pb.RestoreSnap" +
+      "shotResponse\022h\n\025IsRestoreSnapshotDone\022&." +
+      "hbase.pb.IsRestoreSnapshotDoneRequest\032\'." +
+      "hbase.pb.IsRestoreSnapshotDoneResponse\022P" +
+      "\n\rExecProcedure\022\036.hbase.pb.ExecProcedure" +
+      "Request\032\037.hbase.pb.ExecProcedureResponse" +
+      "\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Exec" +
+      "ProcedureRequest\032\037.hbase.pb.ExecProcedur" +
+      "eResponse\022V\n\017IsProcedureDone\022 .hbase.pb.",
+      "IsProcedureDoneRequest\032!.hbase.pb.IsProc" +
+      "edureDoneResponse\022V\n\017ModifyNamespace\022 .h" +
+      "base.pb.ModifyNamespaceRequest\032!.hbase.p" +
+      "b.ModifyNamespaceResponse\022V\n\017CreateNames" +
+      "pace\022 .hbase.pb.CreateNamespaceRequest\032!" +
+      ".hbase.pb.CreateNamespaceResponse\022V\n\017Del" +
+      "eteNamespace\022 .hbase.pb.DeleteNamespaceR" +
+      "equest\032!.hbase.pb.DeleteNamespaceRespons" +
+      "e\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb.G" +
+      "etNamespaceDescriptorRequest\032(.hbase.pb.",
+      "GetNamespaceDescriptorResponse\022q\n\030ListNa" +
+      "mespaceDescriptors\022).hbase.pb.ListNamesp" +
+      "aceDescriptorsRequest\032*.hbase.pb.ListNam" +
+      "espaceDescriptorsResponse\022\206\001\n\037ListTableD" +
+      "escriptorsByNamespace\0220.hbase.pb.ListTab" +
+      "leDescriptorsByNamespaceRequest\0321.hbase." +
+      "pb.ListTableDescriptorsByNamespaceRespon" +
+      "se\022t\n\031ListTableNamesByNamespace\022*.hbase." +
+      "pb.ListTableNamesByNamespaceRequest\032+.hb" +
+      "ase.pb.ListTableNamesByNamespaceResponse",
+      "\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaRequest\032" +
+      "\032.hbase.pb.SetQuotaResponse\022x\n\037getLastMa" +
+      "jorCompactionTimestamp\022).hbase.pb.MajorC" +
+      "ompactionTimestampRequest\032*.hbase.pb.Maj" +
+      "orCompactionTimestampResponse\022\212\001\n(getLas" +
+      "tMajorCompactionTimestampForRegion\0222.hba" +
+      "se.pb.MajorCompactionTimestampForRegionR" +
+      "equest\032*.hbase.pb.MajorCompactionTimesta" +
+      "mpResponse\022_\n\022getProcedureResult\022#.hbase" +
+      ".pb.GetProcedureResultRequest\032$.hbase.pb",
+      ".GetProcedureResultResponse\022h\n\027getSecuri" +
+      "tyCapabilities\022%.hbase.pb.SecurityCapabi" +
+      "litiesRequest\032&.hbase.pb.SecurityCapabil" +
+      "itiesResponse\022S\n\016AbortProcedure\022\037.hbase." +
+      "pb.AbortProcedureRequest\032 .hbase.pb.Abor" +
+      "tProcedureResponse\022S\n\016ListProcedures\022\037.h" +
+      "base.pb.ListProceduresRequest\032 .hbase.pb" +
+      ".ListProceduresResponse\022Y\n\020ClearDeadServ" +
+      "ers\022!.hbase.pb.ClearDeadServersRequest\032\"" +
+      ".hbase.pb.ClearDeadServersResponseBB\n*or",
+      "g.apache.hadoop.hbase.protobuf.generated" +
+      "B\014MasterProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -69902,26 +68749,14 @@ public final class MasterProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SecurityCapabilitiesResponse_descriptor,
               new java.lang.String[] { "Capabilities", });
-          internal_static_hbase_pb_ListDeadServersRequest_descriptor =
-            getDescriptor().getMessageTypes().get(115);
-          internal_static_hbase_pb_ListDeadServersRequest_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_ListDeadServersRequest_descriptor,
-              new java.lang.String[] { });
-          internal_static_hbase_pb_ListDeadServersResponse_descriptor =
-            getDescriptor().getMessageTypes().get(116);
-          internal_static_hbase_pb_ListDeadServersResponse_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_ListDeadServersResponse_descriptor,
-              new java.lang.String[] { "ServerName", });
           internal_static_hbase_pb_ClearDeadServersRequest_descriptor =
-            getDescriptor().getMessageTypes().get(117);
+            getDescriptor().getMessageTypes().get(115);
           internal_static_hbase_pb_ClearDeadServersRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ClearDeadServersRequest_descriptor,
               new java.lang.String[] { "ServerName", });
           internal_static_hbase_pb_ClearDeadServersResponse_descriptor =
-            getDescriptor().getMessageTypes().get(118);
+            getDescriptor().getMessageTypes().get(116);
           internal_static_hbase_pb_ClearDeadServersResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ClearDeadServersResponse_descriptor,

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index ef86114..341f687 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -556,13 +556,6 @@ message SecurityCapabilitiesResponse {
   repeated Capability capabilities = 1;
 }
 
-message ListDeadServersRequest {
-}
-
-message ListDeadServersResponse {
-  repeated ServerName server_name = 1;
-}
-
 message ClearDeadServersRequest {
   repeated ServerName server_name = 1;
 }
@@ -867,9 +860,4 @@ service MasterService {
   /** clear dead servers from master*/
   rpc ClearDeadServers(ClearDeadServersRequest)
     returns(ClearDeadServersResponse);
-
-  /** Returns a list of Dead Servers. */
-  rpc ListDeadServers(ListDeadServersRequest)
-    returns(ListDeadServersResponse);
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index c4f5952..a6fbd05 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -31,6 +31,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -953,15 +954,13 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
   }
 
   @Override
-  public void preListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
+  public void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
       throws IOException {
-
   }
 
   @Override
-  public void postListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
-      throws IOException {
-
+  public void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      ClusterStatus status) throws IOException {
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
index 9d5c2f9..7f7a5e5 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
@@ -54,7 +54,8 @@ MasterAddressTracker masterAddressTracker = master.getMasterAddressTracker();
         <th>Start Time</th>
     </tr>
     <%java>
-    Collection<ServerName> backup_masters = master.getClusterStatus().getBackupMasters();
+    Collection<ServerName> backup_masters
+      = master.getClusterStatusWithoutCoprocessor().getBackupMasters();
     ServerName [] backupServerNames = backup_masters.toArray(new ServerName[backup_masters.size()]);
     Arrays.sort(backupServerNames);
     for (ServerName serverName : backupServerNames) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
index 9ad8453..6452226 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hbase.coprocessor;
 
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -67,12 +68,13 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver
   }
 
   @Override
-  public void preListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
+  public void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
     throws IOException {
   }
 
   @Override
-  public void postListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
+  public void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      ClusterStatus status)
     throws IOException {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index ca2bd53..842d917 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hbase.coprocessor;
 
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -78,13 +79,14 @@ public class BaseMasterObserver implements MasterObserver {
   }
 
   @Override
-  public void preListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
-    throws IOException {
+  public void preGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx)
+      throws IOException {
   }
 
   @Override
-  public void postListDeadServers(ObserverContext<MasterCoprocessorEnvironment> ctx)
-    throws IOException {
+  public void postGetClusterStatus(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      ClusterStatus status)
+      throws IOException {
   }
 
   @Override
@@ -474,7 +476,7 @@ public class BaseMasterObserver implements MasterObserver {
   public void postListSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final SnapshotDescription snapshot) throws IOException {
   }
-  
+
   @Override
   public void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor)


[3/3] hbase git commit: HBASE-19131 Add the ClusterStatus hook and cleanup other hooks which can be replaced by ClusterStatus hook

Posted by ch...@apache.org.
HBASE-19131 Add the ClusterStatus hook and cleanup other hooks which can be replaced by ClusterStatus hook


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9ad69e44
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9ad69e44
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9ad69e44

Branch: refs/heads/branch-1.4
Commit: 9ad69e446529e5b48595063039ef58730e9fd842
Parents: 4d7c40a
Author: Chia-Ping Tsai <ch...@gmail.com>
Authored: Sat Nov 4 22:10:35 2017 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Sun Nov 5 09:54:56 2017 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/ClusterStatus.java  |   17 +-
 .../org/apache/hadoop/hbase/ServerLoad.java     |   32 +
 .../hadoop/hbase/client/ConnectionManager.java  |    9 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   10 +-
 .../hbase/protobuf/generated/MasterProtos.java  | 1497 ++----------------
 hbase-protocol/src/main/protobuf/Master.proto   |   12 -
 .../hbase/rsgroup/RSGroupAdminEndpoint.java     |    9 +-
 .../tmpl/master/BackupMasterStatusTmpl.jamon    |    3 +-
 .../BaseMasterAndRegionObserver.java            |    6 +-
 .../hbase/coprocessor/BaseMasterObserver.java   |   12 +-
 .../hbase/coprocessor/MasterObserver.java       |   18 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   24 +-
 .../hbase/master/MasterCoprocessorHost.java     |   15 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   29 -
 .../master/balancer/ClusterStatusChore.java     |    2 +-
 .../hbase/security/access/AccessController.java |   23 +-
 .../hbase/client/TestClientClusterStatus.java   |  188 +++
 .../hbase/coprocessor/TestMasterObserver.java   |    7 +-
 .../TestSplitTransactionOnCluster.java          |    2 +-
 .../security/access/TestAccessController.java   |   14 +
 20 files changed, 481 insertions(+), 1448 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 97b0ea2..3ca55fc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -19,6 +19,7 @@
 
 package org.apache.hadoop.hbase;
 
+import com.google.common.base.Objects;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -194,14 +195,14 @@ public class ClusterStatus extends VersionedWritable {
     if (!(o instanceof ClusterStatus)) {
       return false;
     }
-    return (getVersion() == ((ClusterStatus)o).getVersion()) &&
-      getHBaseVersion().equals(((ClusterStatus)o).getHBaseVersion()) &&
-      this.liveServers.equals(((ClusterStatus)o).liveServers) &&
-      this.deadServers.containsAll(((ClusterStatus)o).deadServers) &&
-      Arrays.equals(this.masterCoprocessors,
-                    ((ClusterStatus)o).masterCoprocessors) &&
-      this.master.equals(((ClusterStatus)o).master) &&
-      this.backupMasters.containsAll(((ClusterStatus)o).backupMasters);
+    ClusterStatus other = (ClusterStatus) o;
+    return Objects.equal(getHBaseVersion(), other.getHBaseVersion()) &&
+        Objects.equal(this.liveServers, other.liveServers) &&
+        getDeadServerNames().containsAll(other.getDeadServerNames()) &&
+        Arrays.equals(getMasterCoprocessors(), other.getMasterCoprocessors()) &&
+        Objects.equal(getMaster(), other.getMaster()) &&
+        getBackupMasters().containsAll(other.getBackupMasters()) &&
+        Objects.equal(getClusterId(), other.getClusterId());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index ddca47a..e6d2a1d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -20,6 +20,7 @@
 
 package org.apache.hadoop.hbase;
 
+import com.google.common.base.Objects;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
@@ -329,4 +330,35 @@ public class ServerLoad {
   public long getReportTime() {
     return reportTime;
   }
+
+  @Override
+  public int hashCode() {
+    return Objects.hashCode(stores, storefiles, storeUncompressedSizeMB,
+        storefileSizeMB, memstoreSizeMB, storefileIndexSizeMB, readRequestsCount,
+        writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB,
+        totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs);
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == this) {
+      return true;
+    }
+    if (other instanceof ServerLoad) {
+      ServerLoad sl = ((ServerLoad) other);
+      return stores == sl.stores && storefiles == sl.storefiles
+          && storeUncompressedSizeMB == sl.storeUncompressedSizeMB
+          && storefileSizeMB == sl.storefileSizeMB
+          && memstoreSizeMB == sl.memstoreSizeMB
+          && storefileIndexSizeMB == sl.storefileIndexSizeMB
+          && readRequestsCount == sl.readRequestsCount
+          && writeRequestsCount == sl.writeRequestsCount
+          && rootIndexSizeKB == sl.rootIndexSizeKB
+          && totalStaticIndexSizeKB == sl.totalStaticIndexSizeKB
+          && totalStaticBloomSizeKB == sl.totalStaticBloomSizeKB
+          && totalCompactingKVs == sl.totalCompactingKVs
+          && currentCompactedKVs == sl.currentCompactedKVs;
+    }
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index 8204bb8..3f2cf1c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -1808,13 +1808,6 @@ class ConnectionManager {
         }
 
         @Override
-        public MasterProtos.ListDeadServersResponse listDeadServers(
-            RpcController controller,
-            MasterProtos.ListDeadServersRequest request) throws ServiceException {
-          return stub.listDeadServers(controller, request);
-        }
-
-        @Override
         public AddColumnResponse addColumn(RpcController controller, AddColumnRequest request)
         throws ServiceException {
           return stub.addColumn(controller, request);
@@ -2721,7 +2714,7 @@ class ConnectionManager {
     public boolean hasCellBlockSupport() {
       return this.rpcClient.hasCellBlockSupport();
     }
-    
+
     @Override
     public ConnectionConfiguration getConnectionConfiguration() {
       return this.connectionConfig;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9ad69e44/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index d543ddb..0925e38 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -136,7 +136,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshot
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListDeadServersRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListProceduresRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
@@ -4903,14 +4902,7 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public List<ServerName> listDeadServers() throws IOException {
-    return executeCallable(new MasterCallable<List<ServerName>>(getConnection()) {
-      @Override
-      public List<ServerName> call(int callTimeout) throws ServiceException {
-        ListDeadServersRequest req = ListDeadServersRequest.newBuilder().build();
-        return ProtobufUtil.toServerNameList(
-                master.listDeadServers(null, req).getServerNameList());
-      }
-    });
+    return new ArrayList<>(getClusterStatus().getDeadServerNames());
   }
 
   @Override