Posted to commits@hbase.apache.org by jx...@apache.org on 2014/03/25 20:34:55 UTC

svn commit: r1581479 [1/9] - in /hbase/trunk: hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-it/src/test/java/org/apache/hadoop/hbase/ hbase-it/src/test/java/org/apache/hadoo...

Author: jxiang
Date: Tue Mar 25 19:34:52 2014
New Revision: 1581479

URL: http://svn.apache.org/r1581479
Log:
HBASE-10569 Co-locate meta and master

Added:
    hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java   (with props)
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java   (with props)
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java   (with props)
Modified:
    hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
    hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
    hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
    hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
    hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java
    hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
    hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
    hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
    hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterDumpServlet.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterStatusServlet.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/ClusterLoadState.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/CreateTableHandler.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DeleteTableHandler.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/EnableTableHandler.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ModifyTableHandler.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableAddFamilyHandler.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableDeleteFamilyHandler.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableModifyFamilyHandler.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/StateDumpServlet.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AnnotationReadingPriorityFunction.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LogRoller.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSDumpServlet.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
    hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/util/JVMClusterUtil.java
    hbase/trunk/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestDrainingServer.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestGlobalMemStoreSize.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestZooKeeper.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientScannerRPCTimeout.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiParallel.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestHTableWrapper.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithAbort.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterCoprocessorExceptionWithRemove.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverInterface.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorExceptionWithAbort.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/TestBlockReorder.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestLoadIncrementalHFilesSplitRecovery.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/Mocking.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestActiveMasterManager.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterFailover.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetricsWrapper.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterRestartAfterDisablingTable.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterShutdown.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterStatusServlet.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRollingRestart.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestZKBasedOpenCloseRegion.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/BalancerTestBase.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestBaseLoadBalancer.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestSnapshotFromMaster.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/OOMERegionServer.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestClusterId.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPriorityRpc.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestQosFunction.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSKilledWhenInitializing.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRSStatusServlet.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerNoMaster.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestNamespaceCommands.java
    hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/SnapshotTestingUtils.java

Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/CatalogTracker.java Tue Mar 25 19:34:52 2014
@@ -129,7 +129,7 @@ public class CatalogTracker {
    * @throws IOException
    */
   public CatalogTracker(final Configuration conf) throws IOException {
-    this(null, conf, null);
+    this(null, conf, HConnectionManager.getConnection(conf), null);
   }
 
   /**
@@ -145,18 +145,14 @@ public class CatalogTracker {
    * @throws IOException
    */
   public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
-      Abortable abortable)
-  throws IOException {
-    this(zk, conf, HConnectionManager.getConnection(conf), abortable);
-  }
-
-  public CatalogTracker(final ZooKeeperWatcher zk, final Configuration conf,
       HConnection connection, Abortable abortable)
   throws IOException {
     this.connection = connection;
     if (abortable == null) {
       // A connection is abortable.
       this.abortable = this.connection;
+    } else {
+      this.abortable = abortable;
     }
     Abortable throwableAborter = new Abortable() {
 
@@ -322,6 +318,7 @@ public class CatalogTracker {
    * invocation, or may be null.
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   private AdminService.BlockingInterface getCachedConnection(ServerName sn)
   throws IOException {
     if (sn == null) {

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java?rev=1581479&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java Tue Mar 25 19:34:52 2014
@@ -0,0 +1,409 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MasterNotRunningException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
+
+/**
+ * An internal class that adapts a {@link HConnection}.
+ * HConnection is created from HConnectionManager. The default
+ * implementation talks to region servers over RPC since it
+ * doesn't know if the connection is used by one region server
+ * itself. This adapter makes it possible to change some of the
+ * default logic, especially when the connection is used
+ * internally by the region server.
+ *
+ * @see ConnectionUtils#createShortCircuitHConnection(HConnection, ServerName,
+ * AdminService.BlockingInterface, ClientService.BlockingInterface)
+ */
+@InterfaceAudience.Private
+@SuppressWarnings("deprecation")
+//NOTE: DO NOT make this class public. It was made package-private on purpose.
+class ConnectionAdapter implements ClusterConnection {
+
+  private final ClusterConnection wrappedConnection;
+
+  public ConnectionAdapter(HConnection c) {
+    wrappedConnection = (ClusterConnection)c;
+  }
+
+  @Override
+  public void abort(String why, Throwable e) {
+    wrappedConnection.abort(why, e);
+  }
+
+  @Override
+  public boolean isAborted() {
+    return wrappedConnection.isAborted();
+  }
+
+  @Override
+  public void close() throws IOException {
+    wrappedConnection.close();
+  }
+
+  @Override
+  public Configuration getConfiguration() {
+    return wrappedConnection.getConfiguration();
+  }
+
+  @Override
+  public HTableInterface getTable(String tableName) throws IOException {
+    return wrappedConnection.getTable(tableName);
+  }
+
+  @Override
+  public HTableInterface getTable(byte[] tableName) throws IOException {
+    return wrappedConnection.getTable(tableName);
+  }
+
+  @Override
+  public HTableInterface getTable(TableName tableName) throws IOException {
+    return wrappedConnection.getTable(tableName);
+  }
+
+  @Override
+  public HTableInterface getTable(String tableName, ExecutorService pool)
+      throws IOException {
+    return wrappedConnection.getTable(tableName, pool);
+  }
+
+  @Override
+  public HTableInterface getTable(byte[] tableName, ExecutorService pool)
+      throws IOException {
+    return wrappedConnection.getTable(tableName, pool);
+  }
+
+  @Override
+  public HTableInterface getTable(TableName tableName, ExecutorService pool)
+      throws IOException {
+    return wrappedConnection.getTable(tableName, pool);
+  }
+
+  @Override
+  public boolean isMasterRunning() throws MasterNotRunningException,
+      ZooKeeperConnectionException {
+    return wrappedConnection.isMasterRunning();
+  }
+
+  @Override
+  public boolean isTableEnabled(TableName tableName) throws IOException {
+    return wrappedConnection.isTableEnabled(tableName);
+  }
+
+  @Override
+  public boolean isTableEnabled(byte[] tableName) throws IOException {
+    return wrappedConnection.isTableEnabled(tableName);
+  }
+
+  @Override
+  public boolean isTableDisabled(TableName tableName) throws IOException {
+    return wrappedConnection.isTableDisabled(tableName);
+  }
+
+  @Override
+  public boolean isTableDisabled(byte[] tableName) throws IOException {
+    return wrappedConnection.isTableDisabled(tableName);
+  }
+
+  @Override
+  public boolean isTableAvailable(TableName tableName) throws IOException {
+    return wrappedConnection.isTableAvailable(tableName);
+  }
+
+  @Override
+  public boolean isTableAvailable(byte[] tableName) throws IOException {
+    return wrappedConnection.isTableAvailable(tableName);
+  }
+
+  @Override
+  public boolean isTableAvailable(TableName tableName, byte[][] splitKeys)
+      throws IOException {
+    return wrappedConnection.isTableAvailable(tableName, splitKeys);
+  }
+
+  @Override
+  public boolean isTableAvailable(byte[] tableName, byte[][] splitKeys)
+      throws IOException {
+    return wrappedConnection.isTableAvailable(tableName, splitKeys);
+  }
+
+  @Override
+  public HTableDescriptor[] listTables() throws IOException {
+    return wrappedConnection.listTables();
+  }
+
+  @Override
+  public String[] getTableNames() throws IOException {
+    return wrappedConnection.getTableNames();
+  }
+
+  @Override
+  public TableName[] listTableNames() throws IOException {
+    return wrappedConnection.listTableNames();
+  }
+
+  @Override
+  public HTableDescriptor getHTableDescriptor(TableName tableName)
+      throws IOException {
+    return wrappedConnection.getHTableDescriptor(tableName);
+  }
+
+  @Override
+  public HTableDescriptor getHTableDescriptor(byte[] tableName)
+      throws IOException {
+    return wrappedConnection.getHTableDescriptor(tableName);
+  }
+
+  @Override
+  public HRegionLocation locateRegion(TableName tableName, byte[] row)
+      throws IOException {
+    return wrappedConnection.locateRegion(tableName, row);
+  }
+
+  @Override
+  public HRegionLocation locateRegion(byte[] tableName, byte[] row)
+      throws IOException {
+    return wrappedConnection.locateRegion(tableName, row);
+  }
+
+  @Override
+  public void clearRegionCache() {
+    wrappedConnection.clearRegionCache();
+  }
+
+  @Override
+  public void clearRegionCache(TableName tableName) {
+    wrappedConnection.clearRegionCache(tableName);
+  }
+
+  @Override
+  public void clearRegionCache(byte[] tableName) {
+    wrappedConnection.clearRegionCache(tableName);
+  }
+
+  @Override
+  public void deleteCachedRegionLocation(HRegionLocation location) {
+    wrappedConnection.deleteCachedRegionLocation(location);
+  }
+
+  @Override
+  public HRegionLocation relocateRegion(TableName tableName, byte[] row)
+      throws IOException {
+    return wrappedConnection.relocateRegion(tableName, row);
+  }
+
+  @Override
+  public HRegionLocation relocateRegion(byte[] tableName, byte[] row)
+      throws IOException {
+    return wrappedConnection.relocateRegion(tableName, row);
+  }
+
+  @Override
+  public void updateCachedLocations(TableName tableName, byte[] rowkey,
+      Object exception, HRegionLocation source) {
+    wrappedConnection.updateCachedLocations(tableName, rowkey, exception, source);
+  }
+
+  @Override
+  public void updateCachedLocations(TableName tableName, byte[] rowkey,
+      Object exception, ServerName source) {
+    wrappedConnection.updateCachedLocations(tableName, rowkey, exception, source);
+  }
+
+  @Override
+  public void updateCachedLocations(byte[] tableName, byte[] rowkey,
+      Object exception, HRegionLocation source) {
+    wrappedConnection.updateCachedLocations(tableName, rowkey, exception, source);
+  }
+
+  @Override
+  public HRegionLocation locateRegion(byte[] regionName) throws IOException {
+    return wrappedConnection.locateRegion(regionName);
+  }
+
+  @Override
+  public List<HRegionLocation> locateRegions(TableName tableName)
+      throws IOException {
+    return wrappedConnection.locateRegions(tableName);
+  }
+
+  @Override
+  public List<HRegionLocation> locateRegions(byte[] tableName)
+      throws IOException {
+    return wrappedConnection.locateRegions(tableName);
+  }
+
+  @Override
+  public List<HRegionLocation> locateRegions(TableName tableName,
+      boolean useCache, boolean offlined) throws IOException {
+    return wrappedConnection.locateRegions(tableName, useCache, offlined);
+  }
+
+  @Override
+  public List<HRegionLocation> locateRegions(byte[] tableName,
+      boolean useCache, boolean offlined) throws IOException {
+    return wrappedConnection.locateRegions(tableName, useCache, offlined);
+  }
+
+  @Override
+  public MasterService.BlockingInterface getMaster() throws IOException {
+    return wrappedConnection.getMaster();
+  }
+
+  @Override
+  public AdminService.BlockingInterface getAdmin(
+      ServerName serverName) throws IOException {
+    return wrappedConnection.getAdmin(serverName);
+  }
+
+  @Override
+  public ClientService.BlockingInterface getClient(
+      ServerName serverName) throws IOException {
+    return wrappedConnection.getClient(serverName);
+  }
+
+  @Override
+  public AdminService.BlockingInterface getAdmin(
+      ServerName serverName, boolean getMaster) throws IOException {
+    return wrappedConnection.getAdmin(serverName, getMaster);
+  }
+
+  @Override
+  public HRegionLocation getRegionLocation(TableName tableName, byte[] row,
+      boolean reload) throws IOException {
+    return wrappedConnection.getRegionLocation(tableName, row, reload);
+  }
+
+  @Override
+  public HRegionLocation getRegionLocation(byte[] tableName, byte[] row,
+      boolean reload) throws IOException {
+    return wrappedConnection.getRegionLocation(tableName, row, reload);
+  }
+
+  @Override
+  public void processBatch(List<? extends Row> actions, TableName tableName,
+      ExecutorService pool, Object[] results) throws IOException,
+      InterruptedException {
+    wrappedConnection.processBatch(actions, tableName, pool, results);
+  }
+
+  @Override
+  public void processBatch(List<? extends Row> actions, byte[] tableName,
+      ExecutorService pool, Object[] results) throws IOException,
+      InterruptedException {
+    wrappedConnection.processBatch(actions, tableName, pool, results);
+  }
+
+  @Override
+  public <R> void processBatchCallback(List<? extends Row> list,
+      TableName tableName, ExecutorService pool, Object[] results,
+      Callback<R> callback) throws IOException, InterruptedException {
+    wrappedConnection.processBatchCallback(list, tableName, pool, results, callback);
+  }
+
+  @Override
+  public <R> void processBatchCallback(List<? extends Row> list,
+      byte[] tableName, ExecutorService pool, Object[] results,
+      Callback<R> callback) throws IOException, InterruptedException {
+    wrappedConnection.processBatchCallback(list, tableName, pool, results, callback);
+  }
+
+  @Override
+  public void setRegionCachePrefetch(TableName tableName, boolean enable) {
+    wrappedConnection.setRegionCachePrefetch(tableName, enable);
+  }
+
+  @Override
+  public void setRegionCachePrefetch(byte[] tableName, boolean enable) {
+    wrappedConnection.setRegionCachePrefetch(tableName, enable);
+  }
+
+  @Override
+  public boolean getRegionCachePrefetch(TableName tableName) {
+    return wrappedConnection.getRegionCachePrefetch(tableName);
+  }
+
+  @Override
+  public boolean getRegionCachePrefetch(byte[] tableName) {
+     return wrappedConnection.getRegionCachePrefetch(tableName);
+  }
+
+  @Override
+  public int getCurrentNrHRS() throws IOException {
+    return wrappedConnection.getCurrentNrHRS();
+  }
+
+  @Override
+  public HTableDescriptor[] getHTableDescriptorsByTableName(
+      List<TableName> tableNames) throws IOException {
+    return wrappedConnection.getHTableDescriptorsByTableName(tableNames);
+  }
+
+  @Override
+  public HTableDescriptor[] getHTableDescriptors(List<String> tableNames)
+      throws IOException {
+    return wrappedConnection.getHTableDescriptors(tableNames);
+  }
+
+  @Override
+  public boolean isClosed() {
+    return wrappedConnection.isClosed();
+  }
+
+  @Override
+  public void clearCaches(ServerName sn) {
+    wrappedConnection.clearCaches(sn);
+  }
+
+  @Override
+  public MasterKeepAliveConnection getKeepAliveMasterService()
+      throws MasterNotRunningException {
+    return wrappedConnection.getKeepAliveMasterService();
+  }
+
+  @Override
+  public boolean isDeadServer(ServerName serverName) {
+    return wrappedConnection.isDeadServer(serverName);
+  }
+
+  @Override
+  public NonceGenerator getNonceGenerator() {
+    return wrappedConnection.getNonceGenerator();
+  }
+
+  @Override
+  public AsyncProcess getAsyncProcess() {
+    return wrappedConnection.getAsyncProcess();
+  }
+}
\ No newline at end of file

Propchange: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionAdapter.java
------------------------------------------------------------------------------
    svn:eol-style = native
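
The adapter above delegates every ClusterConnection call to the wrapped connection, so code in the same client package can subclass it (anonymously, as ConnectionUtils does below) and intercept only the methods it cares about. A rough sketch of that pattern, using hypothetical names that are not part of this commit:

    // Hypothetical: wrap an existing connection and override a single method;
    // every other call is still delegated to the wrapped connection.
    HConnection wrapped = HConnectionManager.getConnection(conf);
    HConnection adapted = new ConnectionAdapter(wrapped) {
      @Override
      public void clearRegionCache() {
        LOG.debug("Clearing cached region locations");
        super.clearRegionCache();
      }
    };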

Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java Tue Mar 25 19:34:52 2014
@@ -17,12 +17,16 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.io.IOException;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 
 /**
  * Utility used by client connections.
@@ -92,4 +96,31 @@ public class ConnectionUtils {
     c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
     log.debug(sn + " HConnection server-to-server retries=" + retries);
   }
+
+  /**
+   * Adapt a HConnection so that it can bypass the RPC layer (serialization,
+   * deserialization, networking, etc.) when it talks to a local server.
+   * @param conn the connection to adapt
+   * @param serverName the local server name
+   * @param admin the admin interface of the local server
+   * @param client the client interface of the local server
+   * @return an adapted/decorated HConnection
+   */
+  public static HConnection createShortCircuitHConnection(final HConnection conn,
+      final ServerName serverName, final AdminService.BlockingInterface admin,
+      final ClientService.BlockingInterface client) {
+    return new ConnectionAdapter(conn) {
+      @Override
+      public AdminService.BlockingInterface getAdmin(
+          ServerName sn, boolean getMaster) throws IOException {
+        return serverName.equals(sn) ? admin : super.getAdmin(sn, getMaster);
+      }
+
+      @Override
+      public ClientService.BlockingInterface getClient(
+          ServerName sn) throws IOException {
+        return serverName.equals(sn) ? client : super.getClient(sn);
+      }
+    };
+  }
 }
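
A usage sketch of the new helper (hypothetical server-side wiring; it assumes an rpcServices object that implements both AdminService.BlockingInterface and ClientService.BlockingInterface for the local server):

    // Route calls addressed to this server's own ServerName straight to the
    // local service implementations, skipping RPC serialization and sockets.
    HConnection conn = HConnectionManager.getConnection(conf);
    HConnection shortCircuit = ConnectionUtils.createShortCircuitHConnection(
        conn, serverName, rpcServices, rpcServices);
    // getAdmin(serverName, ...) and getClient(serverName) now return the local
    // objects; requests for any other server still take the normal RPC path.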

Modified: hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java (original)
+++ hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java Tue Mar 25 19:34:52 2014
@@ -136,7 +136,7 @@ public class DistributedHBaseCluster ext
   }
 
   @Override
-  public MasterService.BlockingInterface getMaster()
+  public MasterService.BlockingInterface getMasterAdminService()
   throws IOException {
     HConnection conn = HConnectionManager.getConnection(conf);
     return conn.getMaster();
@@ -170,7 +170,7 @@ public class DistributedHBaseCluster ext
     long start = System.currentTimeMillis();
     while (System.currentTimeMillis() - start < timeout) {
       try {
-        getMaster();
+        getMasterAdminService();
         return true;
       } catch (MasterNotRunningException m) {
         LOG.warn("Master not started yet " + m);

Modified: hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java (original)
+++ hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/Action.java Tue Mar 25 19:34:52 2014
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hbase.chaos.actions;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.LinkedList;
 import java.util.List;
@@ -58,11 +59,25 @@ public class Action {
 
   public void perform() throws Exception { }
 
-  /** Returns current region servers */
+  /** Returns current region servers, excluding the active master */
   protected ServerName[] getCurrentServers() throws IOException {
-    Collection<ServerName> regionServers = cluster.getClusterStatus().getServers();
-    if (regionServers == null || regionServers.size() <= 0) return new ServerName [] {};
-    return regionServers.toArray(new ServerName[regionServers.size()]);
+    ClusterStatus clusterStatus = cluster.getClusterStatus();
+    Collection<ServerName> regionServers = clusterStatus.getServers();
+    int count = regionServers == null ? 0 : regionServers.size();
+    if (count <= 0) {
+      return new ServerName [] {};
+    }
+    ServerName master = clusterStatus.getMaster();
+    if (master == null || !regionServers.contains(master)) {
+      return regionServers.toArray(new ServerName[count]);
+    }
+    if (count == 1) {
+      return new ServerName [] {};
+    }
+    ArrayList<ServerName> tmp = new ArrayList<ServerName>(count);
+    tmp.addAll(regionServers);
+    tmp.remove(master);
+    return tmp.toArray(new ServerName[count-1]);
   }
 
   protected void killMaster(ServerName server) throws IOException {

Modified: hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java (original)
+++ hbase/trunk/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RestartRsHoldingMetaAction.java Tue Mar 25 19:34:52 2014
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hbase.chaos.actions;
 
+import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.ServerName;
 
 /**
@@ -35,6 +36,12 @@ public class RestartRsHoldingMetaAction 
       LOG.warn("No server is holding hbase:meta right now.");
       return;
     }
-    restartRs(server, sleepTime);
+    ClusterStatus clusterStatus = cluster.getClusterStatus();
+    if (server.equals(clusterStatus.getMaster())) {
+      // Master holds the meta, so restart the master.
+      restartMaster(server, sleepTime);
+    } else {
+      restartRs(server, sleepTime);
+    }
   }
 }

Modified: hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon (original)
+++ hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/BackupMasterStatusTmpl.jamon Tue Mar 25 19:34:52 2014
@@ -40,15 +40,14 @@ if (master.isActiveMaster()) {
 
 <%java>
 ServerName [] serverNames = masters.toArray(new ServerName[masters.size()]);
+int infoPort = master.getConfiguration().getInt("hbase.master.info.port", 16010);
 </%java>
 <%if (!master.isActiveMaster()) %>
     <%if serverNames[0] != null %>
         <h2>Master</h2>
-        <a href="//<% serverNames[0].getHostname() %>:
-	   <% master.getConfiguration().getInt("hbase.master.info.port", 16010) %>/master-status"
-	     target="_blank">
-	   <% serverNames[0].getHostname() %>
-	</a>
+        <a href="//<% serverNames[0].getHostname() %>:<%
+          infoPort %>/master-status" target="_blank"><%
+          serverNames[0].getHostname() %></a>
     <%else>
         Unable to parse master hostname.
     </%if>
@@ -66,11 +65,10 @@ ServerName [] serverNames = masters.toAr
     for (ServerName serverName : serverNames) {
     </%java>
     <tr>
-        <td><a href="//<% serverName.getHostname() %>:
-	  <% master.getConfiguration().getInt("hbase.master.info.port", 16010) %>/master-status"
-	     target="_blank">
-	  <% serverName.getHostname() %></a>
-	</td>
+        <td><a href="//<% serverName.getHostname() %>:<%
+          infoPort %>/master-status" target="_blank"><%
+          serverName.getHostname() %></a>
+        </td>
         <td><% serverName.getPort() %></td>
         <td><% new Date(serverName.getStartcode()) %></td>
     </tr>

Modified: hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon (original)
+++ hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon Tue Mar 25 19:34:52 2014
@@ -63,7 +63,7 @@ AssignmentManager assignmentManager = ma
 <%class>
   public String formatZKString() {
     StringBuilder quorums = new StringBuilder();
-    String zkQuorum = master.getZooKeeperWatcher().getQuorum();
+    String zkQuorum = master.getZooKeeper().getQuorum();
 
     if (null == zkQuorum) {
       return quorums.toString();
@@ -278,7 +278,7 @@ AssignmentManager assignmentManager = ma
 	                </%if>
 	                <tr>
 	                    <td>Coprocessors</td>
-	                    <td><% java.util.Arrays.toString(master.getCoprocessors()) %></td>
+	                    <td><% java.util.Arrays.toString(master.getMasterCoprocessors()) %></td>
 	                    <td>Coprocessors currently loaded by the master</td>
 	                </tr>
                 </%if>

Modified: hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon (original)
+++ hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon Tue Mar 25 19:34:52 2014
@@ -283,7 +283,7 @@ if  (sl.getTotalCompactingKVs() > 0) {
         </%args>
         <%java>
         int infoPort = master.getRegionServerInfoPort(serverName);
-        String url = "//" + serverName.getHostname() + ":" + infoPort + "/";
+        String url = "//" + serverName.getHostname() + ":" + infoPort + "/rs-status";
         </%java>
 
         <%if (infoPort > 0) %>

Modified: hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon (original)
+++ hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon Tue Mar 25 19:34:52 2014
@@ -36,10 +36,10 @@ org.apache.hadoop.hbase.zookeeper.Master
   <%java return; %>
 </%if>
 <%java>
-  ServerInfo serverInfo = ProtobufUtil.getServerInfo(regionServer);
+  ServerInfo serverInfo = ProtobufUtil.getServerInfo(regionServer.getRSRpcServices());
   ServerName serverName = ProtobufUtil.toServerName(serverInfo.getServerName());
-  List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(regionServer);
-  int masterInfoPort = regionServer.getConfiguration().getInt("hbase.master.info.port", 16010);
+  List<HRegionInfo> onlineRegions = ProtobufUtil.getOnlineRegions(regionServer.getRSRpcServices());
+  int infoPort = regionServer.getConfiguration().getInt("hbase.master.info.port", 16010);
   MasterAddressTracker masterAddressTracker = regionServer.getMasterAddressTracker();
   ServerName masterServerName = masterAddressTracker == null ? null
     : masterAddressTracker.getMasterAddress();
@@ -98,7 +98,7 @@ org.apache.hadoop.hbase.zookeeper.Master
 
     <section>
     <h2>Server Metrics</h2>
-    <& ServerMetricsTmpl; mWrap = regionServer.getMetrics().getRegionServerWrapper(); &>
+    <& ServerMetricsTmpl; mWrap = regionServer.getRegionServerMetrics().getRegionServerWrapper(); &>
     </section>
 
     <section>
@@ -135,7 +135,7 @@ org.apache.hadoop.hbase.zookeeper.Master
         </tr>
         <tr>
             <td>Coprocessors</td>
-            <td><% java.util.Arrays.toString(regionServer.getCoprocessors()) %></td>
+            <td><% java.util.Arrays.toString(regionServer.getRegionServerCoprocessors()) %></td>
             <td>Coprocessors currently loaded by this regionserver</td>
         </tr>
         <tr>
@@ -146,14 +146,12 @@ org.apache.hadoop.hbase.zookeeper.Master
         <tr>
             <td>HBase Master</td>
             <td>
-                <%if (masterInfoPort < 0) %>
-                No hbase.master.info.port found
-                <%elseif masterServerName == null %>
+                <%if masterServerName == null %>
                 No master found
                 <%else>
                 <%java>
-                String host = masterServerName.getHostname() + ":" + masterInfoPort;
-                String url = "//" + host + "/";
+                String host = masterServerName.getHostname() + ":" + infoPort;
+                String url = "//" + host + "/master-status";
                 </%java>
                 <a href="<% url %>"><% host %></a>
                 </%if>

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/client/CoprocessorHConnection.java Tue Mar 25 19:34:52 2014
@@ -32,25 +32,12 @@ import org.apache.hadoop.hbase.ServerNam
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.HConnection;
-import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-import com.google.protobuf.BlockingRpcChannel;
-import com.google.protobuf.BlockingService;
-import com.google.protobuf.Descriptors.MethodDescriptor;
-import com.google.protobuf.Message;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
 
 /**
  * Connection to an HTable from within a Coprocessor. We can do some nice tricks since we know we
@@ -106,28 +93,7 @@ public class CoprocessorHConnection impl
     }
     // the client is attempting to write to the same regionserver, we can short-circuit to our
     // local regionserver
-    final BlockingService blocking = ClientService.newReflectiveBlockingService(this.server);
-    final RpcServerInterface rpc = this.server.getRpcServer();
-
-    final MonitoredRPCHandler status =
-        TaskMonitor.get().createRPCStatus(Thread.currentThread().getName());
-    status.pause("Setting up server-local call");
-
-    final long timestamp = EnvironmentEdgeManager.currentTimeMillis();
-    BlockingRpcChannel channel = new BlockingRpcChannel() {
-
-      @Override
-      public Message callBlockingMethod(MethodDescriptor method, RpcController controller,
-          Message request, Message responsePrototype) throws ServiceException {
-        try {
-          // we never need a cell-scanner - everything is already fully formed
-          return rpc.call(blocking, method, request, null, timestamp, status).getFirst();
-        } catch (IOException e) {
-          throw new ServiceException(e);
-        }
-      }
-    };
-    return ClientService.newBlockingStub(channel);
+    return server.getRSRpcServices();
   }
 
   public void abort(String why, Throwable e) {

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java Tue Mar 25 19:34:52 2014
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hbase.ipc;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -121,7 +121,6 @@ import com.google.protobuf.Message;
 import com.google.protobuf.Message.Builder;
 import com.google.protobuf.ServiceException;
 import com.google.protobuf.TextFormat;
-// Uses Writables doing sasl
 
 /**
  * An RPC server that hosts protobuf described Services.
@@ -254,7 +253,7 @@ public class RpcServer implements RpcSer
 
   private final int warnResponseTime;
   private final int warnResponseSize;
-  private final Object serverInstance;
+  private final Server server;
   private final List<BlockingServiceAndInterface> services;
 
   private final RpcScheduler scheduler;
@@ -318,6 +317,7 @@ public class RpcServer implements RpcSer
      * Short string representation without param info because param itself could be huge depends on
      * the payload of a command
      */
+    @SuppressWarnings("deprecation")
     String toShortString() {
       String serviceName = this.connection.service != null?
         this.connection.service.getDescriptorForType().getName() : "null";
@@ -1837,7 +1837,7 @@ public class RpcServer implements RpcSer
 
   /**
    * Constructs a server listening on the named port and address.
-   * @param serverInstance hosting instance of {@link Server}. We will do authentications if an
+   * @param server hosting instance of {@link Server}. We will do authentications if an
    * instance else pass null for no authentication check.
    * @param name Used keying this rpc servers' metrics and for naming the Listener thread.
    * @param services A list of services.
@@ -1845,12 +1845,12 @@ public class RpcServer implements RpcSer
    * @param conf
    * @throws IOException
    */
-  public RpcServer(final Server serverInstance, final String name,
+  public RpcServer(final Server server, final String name,
       final List<BlockingServiceAndInterface> services,
       final InetSocketAddress isa, Configuration conf,
       RpcScheduler scheduler)
   throws IOException {
-    this.serverInstance = serverInstance;
+    this.server = server;
     this.services = services;
     this.isa = isa;
     this.conf = conf;
@@ -1933,32 +1933,15 @@ public class RpcServer implements RpcSer
   @Override
   public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; }
 
-  /** Starts the service.  Must be called before any calls will be handled. */
-  @Override
-  public void start() {
-    startThreads();
-    openServer();
-  }
-
-  /**
-   * Open a previously started server.
-   */
-  @Override
-  public void openServer() {
-    this.started = true;
-  }
-
   @Override
   public boolean isStarted() {
     return this.started;
   }
 
-  /**
-   * Starts the service threads but does not allow requests to be responded yet.
-   * Client will get {@link ServerNotRunningYetException} instead.
-   */
+  /** Starts the service.  Must be called before any calls will be handled. */
   @Override
-  public synchronized void startThreads() {
+  public synchronized void start() {
+    if (started) return;
     AuthenticationTokenSecretManager mgr = createSecretManager();
     if (mgr != null) {
       setSecretManager(mgr);
@@ -1969,6 +1952,7 @@ public class RpcServer implements RpcSer
     responder.start();
     listener.start();
     scheduler.start();
+    started = true;
   }
 
   @Override
@@ -1980,9 +1964,7 @@ public class RpcServer implements RpcSer
 
   private AuthenticationTokenSecretManager createSecretManager() {
     if (!isSecurityEnabled) return null;
-    if (serverInstance == null) return null;
-    if (!(serverInstance instanceof org.apache.hadoop.hbase.Server)) return null;
-    org.apache.hadoop.hbase.Server server = (org.apache.hadoop.hbase.Server)serverInstance;
+    if (server == null) return null;
     Configuration conf = server.getConfiguration();
     long keyUpdateInterval =
         conf.getLong("hbase.auth.key.update.interval", 24*60*60*1000);
@@ -2084,9 +2066,9 @@ public class RpcServer implements RpcSer
     responseInfo.put("queuetimems", qTime);
     responseInfo.put("responsesize", responseSize);
     responseInfo.put("client", clientAddress);
-    responseInfo.put("class", serverInstance == null? "": serverInstance.getClass().getSimpleName());
+    responseInfo.put("class", server == null? "": server.getClass().getSimpleName());
     responseInfo.put("method", methodName);
-    if (params.length == 2 && serverInstance instanceof HRegionServer &&
+    if (params.length == 2 && server instanceof HRegionServer &&
         params[0] instanceof byte[] &&
         params[1] instanceof Operation) {
       // if the slow process is a query, we want to log its table as well
@@ -2099,7 +2081,7 @@ public class RpcServer implements RpcSer
       // report to the log file
       LOG.warn("(operation" + tag + "): " +
                MAPPER.writeValueAsString(responseInfo));
-    } else if (params.length == 1 && serverInstance instanceof HRegionServer &&
+    } else if (params.length == 1 && server instanceof HRegionServer &&
         params[0] instanceof Operation) {
       // annotate the response map with operation details
       responseInfo.putAll(((Operation) params[0]).toMap());
@@ -2181,7 +2163,6 @@ public class RpcServer implements RpcSer
    * @param addr InetAddress of incoming connection
    * @throws org.apache.hadoop.security.authorize.AuthorizationException when the client isn't authorized to talk the protocol
    */
-  @SuppressWarnings("static-access")
   public void authorize(UserGroupInformation user, ConnectionHeader connection, InetAddress addr)
   throws AuthorizationException {
     if (authorize) {

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java Tue Mar 25 19:34:52 2014
@@ -44,8 +44,6 @@ import com.google.protobuf.ServiceExcept
 @InterfaceAudience.Private
 public interface RpcServerInterface {
   void start();
-  void openServer();
-  void startThreads();
   boolean isStarted();
 
   void stop();

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java Tue Mar 25 19:34:52 2014
@@ -30,8 +30,8 @@ import com.google.common.base.Strings;
 import com.google.common.collect.Lists;
 
 /**
- * A scheduler that maintains isolated handler pools for general, high-priority and replication
- * requests.
+ * A scheduler that maintains isolated handler pools for general,
+ * high-priority, and replication requests.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving

Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java?rev=1581479&r1=1581478&r2=1581479&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java Tue Mar 25 19:34:52 2014
@@ -24,10 +24,10 @@ import java.util.concurrent.atomic.Atomi
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.ZNodeClearer;
-import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.ZNodeClearer;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -65,6 +65,7 @@ public class ActiveMasterManager extends
    */
   ActiveMasterManager(ZooKeeperWatcher watcher, ServerName sn, Server master) {
     super(watcher);
+    watcher.registerListener(this);
     this.sn = sn;
     this.master = master;
   }
@@ -139,13 +140,15 @@ public class ActiveMasterManager extends
    *
    * This also makes sure that we are watching the master znode so will be
    * notified if another master dies.
-   * @param startupStatus
+   * @param checkInterval the interval to check if the master is stopped
+   * @param startupStatus the monitor status to track the progress
    * @return True if no issue becoming active master else false if another
    * master was running or if some other problem (zookeeper, stop flag has been
    * set on this Master)
    */
-  boolean blockUntilBecomingActiveMaster(MonitoredTask startupStatus) {
-    while (true) {
+  boolean blockUntilBecomingActiveMaster(
+      int checkInterval, MonitoredTask startupStatus) {
+    while (!(master.isAborted() || master.isStopped())) {
       startupStatus.setStatus("Trying to register in ZK as active master");
       // Try to become the active master, watch if there is another master.
       // Write out our ServerName as versioned bytes.
@@ -222,9 +225,9 @@ public class ActiveMasterManager extends
         return false;
       }
       synchronized (this.clusterHasActiveMaster) {
-        while (this.clusterHasActiveMaster.get() && !this.master.isStopped()) {
+        while (clusterHasActiveMaster.get() && !master.isStopped()) {
           try {
-            this.clusterHasActiveMaster.wait();
+            clusterHasActiveMaster.wait(checkInterval);
           } catch (InterruptedException e) {
             // We expect to be interrupted when a master dies,
             //  will fall out if so
@@ -235,18 +238,15 @@ public class ActiveMasterManager extends
           this.master.stop(
             "Cluster went down before this master became active");
         }
-        if (this.master.isStopped()) {
-          return false;
-        }
-        // there is no active master so we can try to become active master again
       }
     }
+    return false;
   }
 
   /**
    * @return True if cluster has an active master.
    */
-  public boolean isActiveMaster() {
+  boolean hasActiveMaster() {
     try {
       if (ZKUtil.checkExists(watcher, watcher.getMasterAddressZNode()) >= 0) {
         return true;
@@ -261,6 +261,11 @@ public class ActiveMasterManager extends
 
   public void stop() {
     try {
+      synchronized (clusterHasActiveMaster) {
+        // Master is already stopped, wake up the manager
+        // thread so that it can shutdown soon.
+        clusterHasActiveMaster.notifyAll();
+      }
       // If our address is in ZK, delete it on our way out
       ServerName activeMaster = null;
       try {