Posted to commits@hbase.apache.org by st...@apache.org on 2013/05/03 05:58:35 UTC

svn commit: r1478639 [8/10] - in /hbase/branches/0.95: hbase-client/src/main/java/org/apache/hadoop/hbase/ hbase-client/src/main/java/org/apache/hadoop/hbase/catalog/ hbase-client/src/main/java/org/apache/hadoop/hbase/client/ hbase-client/src/main/java...

Added: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java?rev=1478639&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java (added)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerInterface.java Fri May  3 03:58:33 2013
@@ -0,0 +1,76 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.ipc;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Function;
+import com.google.protobuf.BlockingService;
+import com.google.protobuf.Descriptors.MethodDescriptor;
+import com.google.protobuf.Message;
+import com.google.protobuf.ServiceException;
+
+@InterfaceAudience.Private
+public interface RpcServerInterface {
+  // TODO: Needs cleanup.  Why a 'start', and then a 'startThreads' and an 'openServer'?
+
+  void setSocketSendBufSize(int size);
+
+  void start();
+
+  void stop();
+
+  void join() throws InterruptedException;
+
+  InetSocketAddress getListenerAddress();
+
+  Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md,
+    Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status)
+  throws IOException, ServiceException;
+
+  void setErrorHandler(HBaseRPCErrorHandler handler);
+
+  void openServer();
+
+  void startThreads();
+
+  /**
+   * Returns the metrics instance for reporting RPC call statistics
+   */
+  MetricsHBaseServer getMetrics();
+
+  public void setQosFunction(Function<Pair<RequestHeader, Message>, Integer> newFunc);
+
+  /**
+   * Refresh authentication manager policy.
+   * @param pp
+   */
+  @VisibleForTesting
+  void refreshAuthManager(PolicyProvider pp);
+}
\ No newline at end of file

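A rough usage sketch (not part of this commit) of the interface added above; the start()/openServer()/startThreads() ordering is an assumption (the TODO in the file itself flags the overlap), and the wrapper class name is hypothetical:

    import java.net.InetSocketAddress;

    import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
    import org.apache.hadoop.hbase.ipc.RpcServerInterface;

    // Hypothetical sketch of driving an RpcServerInterface implementation
    // (e.g. the RpcServer that HMaster constructs later in this commit).
    public class RpcServerLifecycleSketch {
      public static void serve(RpcServerInterface rpcServer, HBaseRPCErrorHandler handler)
          throws InterruptedException {
        rpcServer.setErrorHandler(handler);   // install error handling before serving
        rpcServer.start();                    // assumed: start listener/responder threads
        rpcServer.openServer();               // assumed: begin accepting client RPCs
        rpcServer.startThreads();             // assumed: start the handler pool
        InetSocketAddress bound = rpcServer.getListenerAddress();
        System.out.println("RPC server listening at " + bound);
        // ... run until shutdown is requested elsewhere ...
        rpcServer.stop();                     // stop accepting requests and tear down
        rpcServer.join();                     // wait for server threads to exit
      }
    }
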
Added: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java?rev=1478639&view=auto
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java (added)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/UnknownServiceException.java Fri May  3 03:58:33 2013
@@ -0,0 +1,25 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+@SuppressWarnings("serial")
+public class UnknownServiceException extends FatalConnectionException {
+  UnknownServiceException(final String msg) {
+    super(msg);
+  }
+}
\ No newline at end of file

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java Fri May  3 03:58:33 2013
@@ -543,7 +543,7 @@ public class LoadIncrementalHFiles exten
               + Bytes.toStringBinary(row));
           byte[] regionName = location.getRegionInfo().getRegionName();
           if(!useSecure) {
-            success = ProtobufUtil.bulkLoadHFile(server, famPaths, regionName, assignSeqIds);
+            success = ProtobufUtil.bulkLoadHFile(stub, famPaths, regionName, assignSeqIds);
           } else {
             HTable table = new HTable(conn.getConfiguration(), tableName);
             secureClient = new SecureBulkLoadClient(table);

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java Fri May  3 03:58:33 2013
@@ -24,9 +24,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.HConnectable;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ActiveMasterManager.java Fri May  3 03:58:33 2013
@@ -156,7 +156,7 @@ class ActiveMasterManager extends ZooKee
           // We are the master, return
           startupStatus.setStatus("Successfully registered as active master.");
           this.clusterHasActiveMaster.set(true);
-          LOG.info("Master=" + this.sn);
+          LOG.info("Registered Active Master=" + this.sn);
           return true;
         }
 

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Fri May  3 03:58:33 2013
@@ -53,9 +53,6 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HealthCheckChore;
-import org.apache.hadoop.hbase.MasterAdminProtocol;
-import org.apache.hadoop.hbase.MasterMonitorProtocol;
-import org.apache.hadoop.hbase.RegionServerStatusProtocol;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
@@ -79,9 +76,9 @@ import org.apache.hadoop.hbase.exception
 import org.apache.hadoop.hbase.exceptions.UnknownRegionException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.executor.ExecutorType;
-import org.apache.hadoop.hbase.ipc.HBaseServer;
-import org.apache.hadoop.hbase.ipc.HBaseServerRPC;
+import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.master.balancer.BalancerChore;
 import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
@@ -109,6 +106,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AddColumnResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.AssignRegionRequest;
@@ -161,6 +159,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.TakeSnapshotResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.UnassignRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusRequest;
@@ -169,6 +168,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
@@ -227,16 +227,15 @@ import com.google.protobuf.ServiceExcept
  *
  * <p>You can also shutdown just this master.  Call {@link #stopMaster()}.
  *
- * @see MasterMonitorProtocol
- * @see MasterAdminProtocol
- * @see RegionServerStatusProtocol
  * @see Watcher
  */
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")
 public class HMaster extends HasThread
-implements MasterMonitorProtocol, MasterAdminProtocol, RegionServerStatusProtocol, MasterServices,
-Server {
+implements MasterMonitorProtos.MasterMonitorService.BlockingInterface,
+MasterAdminProtos.MasterAdminService.BlockingInterface,
+RegionServerStatusProtos.RegionServerStatusService.BlockingInterface,
+MasterServices, Server {
   private static final Log LOG = LogFactory.getLog(HMaster.class.getName());
 
   // MASTER is name of the webapp and the attribute name used stuffing this
@@ -260,7 +259,7 @@ Server {
   private LoadBalancerTracker loadBalancerTracker;
 
   // RPC server for the HMaster
-  private final RpcServer rpcServer;
+  private final RpcServerInterface rpcServer;
   // Set after we've called HBaseServer#openServer and ready to receive RPCs.
   // Set back to false after we stop rpcServer.  Used by tests.
   private volatile boolean rpcServerOpen = false;
@@ -367,8 +366,6 @@ Server {
     this.conf = new Configuration(conf);
     // Disable the block cache on the master
     this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
-    // Set how many times to retry talking to another server over HConnection.
-    HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);
     // Server to handle client requests.
     String hostname = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
       conf.get("hbase.master.dns.interface", "default"),
@@ -387,23 +384,22 @@ Server {
         throw new IllegalArgumentException("Failed resolve of bind address " + initialIsa);
       }
     }
+    String name = "master/" + initialIsa.toString();
+    // Set how many times to retry talking to another server over HConnection.
+    HConnectionManager.setServerSideHConnectionRetries(this.conf, name, LOG);
     int numHandlers = conf.getInt("hbase.master.handler.count",
       conf.getInt("hbase.regionserver.handler.count", 25));
-    this.rpcServer = HBaseServerRPC.getServer(MasterMonitorProtocol.class, this,
-        new Class<?>[]{MasterMonitorProtocol.class,
-            MasterAdminProtocol.class, RegionServerStatusProtocol.class},
-        initialIsa.getHostName(), // This is bindAddress if set else it's hostname
-        initialIsa.getPort(),
-        numHandlers,
-        0, // we dont use high priority handlers in master
-        conf.getBoolean("hbase.rpc.verbose", false), conf,
-        0); // this is a DNC w/o high priority handlers
+    this.rpcServer = new RpcServer(this, name, getServices(),
+      initialIsa, // BindAddress is IP we got for this server.
+      numHandlers,
+      0, // we don't use high priority handlers in master
+      conf,
+      0); // this is a DNC w/o high priority handlers
     // Set our address.
     this.isa = this.rpcServer.getListenerAddress();
-    this.serverName = new ServerName(hostname,
-      this.isa.getPort(), System.currentTimeMillis());
+    this.serverName = new ServerName(hostname, this.isa.getPort(), System.currentTimeMillis());
     this.rsFatals = new MemoryBoundedLogMessageBuffer(
-        conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
+      conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
 
     // login the zookeeper client principal (if using security)
     ZKUtil.loginClient(this.conf, "hbase.zookeeper.client.keytab.file",
@@ -458,6 +454,23 @@ Server {
   }
 
   /**
+   * @return list of blocking services and their security info classes that this server supports
+   */
+  private List<BlockingServiceAndInterface> getServices() {
+    List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(3);
+    bssi.add(new BlockingServiceAndInterface(
+        MasterMonitorProtos.MasterMonitorService.newReflectiveBlockingService(this),
+        MasterMonitorProtos.MasterMonitorService.BlockingInterface.class));
+    bssi.add(new BlockingServiceAndInterface(
+        MasterAdminProtos.MasterAdminService.newReflectiveBlockingService(this),
+        MasterAdminProtos.MasterAdminService.BlockingInterface.class));
+    bssi.add(new BlockingServiceAndInterface(
+        RegionServerStatusProtos.RegionServerStatusService.newReflectiveBlockingService(this),
+        RegionServerStatusProtos.RegionServerStatusService.BlockingInterface.class));
+    return bssi;
+  }
+
+  /**
    * Stall startup if we are designated a backup master; i.e. we want someone
    * else to become the master before proceeding.
    * @param c configuration
@@ -612,10 +625,10 @@ Server {
     boolean wasUp = this.clusterStatusTracker.isClusterUp();
     if (!wasUp) this.clusterStatusTracker.setClusterUp();
 
-    LOG.info("Server active/primary master; " + this.serverName +
+    LOG.info("Server active/primary master=" + this.serverName +
         ", sessionid=0x" +
         Long.toHexString(this.zooKeeper.getRecoverableZooKeeper().getSessionId()) +
-        ", cluster-up flag was=" + wasUp);
+        ", setting cluster-up flag (Was=" + wasUp + ")");
 
     // create the snapshot manager
     this.snapshotManager = new SnapshotManager(this, this.metricsMaster);
@@ -765,7 +778,7 @@ Server {
     enableServerShutdownHandler();
 
     // Update meta with new PB serialization if required. i.e migrate all HRI to PB serialization
-    // in meta. This must happen before we assign all user regions or else the assignment will 
+    // in meta. This must happen before we assign all user regions or else the assignment will
     // fail.
     // TODO: Remove this after 0.96, when we do 0.98.
     org.apache.hadoop.hbase.catalog.MetaMigrationConvertingToPB
@@ -995,7 +1008,6 @@ Server {
    *  need to install an unexpected exception handler.
    */
   void startServiceThreads() throws IOException{
-
    // Start the executor service pools
    this.executorService.startExecutorService(ExecutorType.MASTER_OPEN_REGION,
       conf.getInt("hbase.master.executor.openregion.threads", 5));
@@ -1037,22 +1049,22 @@ Server {
      this.infoServer.start();
     }
 
-   // Start the health checker
-   if (this.healthCheckChore != null) {
-     Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(), n + ".healthChecker");
-   }
+    // Start the health checker
+    if (this.healthCheckChore != null) {
+      Threads.setDaemonThreadRunning(this.healthCheckChore.getThread(), n + ".healthChecker");
+    }
 
     // Start allowing requests to happen.
     this.rpcServer.openServer();
     this.rpcServerOpen = true;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Started service threads");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Started service threads");
     }
   }
 
   /**
    * Use this when trying to figure when its ok to send in rpcs.  Used by tests.
-   * @return True if we have successfully run {@link HBaseServer#openServer()}
+   * @return True if we have successfully run {@link RpcServer#openServer()}
    */
   boolean isRpcServerOpen() {
     return this.rpcServerOpen;
@@ -1141,7 +1153,7 @@ Server {
   throws UnknownHostException {
     // Do it out here in its own little method so can fake an address when
     // mocking up in tests.
-    return HBaseServer.getRemoteIp();
+    return RpcServer.getRemoteIp();
   }
 
   /**
@@ -2354,9 +2366,9 @@ Server {
   /**
    * Offline specified region from master's in-memory state. It will not attempt to
    * reassign the region as in unassign.
-   *  
+   *
    * This is a special method that should be used by experts or hbck.
-   * 
+   *
    */
   @Override
   public OfflineRegionResponse offlineRegion(RpcController controller, OfflineRegionRequest request)

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterFileSystem.java Fri May  3 03:58:33 2013
@@ -477,7 +477,7 @@ public class MasterFileSystem {
 
   private static void bootstrap(final Path rd, final Configuration c)
   throws IOException {
-    LOG.info("BOOTSTRAP: creating first META region");
+    LOG.info("BOOTSTRAP: creating META region");
     try {
       // Bootstrapping, make sure blockcache is off.  Else, one will be
       // created here in bootstap and it'll need to be cleaned up.  Better to

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java Fri May  3 03:58:33 2013
@@ -155,10 +155,7 @@ public interface MasterServices extends 
   public boolean isServerShutdownHandlerEnabled();
 
   /**
-   * Registers a new protocol buffer {@link Service} subclass as a master coprocessor endpoint to
-   * be available for handling
-   * {@link org.apache.hadoop.hbase.MasterAdminProtocol#execMasterService(com.google.protobuf.RpcController,
-   * org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest)} calls.
+   * Registers a new protocol buffer {@link Service} subclass as a master coprocessor endpoint.
    *
    * <p>
    * Only a single instance may be registered for a given {@link Service} subclass (the

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java Fri May  3 03:58:33 2013
@@ -27,9 +27,9 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedMap;
-import java.util.Map.Entry;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 
@@ -37,25 +37,25 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.exceptions.PleaseHoldException;
 import org.apache.hadoop.hbase.RegionLoad;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.exceptions.YouAreDeadException;
-import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.RetriesExhaustedException;
+import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException;
+import org.apache.hadoop.hbase.exceptions.PleaseHoldException;
+import org.apache.hadoop.hbase.exceptions.YouAreDeadException;
+import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
 import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
@@ -112,12 +112,12 @@ public class ServerManager {
   private final Map<ServerName, ServerLoad> onlineServers =
     new ConcurrentHashMap<ServerName, ServerLoad>();
 
-  // TODO: This is strange to have two maps but HSI above is used on both sides
   /**
-   * Map from full server-instance name to the RPC connection for this server.
+   * Map of admin interfaces per registered regionserver; the master uses these interfaces
+   * to control regionservers out on the cluster.
    */
-  private final Map<ServerName, AdminProtocol> serverConnections =
-    new HashMap<ServerName, AdminProtocol>();
+  private final Map<ServerName, AdminService.BlockingInterface> rsAdmins =
+    new HashMap<ServerName, AdminService.BlockingInterface>();
 
   /**
    * List of region servers <ServerName> that should not get any more new
@@ -351,7 +351,7 @@ public class ServerManager {
   void recordNewServer(final ServerName serverName, final ServerLoad sl) {
     LOG.info("Registering server=" + serverName);
     this.onlineServers.put(serverName, sl);
-    this.serverConnections.remove(serverName);
+    this.rsAdmins.remove(serverName);
   }
 
   public long getLastFlushedSequenceId(byte[] regionName) {
@@ -472,7 +472,7 @@ public class ServerManager {
     synchronized (onlineServers) {
       onlineServers.notifyAll();
     }
-    this.serverConnections.remove(serverName);
+    this.rsAdmins.remove(serverName);
     // If cluster is going down, yes, servers are going to be expiring; don't
     // process as a dead server
     if (this.clusterShutdown) {
@@ -591,7 +591,7 @@ public class ServerManager {
   public RegionOpeningState sendRegionOpen(final ServerName server,
       HRegionInfo region, int versionOfOfflineNode)
   throws IOException {
-    AdminProtocol admin = getServerConnection(server);
+    AdminService.BlockingInterface admin = getRsAdmin(server);
     if (admin == null) {
       LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
         " failed because no RPC connection found to this server");
@@ -619,7 +619,7 @@ public class ServerManager {
   public List<RegionOpeningState> sendRegionOpen(ServerName server,
       List<Pair<HRegionInfo, Integer>> regionOpenInfos)
   throws IOException {
-    AdminProtocol admin = getServerConnection(server);
+    AdminService.BlockingInterface admin = getRsAdmin(server);
     if (admin == null) {
       LOG.warn("Attempting to send OPEN RPC to server " + server.toString() +
         " failed because no RPC connection found to this server");
@@ -653,7 +653,7 @@ public class ServerManager {
   public boolean sendRegionClose(ServerName server, HRegionInfo region,
     int versionOfClosingNode, ServerName dest, boolean transitionInZK) throws IOException {
     if (server == null) throw new NullPointerException("Passed server is null");
-    AdminProtocol admin = getServerConnection(server);
+    AdminService.BlockingInterface admin = getRsAdmin(server);
     if (admin == null) {
       throw new IOException("Attempting to send CLOSE RPC to server " +
         server.toString() + " for region " +
@@ -688,7 +688,7 @@ public class ServerManager {
       throw new NullPointerException("Passed server is null");
     if (region_a == null || region_b == null)
       throw new NullPointerException("Passed region is null");
-    AdminProtocol admin = getServerConnection(server);
+    AdminService.BlockingInterface admin = getRsAdmin(server);
     if (admin == null) {
       throw new IOException("Attempting to send MERGE REGIONS RPC to server "
           + server.toString() + " for region "
@@ -701,18 +701,17 @@ public class ServerManager {
 
     /**
     * @param sn
-    * @return
+    * @return Admin interface for the remote regionserver named <code>sn</code>
     * @throws IOException
     * @throws RetriesExhaustedException wrapping a ConnectException if failed
-    * putting up proxy.
     */
-  private AdminProtocol getServerConnection(final ServerName sn)
+  private AdminService.BlockingInterface getRsAdmin(final ServerName sn)
   throws IOException {
-    AdminProtocol admin = this.serverConnections.get(sn);
+    AdminService.BlockingInterface admin = this.rsAdmins.get(sn);
     if (admin == null) {
-      LOG.debug("New connection to " + sn.toString());
+      LOG.debug("New admin connection to " + sn.toString());
       admin = this.connection.getAdmin(sn);
-      this.serverConnections.put(sn, admin);
+      this.rsAdmins.put(sn, admin);
     }
     return admin;
   }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java Fri May  3 03:58:33 2013
@@ -178,8 +178,7 @@ public class SplitLogManager extends Zoo
     this.timeout = conf.getInt("hbase.splitlog.manager.timeout", DEFAULT_TIMEOUT);
     this.unassignedTimeout =
       conf.getInt("hbase.splitlog.manager.unassigned.timeout", DEFAULT_UNASSIGNED_TIMEOUT);
-    LOG.info("timeout = " + timeout);
-    LOG.info("unassigned timeout = " + unassignedTimeout);
+    LOG.info("timeout=" + timeout + ", unassigned timeout=" + unassignedTimeout);
 
     this.serverName = serverName;
     this.timeoutMonitor =
@@ -855,7 +854,7 @@ public class SplitLogManager extends Zoo
       }
       getDataSetWatch(nodepath, zkretries);
     }
-    LOG.info("found " + (orphans.size() - rescan_nodes) + " orphan tasks and " +
+    LOG.info("Found " + (orphans.size() - rescan_nodes) + " orphan tasks and " +
         rescan_nodes + " rescan nodes");
   }
 

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ReplicationProtbufUtil.java Fri May  3 03:58:33 2013
@@ -22,12 +22,12 @@ package org.apache.hadoop.hbase.protobuf
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.ServiceException;
-import org.apache.hadoop.hbase.HConstants;
+
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -79,7 +79,7 @@ public class ReplicationProtbufUtil {
    * @param entries
    * @throws java.io.IOException
    */
-  public static void replicateWALEntry(final AdminProtocol admin,
+  public static void replicateWALEntry(final AdminService.BlockingInterface admin,
       final HLog.Entry[] entries) throws IOException {
     AdminProtos.ReplicateWALEntryRequest request =
       buildReplicateWALEntryRequest(entries);

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Fri May  3 03:58:33 2013
@@ -108,7 +108,7 @@ import org.apache.hadoop.hbase.io.HeapSi
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.io.hfile.BlockCache;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.ipc.HBaseServer;
+import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.RpcCallContext;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
@@ -3623,7 +3623,7 @@ public class HRegion implements HeapSize
       if (!results.isEmpty()) {
         throw new IllegalArgumentException("First parameter should be an empty list");
       }
-      RpcCallContext rpcCall = HBaseServer.getCurrentCall();
+      RpcCallContext rpcCall = RpcServer.getCurrentCall();
       // The loop here is used only when at some point during the next we determine
       // that due to effects of filters or otherwise, we have an empty row in the result.
       // Then we loop and try again. Otherwise, we must get out on the first iteration via return,

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Fri May  3 03:58:33 2013
@@ -65,7 +65,6 @@ import org.apache.hadoop.hbase.HRegionIn
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.HealthCheckChore;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.RegionServerStatusProtocol;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
@@ -74,9 +73,7 @@ import org.apache.hadoop.hbase.ZNodeClea
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.catalog.MetaReader;
-import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.ClientProtocol;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HConnectionManager;
@@ -108,19 +105,19 @@ import org.apache.hadoop.hbase.filter.By
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.ipc.HBaseClientRPC;
 import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler;
-import org.apache.hadoop.hbase.ipc.HBaseServerRPC;
-import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
-import org.apache.hadoop.hbase.ipc.ProtobufRpcClientEngine;
-import org.apache.hadoop.hbase.ipc.RpcClientEngine;
+import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
 import org.apache.hadoop.hbase.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -179,6 +176,7 @@ import org.apache.hadoop.hbase.protobuf.
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
 import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
@@ -218,6 +216,7 @@ import org.apache.hadoop.util.StringUtil
 import org.apache.zookeeper.KeeperException;
 import org.cliffc.high_scale_lib.Counter;
 
+import com.google.protobuf.BlockingRpcChannel;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
@@ -230,8 +229,9 @@ import com.google.protobuf.TextFormat;
  */
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")
-public class HRegionServer implements ClientProtocol,
-    AdminProtocol, Runnable, RegionServerServices, HBaseRPCErrorHandler, LastSequenceId {
+public class HRegionServer implements ClientProtos.ClientService.BlockingInterface,
+  AdminProtos.AdminService.BlockingInterface, Runnable, RegionServerServices,
+  HBaseRPCErrorHandler, LastSequenceId {
 
   public static final Log LOG = LogFactory.getLog(HRegionServer.class);
 
@@ -326,15 +326,14 @@ public class HRegionServer implements Cl
 
   protected final int numRegionsToReport;
 
-  // Remote HMaster
-  private RegionServerStatusProtocol hbaseMaster;
+  // Stub to do region server status calls against the master.
+  private RegionServerStatusService.BlockingInterface rssStub;
+  // RPC client. Used to make the stub above that does region server status checking.
+  RpcClient rpcClient;
 
   // Server to handle client requests. Default access so can be accessed by
   // unit tests.
-  RpcServer rpcServer;
-
-  // RPC client for communicating with master
-  RpcClientEngine rpcClientEngine;
+  RpcServerInterface rpcServer;
 
   private final InetSocketAddress isa;
   private UncaughtExceptionHandler uncaughtExceptionHandler;
@@ -460,15 +459,12 @@ public class HRegionServer implements Cl
   throws IOException, InterruptedException {
     this.fsOk = true;
     this.conf = conf;
-    // Set how many times to retry talking to another server over HConnection.
-    HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);
     this.isOnline = false;
     checkCodecs(this.conf);
 
     // do we use checksum verification in the hbase? If hbase checksum verification
     // is enabled, then we automatically switch off hdfs checksum verification.
-    this.useHBaseChecksum = conf.getBoolean(
-      HConstants.HBASE_CHECKSUM_VERIFICATION, false);
+    this.useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, false);
 
     // Config'ed params
     this.numRetries = conf.getInt("hbase.client.retries.number", 10);
@@ -506,18 +502,17 @@ public class HRegionServer implements Cl
     if (initialIsa.getAddress() == null) {
       throw new IllegalArgumentException("Failed resolve of " + initialIsa);
     }
-
     this.rand = new Random(initialIsa.hashCode());
-    this.rpcServer = HBaseServerRPC.getServer(AdminProtocol.class, this,
-        new Class<?>[]{ClientProtocol.class,
-            AdminProtocol.class, HBaseRPCErrorHandler.class,
-            OnlineRegions.class},
-        initialIsa.getHostName(), // BindAddress is IP we got for this server.
-        initialIsa.getPort(),
-        conf.getInt("hbase.regionserver.handler.count", 10),
-        conf.getInt("hbase.regionserver.metahandler.count", 10),
-        conf.getBoolean("hbase.rpc.verbose", false),
-        conf, HConstants.QOS_THRESHOLD);
+    String name = "regionserver/" + initialIsa.toString();
+    // Set how many times to retry talking to another server over HConnection.
+    HConnectionManager.setServerSideHConnectionRetries(this.conf, name, LOG);
+    this.rpcServer = new RpcServer(this, name, getServices(),
+      /*HBaseRPCErrorHandler.class, OnlineRegions.class},*/
+      initialIsa, // BindAddress is IP we got for this server.
+      conf.getInt("hbase.regionserver.handler.count", 10),
+      conf.getInt("hbase.regionserver.metahandler.count", 10),
+      conf, HConstants.QOS_THRESHOLD);
+
     // Set our address.
     this.isa = this.rpcServer.getListenerAddress();
 
@@ -543,6 +538,20 @@ public class HRegionServer implements Cl
   }
 
   /**
+   * @return list of blocking services and their security info classes that this server supports
+   */
+  private List<BlockingServiceAndInterface> getServices() {
+    List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(2);
+    bssi.add(new BlockingServiceAndInterface(
+        ClientProtos.ClientService.newReflectiveBlockingService(this),
+        ClientProtos.ClientService.BlockingInterface.class));
+    bssi.add(new BlockingServiceAndInterface(
+        AdminProtos.AdminService.newReflectiveBlockingService(this),
+        AdminProtos.AdminService.BlockingInterface.class));
+    return bssi;
+  }
+
+  /**
    * Run test on configured codecs to make sure supporting libs are in place.
    * @param c
    * @throws IOException
@@ -706,7 +715,7 @@ public class HRegionServer implements Cl
     movedRegionsCleaner = MovedRegionsCleaner.createAndStart(this);
 
     // Setup RPC client for master communication
-    rpcClientEngine = new ProtobufRpcClientEngine(conf, clusterId);
+    rpcClient = new RpcClient(conf, clusterId);
   }
 
   /**
@@ -870,10 +879,10 @@ public class HRegionServer implements Cl
     }
 
     // Make sure the proxy is down.
-    if (this.hbaseMaster != null) {
-      this.hbaseMaster = null;
+    if (this.rssStub != null) {
+      this.rssStub = null;
     }
-    this.rpcClientEngine.close();
+    this.rpcClient.stop();
     this.leases.close();
 
     if (!killed) {
@@ -920,7 +929,7 @@ public class HRegionServer implements Cl
         this.serverNameFromMasterPOV.getVersionedBytes());
       request.setServer(ProtobufUtil.toServerName(sn));
       request.setLoad(sl);
-      this.hbaseMaster.regionServerReport(null, request.build());
+      this.rssStub.regionServerReport(null, request.build());
     } catch (ServiceException se) {
       IOException ioe = ProtobufUtil.getRemoteException(se);
       if (ioe instanceof YouAreDeadException) {
@@ -929,7 +938,9 @@ public class HRegionServer implements Cl
       }
       // Couldn't connect to the master, get location from zk and reconnect
       // Method blocks until new master is found or we are stopped
-      getMaster();
+      Pair<ServerName, RegionServerStatusService.BlockingInterface> p =
+        createRegionServerStatusStub();
+      this.rssStub = p.getSecond();
     }
   }
 
@@ -1078,9 +1089,11 @@ public class HRegionServer implements Cl
           String hostnameFromMasterPOV = e.getValue();
           this.serverNameFromMasterPOV = new ServerName(hostnameFromMasterPOV,
             this.isa.getPort(), this.startcode);
-          LOG.info("Master passed us hostname to use. Was=" +
-            this.isa.getHostName() + ", Now=" +
-            this.serverNameFromMasterPOV.getHostname());
+          if (!this.serverNameFromMasterPOV.equals(this.isa.getHostName())) {
+            LOG.info("Master passed us a different hostname to use; was=" +
+              this.isa.getHostName() + ", but now=" +
+              this.serverNameFromMasterPOV.getHostname());
+          }
           continue;
         }
         String value = e.getValue();
@@ -1301,10 +1314,10 @@ public class HRegionServer implements Cl
           FlushRequester requester = server.getFlushRequester();
           if (requester != null) {
             long randomDelay = rand.nextInt(RANGE_OF_DELAY) + MIN_DELAY_TIME;
-            LOG.info(getName() + " requesting flush for region " + r.getRegionNameAsString() + 
+            LOG.info(getName() + " requesting flush for region " + r.getRegionNameAsString() +
                 " after a delay of " + randomDelay);
             //Throttle the flushes by putting a delay. If we don't throttle, and there
-            //is a balanced write-load on the regions in a table, we might end up 
+            //is a balanced write-load on the regions in a table, we might end up
             //overwhelming the filesystem with too many flushes at once.
             requester.requestDelayedFlush(r, randomDelay);
           }
@@ -1629,7 +1642,7 @@ public class HRegionServer implements Cl
   }
 
   @Override
-  public RpcServer getRpcServer() {
+  public RpcServerInterface getRpcServer() {
     return rpcServer;
   }
 
@@ -1662,14 +1675,14 @@ public class HRegionServer implements Cl
         msg += "\nCause:\n" + StringUtils.stringifyException(cause);
       }
       // Report to the master but only if we have already registered with the master.
-      if (hbaseMaster != null && this.serverNameFromMasterPOV != null) {
+      if (rssStub != null && this.serverNameFromMasterPOV != null) {
         ReportRSFatalErrorRequest.Builder builder =
           ReportRSFatalErrorRequest.newBuilder();
         ServerName sn =
           ServerName.parseVersionedServerName(this.serverNameFromMasterPOV.getVersionedBytes());
         builder.setServer(ProtobufUtil.toServerName(sn));
         builder.setErrorMessage(msg);
-        hbaseMaster.reportRSFatalError(null, builder.build());
+        rssStub.reportRSFatalError(null, builder.build());
       }
     } catch (Throwable t) {
       LOG.warn("Unable to report fatal error to master", t);
@@ -1753,14 +1766,16 @@ public class HRegionServer implements Cl
    *
    * @return master + port, or null if server has been stopped
    */
-  private ServerName getMaster() {
-    ServerName masterServerName = null;
+  private Pair<ServerName, RegionServerStatusService.BlockingInterface>
+  createRegionServerStatusStub() {
+    ServerName sn = null;
     long previousLogTime = 0;
-    RegionServerStatusProtocol master = null;
+    RegionServerStatusService.BlockingInterface master = null;
     boolean refresh = false; // for the first time, use cached data
+    RegionServerStatusService.BlockingInterface intf = null;
     while (keepLooping() && master == null) {
-      masterServerName = this.masterAddressManager.getMasterAddress(refresh);
-      if (masterServerName == null) {
+      sn = this.masterAddressManager.getMasterAddress(refresh);
+      if (sn == null) {
         if (!keepLooping()) {
           // give up with no connection.
           LOG.debug("No master found and cluster is stopped; bailing out");
@@ -1769,22 +1784,20 @@ public class HRegionServer implements Cl
         LOG.debug("No master found; retry");
         previousLogTime = System.currentTimeMillis();
         refresh = true; // let's try pull it from ZK directly
-
         sleeper.sleep();
         continue;
       }
 
       InetSocketAddress isa =
-        new InetSocketAddress(masterServerName.getHostname(), masterServerName.getPort());
+        new InetSocketAddress(sn.getHostname(), sn.getPort());
 
       LOG.info("Attempting connect to Master server at " +
         this.masterAddressManager.getMasterAddress());
       try {
-        // Do initial RPC setup. The final argument indicates that the RPC
-        // should retry indefinitely.
-        master = HBaseClientRPC.waitForProxy(rpcClientEngine, RegionServerStatusProtocol.class,
-            isa, this.conf, -1, this.rpcTimeout, this.rpcTimeout);
-        LOG.info("Connected to master at " + isa);
+        BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(sn,
+            User.getCurrent(), this.rpcTimeout);
+        intf = RegionServerStatusService.newBlockingStub(channel);
+        break;
       } catch (IOException e) {
         e = e instanceof RemoteException ?
             ((RemoteException)e).unwrapRemoteException() : e;
@@ -1805,8 +1818,7 @@ public class HRegionServer implements Cl
         }
       }
     }
-    this.hbaseMaster = master;
-    return masterServerName;
+    return new Pair<ServerName, RegionServerStatusService.BlockingInterface>(sn, intf);
   }
 
   /**
@@ -1826,7 +1838,10 @@ public class HRegionServer implements Cl
    */
   private RegionServerStartupResponse reportForDuty() throws IOException {
     RegionServerStartupResponse result = null;
-    ServerName masterServerName = getMaster();
+    Pair<ServerName, RegionServerStatusService.BlockingInterface> p =
+      createRegionServerStatusStub();
+    this.rssStub = p.getSecond();
+    ServerName masterServerName = p.getFirst();
     if (masterServerName == null) return result;
     try {
       this.requestCount.set(0);
@@ -1838,7 +1853,7 @@ public class HRegionServer implements Cl
       request.setPort(port);
       request.setServerStartCode(this.startcode);
       request.setServerCurrentTime(now);
-      result = this.hbaseMaster.regionServerStartup(null, request.build());
+      result = this.rssStub.regionServerStartup(null, request.build());
     } catch (ServiceException se) {
       IOException ioe = ProtobufUtil.getRemoteException(se);
       if (ioe instanceof ClockOutOfSyncException) {
@@ -1858,7 +1873,7 @@ public class HRegionServer implements Cl
     try {
       GetLastFlushedSequenceIdRequest req =
         RequestConverter.buildGetLastFlushedSequenceIdRequest(region);
-      lastFlushedSequenceId = hbaseMaster.getLastFlushedSequenceId(null, req)
+      lastFlushedSequenceId = rssStub.getLastFlushedSequenceId(null, req)
       .getLastFlushedSequenceId();
     } catch (ServiceException e) {
       lastFlushedSequenceId = -1l;
@@ -3062,8 +3077,7 @@ public class HRegionServer implements Cl
         builder.setMoreResults(moreResults);
         return builder.build();
       } catch (Throwable t) {
-        if (scannerName != null &&
-            t instanceof NotServingRegionException) {
+        if (scannerName != null && t instanceof NotServingRegionException) {
           scanners.remove(scannerName);
         }
         throw convertThrowableToIOE(cleanup(t));

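Condensing the regionserver-to-master changes above into one hedged sketch: the region server now builds a protobuf blocking stub over an RpcClient channel and reports through it. Method names are taken from the hunks above; the wrapper class and the User import location are assumptions:

    import java.io.IOException;

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.ipc.RpcClient;
    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
    import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
    import org.apache.hadoop.hbase.security.User;

    import com.google.protobuf.BlockingRpcChannel;
    import com.google.protobuf.ServiceException;

    // Hypothetical sketch of the stub-creation pattern introduced above.
    public class RegionServerStatusStubSketch {
      public static RegionServerStatusService.BlockingInterface connect(
          RpcClient rpcClient, ServerName master, int rpcTimeout) throws IOException {
        BlockingRpcChannel channel =
          rpcClient.createBlockingRpcChannel(master, User.getCurrent(), rpcTimeout);
        return RegionServerStatusService.newBlockingStub(channel);
      }

      public static void report(RegionServerStatusService.BlockingInterface rssStub,
          RegionServerReportRequest request) throws ServiceException {
        // Reports now go through the generated blocking stub rather than a
        // hand-written RegionServerStatusProtocol proxy.
        rssStub.regionServerReport(null, request);
      }
    }
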
Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java Fri May  3 03:58:33 2013
@@ -24,9 +24,10 @@ import java.util.concurrent.ConcurrentMa
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.catalog.CatalogTracker;
 import org.apache.hadoop.hbase.executor.ExecutorService;
-import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.zookeeper.KeeperException;
@@ -80,7 +81,7 @@ public interface RegionServerServices ex
   /**
    * Returns a reference to the region server's RPC server
    */
-  public RpcServer getRpcServer();
+  public RpcServerInterface getRpcServer();
 
   /**
    * Get the regions that are currently being opened or closed in the RS

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java Fri May  3 03:58:33 2013
@@ -441,7 +441,7 @@ class FSHLog implements HLog, Syncable {
       }
     }
     if (m != null) {
-      LOG.info("Using getNumCurrentReplicas--HDFS-826");
+      if (LOG.isTraceEnabled()) LOG.trace("Using getNumCurrentReplicas--HDFS-826");
     }
     return m;
   }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java Fri May  3 03:58:33 2013
@@ -49,12 +49,12 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.exceptions.TableNotFoundException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -718,7 +718,7 @@ public class ReplicationSource extends T
         continue;
       }
       try {
-        AdminProtocol rrs = getRS();
+        AdminService.BlockingInterface rrs = getRS();
         ReplicationProtbufUtil.replicateWALEntry(rrs,
             Arrays.copyOf(this.entriesArray, currentNbEntries));
         if (this.lastLoggedPosition != this.repLogReader.getPosition()) {
@@ -848,7 +848,7 @@ public class ReplicationSource extends T
    * @return
    * @throws IOException
    */
-  private AdminProtocol getRS() throws IOException {
+  private AdminService.BlockingInterface getRS() throws IOException {
     if (this.currentPeers.size() == 0) {
       throw new IOException(this.peerClusterZnode + " has 0 region servers");
     }
@@ -867,7 +867,7 @@ public class ReplicationSource extends T
     Thread pingThread = new Thread() {
       public void run() {
         try {
-          AdminProtocol rrs = getRS();
+          AdminService.BlockingInterface rrs = getRS();
           // Dummy call which should fail
           ProtobufUtil.getServerInfo(rrs);
           latch.countDown();
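
ReplicationSource now ships edits through the protobuf-generated AdminService blocking stub instead of the old AdminProtocol wrapper. A minimal sketch of the shipping path under the new types, assuming the connection, sink server and buffered entries come from the source's usual bookkeeping:

  import java.io.IOException;
  import java.util.Arrays;

  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.HConnection;
  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.protobuf.ReplicationProtbufUtil;
  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
  import org.apache.hadoop.hbase.regionserver.wal.HLog;

  public class ShipEditsSketch {
    void ship(HConnection conn, ServerName sink, HLog.Entry[] buffered, int count)
    throws IOException {
      // Generated blocking stub for the peer region server we are replicating to.
      AdminService.BlockingInterface rrs = conn.getAdmin(sink);
      // Cheap liveness probe, as the ping thread in ReplicationSource does.
      ProtobufUtil.getServerInfo(rrs);
      // Ship the buffered WAL entries over the protobuf-based replicateWALEntry RPC.
      ReplicationProtbufUtil.replicateWALEntry(rrs, Arrays.copyOf(buffered, count));
    }
  }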

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBasePolicyProvider.java Fri May  3 03:58:33 2013
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hbase.security;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.AdminProtocol;
-import org.apache.hadoop.hbase.client.ClientProtocol;
-import org.apache.hadoop.hbase.MasterMonitorProtocol;
-import org.apache.hadoop.hbase.MasterAdminProtocol;
-import org.apache.hadoop.hbase.RegionServerStatusProtocol;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
@@ -33,11 +33,11 @@ import org.apache.hadoop.security.author
  */
 public class HBasePolicyProvider extends PolicyProvider {
   protected final static Service[] services = {
-      new Service("security.client.protocol.acl", ClientProtocol.class),
-      new Service("security.client.protocol.acl", AdminProtocol.class),
-      new Service("security.admin.protocol.acl", MasterMonitorProtocol.class),
-      new Service("security.admin.protocol.acl", MasterAdminProtocol.class),
-      new Service("security.masterregion.protocol.acl", RegionServerStatusProtocol.class)
+      new Service("security.client.protocol.acl", ClientService.BlockingInterface.class),
+      new Service("security.client.protocol.acl", AdminService.BlockingInterface.class),
+      new Service("security.admin.protocol.acl", MasterMonitorService.BlockingInterface.class),
+      new Service("security.admin.protocol.acl", MasterAdminService.BlockingInterface.class),
+      new Service("security.masterregion.protocol.acl", RegionServerStatusService.BlockingInterface.class)
   };
 
   @Override
@@ -45,13 +45,11 @@ public class HBasePolicyProvider extends
     return services;
   }
 
-  public static void init(Configuration conf,
-      ServiceAuthorizationManager authManager) {
+  public static void init(Configuration conf, ServiceAuthorizationManager authManager) {
     // set service-level authorization security policy
     System.setProperty("hadoop.policy.file", "hbase-policy.xml");
-    if (conf.getBoolean(
-          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
+    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
       authManager.refresh(conf, new HBasePolicyProvider());
     }
   }
-}
+}
\ No newline at end of file
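
The ACL entries are now keyed to the generated BlockingInterface classes, since those are what the RPC server actually registers. A small sketch of wiring the provider up when service-level authorization is switched on (the standalone Configuration and manager instances here are illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.security.HBasePolicyProvider;
  import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

  public class PolicyInitSketch {
    void enableAuthorization() {
      Configuration conf = new Configuration();
      // Without this flag, HBasePolicyProvider.init() leaves the ACLs untouched.
      conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, true);
      ServiceAuthorizationManager authManager = new ServiceAuthorizationManager();
      // Points hadoop.policy.file at hbase-policy.xml and refreshes the per-service ACLs.
      HBasePolicyProvider.init(conf, authManager);
    }
  }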

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcServer.java Fri May  3 03:58:33 2013
@@ -34,7 +34,7 @@ import javax.security.sasl.Sasl;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.ipc.HBaseServer;
+import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -96,11 +96,11 @@ public class HBaseSaslRpcServer {
   /** CallbackHandler for SASL DIGEST-MD5 mechanism */
   public static class SaslDigestCallbackHandler implements CallbackHandler {
     private SecretManager<TokenIdentifier> secretManager;
-    private HBaseServer.Connection connection;
+    private RpcServer.Connection connection;
 
     public SaslDigestCallbackHandler(
         SecretManager<TokenIdentifier> secretManager,
-        HBaseServer.Connection connection) {
+        RpcServer.Connection connection) {
       this.secretManager = secretManager;
       this.connection = connection;
     }

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/TokenProvider.java Fri May  3 03:58:33 2013
@@ -29,9 +29,9 @@ import org.apache.hadoop.hbase.Coprocess
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorService;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.ipc.HBaseServer;
-import org.apache.hadoop.hbase.ipc.RequestContext;
 import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.ipc.RequestContext;
+import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
@@ -60,8 +60,8 @@ public class TokenProvider implements Au
     if (env instanceof RegionCoprocessorEnvironment) {
       RegionCoprocessorEnvironment regionEnv =
           (RegionCoprocessorEnvironment)env;
-      RpcServer server = regionEnv.getRegionServerServices().getRpcServer();
-      SecretManager<?> mgr = ((HBaseServer)server).getSecretManager();
+      RpcServerInterface server = regionEnv.getRegionServerServices().getRpcServer();
+      SecretManager<?> mgr = ((RpcServer)server).getSecretManager();
       if (mgr instanceof AuthenticationTokenSecretManager) {
         secretManager = (AuthenticationTokenSecretManager)mgr;
       }
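
Because the services interface now returns RpcServerInterface while getSecretManager() lives on the concrete RpcServer, TokenProvider downcasts to reach it. A slightly more defensive variant of the same lookup (the instanceof guard on the server is an addition here, not part of the commit):

  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.ipc.RpcServer;
  import org.apache.hadoop.hbase.ipc.RpcServerInterface;
  import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
  import org.apache.hadoop.security.token.SecretManager;

  public class SecretManagerLookupSketch {
    AuthenticationTokenSecretManager find(RegionCoprocessorEnvironment regionEnv) {
      RpcServerInterface server = regionEnv.getRegionServerServices().getRpcServer();
      if (server instanceof RpcServer) {  // guard the downcast to the implementation
        SecretManager<?> mgr = ((RpcServer) server).getSecretManager();
        if (mgr instanceof AuthenticationTokenSecretManager) {
          return (AuthenticationTokenSecretManager) mgr;
        }
      }
      return null;
    }
  }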

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java Fri May  3 03:58:33 2013
@@ -66,13 +66,12 @@ import org.apache.hadoop.hbase.HTableDes
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnectable;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.MetaScanner;
 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
@@ -86,6 +85,7 @@ import org.apache.hadoop.hbase.io.hfile.
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterFileSystem;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
@@ -3062,8 +3062,7 @@ public class HBaseFsck extends Configure
     public synchronized Void call() throws IOException {
       errors.progress();
       try {
-        AdminProtocol server =
-          connection.getAdmin(rsinfo);
+        BlockingInterface server = connection.getAdmin(rsinfo);
 
         // list all online regions from this region server
         List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
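
hbck's per-region-server probe also moves to the generated admin stub. The same call sequence as a standalone sketch, with the connection and server name assumed to be supplied by the caller:

  import java.io.IOException;
  import java.util.List;

  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.HConnection;
  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;

  public class OnlineRegionsSketch {
    int countOnlineRegions(HConnection connection, ServerName rsinfo) throws IOException {
      BlockingInterface server = connection.getAdmin(rsinfo);
      // Same helper hbck uses to turn the protobuf response into HRegionInfo objects.
      List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
      return regions == null ? 0 : regions.size();
    }
  }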

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsckRepair.java Fri May  3 03:58:33 2013
@@ -32,14 +32,14 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
-import org.apache.hadoop.hbase.client.AdminProtocol;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
 import org.apache.zookeeper.KeeperException;
@@ -149,7 +149,7 @@ public class HBaseFsckRepair {
   public static void closeRegionSilentlyAndWait(HBaseAdmin admin,
       ServerName server, HRegionInfo region) throws IOException, InterruptedException {
     HConnection connection = admin.getConnection();
-    AdminProtocol rs = connection.getAdmin(server);
+    AdminService.BlockingInterface rs = connection.getAdmin(server);
     ProtobufUtil.closeRegion(rs, region.getRegionName(), false);
     long timeout = admin.getConfiguration()
       .getLong("hbase.hbck.close.timeout", 120000);
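
The silent-close helper follows the same pattern. A stripped-down sketch of the close call under the new types (the wait/timeout loop of the real method is omitted, and the boolean flag simply mirrors the call above):

  import java.io.IOException;

  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.ServerName;
  import org.apache.hadoop.hbase.client.HBaseAdmin;
  import org.apache.hadoop.hbase.client.HConnection;
  import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;

  public class CloseRegionSketch {
    void closeQuietly(HBaseAdmin admin, ServerName server, HRegionInfo region)
    throws IOException {
      HConnection connection = admin.getConnection();
      // Generated blocking stub for the region server hosting the region.
      AdminService.BlockingInterface rs = connection.getAdmin(server);
      // Ask that region server to close the region; the flag mirrors the call above.
      ProtobufUtil.closeRegion(rs, region.getRegionName(), false);
    }
  }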

Modified: hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java (original)
+++ hbase/branches/0.95/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java Fri May  3 03:58:33 2013
@@ -39,9 +39,9 @@ import org.apache.hadoop.hbase.exception
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.client.HConnectable;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
-import org.apache.hadoop.hbase.client.HConnectionManager.HConnectable;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;

Modified: hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java (original)
+++ hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java Fri May  3 03:58:33 2013
@@ -25,8 +25,10 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.AdminProtocol;
-import org.apache.hadoop.hbase.client.ClientProtocol;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService;
 import org.apache.hadoop.hbase.util.Threads;
 
 /**
@@ -94,26 +96,28 @@ public abstract class HBaseCluster imple
   }
 
   /**
-   * Returns an {@link MasterAdminProtocol} to the active master
+   * Returns a {@link MasterAdminService.BlockingInterface} to the active master
    */
-  public abstract MasterAdminProtocol getMasterAdmin()
+  public abstract MasterAdminService.BlockingInterface getMasterAdmin()
       throws IOException;
 
   /**
-   * Returns an {@link MasterMonitorProtocol} to the active master
+   * Returns a {@link MasterMonitorService.BlockingInterface} to the active master
    */
-  public abstract MasterMonitorProtocol getMasterMonitor()
-      throws IOException;
+  public abstract MasterMonitorService.BlockingInterface getMasterMonitor()
+  throws IOException;
 
   /**
    * Returns an AdminProtocol interface to the regionserver
    */
-  public abstract AdminProtocol getAdminProtocol(ServerName serverName) throws IOException;
+  public abstract AdminService.BlockingInterface getAdminProtocol(ServerName serverName)
+  throws IOException;
 
   /**
    * Returns a ClientProtocol interface to the regionserver
    */
-  public abstract ClientProtocol getClientProtocol(ServerName serverName) throws IOException;
+  public abstract ClientService.BlockingInterface getClientProtocol(ServerName serverName)
+  throws IOException;
 
   /**
    * Starts a new region server on the given hostname or if this is a mini/local cluster,

Modified: hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java (original)
+++ hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java Fri May  3 03:58:33 2013
@@ -29,10 +29,12 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hbase.client.AdminProtocol;
-import org.apache.hadoop.hbase.client.ClientProtocol;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.MasterAdminService;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.MasterMonitorService;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -372,12 +374,12 @@ public class MiniHBaseCluster extends HB
   }
 
   @Override
-  public MasterAdminProtocol getMasterAdmin() {
+  public MasterAdminService.BlockingInterface getMasterAdmin() {
     return this.hbaseCluster.getActiveMaster();
   }
 
   @Override
-  public MasterMonitorProtocol getMasterMonitor() {
+  public MasterMonitorService.BlockingInterface getMasterMonitor() {
     return this.hbaseCluster.getActiveMaster();
   }
 
@@ -712,12 +714,13 @@ public class MiniHBaseCluster extends HB
   }
 
   @Override
-  public AdminProtocol getAdminProtocol(ServerName serverName) throws IOException {
+  public AdminService.BlockingInterface getAdminProtocol(ServerName serverName) throws IOException {
     return getRegionServer(getRegionServerIndex(serverName));
   }
 
   @Override
-  public ClientProtocol getClientProtocol(ServerName serverName) throws IOException {
+  public ClientService.BlockingInterface getClientProtocol(ServerName serverName)
+  throws IOException {
     return getRegionServer(getRegionServerIndex(serverName));
   }
 }

Modified: hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java (original)
+++ hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestCatalogTracker.java Fri May  3 03:58:33 2013
@@ -22,8 +22,6 @@ import static org.junit.Assert.assertTru
 
 import java.io.IOException;
 import java.net.ConnectException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import junit.framework.Assert;
@@ -37,25 +35,20 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.MediumTests;
-import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.AdminProtocol;
-import org.apache.hadoop.hbase.client.ClientProtocol;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.RetriesExhaustedException;
-import org.apache.hadoop.hbase.client.ServerCallable;
+import org.apache.hadoop.hbase.exceptions.NotAllMetaRegionsOnlineException;
 import org.apache.hadoop.hbase.exceptions.ServerNotRunningYetException;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.Progressable;
 import org.apache.zookeeper.KeeperException;
@@ -63,7 +56,6 @@ import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.mockito.Mockito;
@@ -149,7 +141,8 @@ public class TestCatalogTracker {
    */
   @Test public void testInterruptWaitOnMeta()
   throws IOException, InterruptedException, ServiceException {
-    final ClientProtocol client = Mockito.mock(ClientProtocol.class);
+    final ClientProtos.ClientService.BlockingInterface client =
+      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
     HConnection connection = mockConnection(null, client);
     try {
       Mockito.when(client.get((RpcController)Mockito.any(), (GetRequest)Mockito.any())).
@@ -183,7 +176,8 @@ public class TestCatalogTracker {
   private void testVerifyMetaRegionLocationWithException(Exception ex)
   throws IOException, InterruptedException, KeeperException, ServiceException {
     // Mock an ClientProtocol.
-    final ClientProtocol implementation = Mockito.mock(ClientProtocol.class);
+    final ClientProtos.ClientService.BlockingInterface implementation =
+      Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
     HConnection connection = mockConnection(null, implementation);
     try {
       // If a 'get' is called on mocked interface, throw connection refused.
@@ -253,8 +247,8 @@ public class TestCatalogTracker {
     HConnection connection = Mockito.mock(HConnection.class);
     ServiceException connectException =
       new ServiceException(new ConnectException("Connection refused"));
-    final AdminProtocol implementation =
-      Mockito.mock(AdminProtocol.class);
+    final AdminProtos.AdminService.BlockingInterface implementation =
+      Mockito.mock(AdminProtos.AdminService.BlockingInterface.class);
     Mockito.when(implementation.getRegionInfo((RpcController)Mockito.any(),
       (GetRegionInfoRequest)Mockito.any())).thenThrow(connectException);
     Mockito.when(connection.getAdmin(Mockito.any(ServerName.class), Mockito.anyBoolean())).
@@ -309,22 +303,23 @@ public class TestCatalogTracker {
   }
 
   /**
-   * @param admin An {@link AdminProtocol} instance; you'll likely
+   * @param admin An {@link AdminProtos.AdminService.BlockingInterface} instance; you'll likely
    * want to pass a mocked HRS; can be null.
    * @param client A mocked ClientProtocol instance, can be null
    * @return Mock up a connection that returns a {@link Configuration} when
    * {@link HConnection#getConfiguration()} is called, a 'location' when
    * {@link HConnection#getRegionLocation(byte[], byte[], boolean)} is called,
-   * and that returns the passed {@link AdminProtocol} instance when
+   * and that returns the passed {@link AdminProtos.AdminService.BlockingInterface} instance when
    * {@link HConnection#getAdmin(ServerName)} is called, returns the passed
-   * {@link ClientProtocol} instance when {@link HConnection#getClient(ServerName)}
-   * is called (Be sure call
+   * {@link ClientProtos.ClientService.BlockingInterface} instance when
+   * {@link HConnection#getClient(ServerName)} is called (Be sure to call
    * {@link HConnectionManager#deleteConnection(org.apache.hadoop.conf.Configuration)}
    * when done with this mocked Connection.
    * @throws IOException
    */
-  private HConnection mockConnection(final AdminProtocol admin,
-      final ClientProtocol client) throws IOException {
+  private HConnection mockConnection(final AdminProtos.AdminService.BlockingInterface admin,
+      final ClientProtos.ClientService.BlockingInterface client)
+  throws IOException {
     HConnection connection =
       HConnectionTestingUtility.getMockedConnection(UTIL.getConfiguration());
     Mockito.doNothing().when(connection).close();
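
The catalog tracker tests now mock the generated blocking stubs directly. A minimal sketch of stubbing a get() on the client service in the style of testInterruptWaitOnMeta (the empty GetResponse returned here is illustrative):

  import com.google.protobuf.RpcController;
  import com.google.protobuf.ServiceException;

  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
  import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
  import org.mockito.Mockito;

  public class MockClientServiceSketch {
    ClientProtos.ClientService.BlockingInterface newMock() throws ServiceException {
      ClientProtos.ClientService.BlockingInterface client =
        Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
      // Stub the protobuf get() call; the generated stub takes a controller plus request.
      Mockito.when(client.get((RpcController) Mockito.any(), (GetRequest) Mockito.any())).
        thenReturn(GetResponse.newBuilder().build());
      return client;
    }
  }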

Modified: hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java (original)
+++ hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditor.java Fri May  3 03:58:33 2013
@@ -30,12 +30,14 @@ import java.util.concurrent.atomic.Atomi
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.ScannerCallable;
-import org.apache.hadoop.hbase.ipc.HBaseClient;
-import org.apache.hadoop.hbase.ipc.HBaseServer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -43,8 +45,6 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.log4j.Level;
 
 /**
  * Test {@link MetaReader}, {@link MetaEditor}.
@@ -71,9 +71,6 @@ public class TestMetaReaderEditor {
   };
 
   @BeforeClass public static void beforeClass() throws Exception {
-    ((Log4JLogger)HBaseServer.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)HBaseClient.LOG).getLogger().setLevel(Level.ALL);
-    ((Log4JLogger)ScannerCallable.LOG).getLogger().setLevel(Level.ALL);
     UTIL.startMiniCluster(3);
 
     Configuration c = new Configuration(UTIL.getConfiguration());

Modified: hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java?rev=1478639&r1=1478638&r2=1478639&view=diff
==============================================================================
--- hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java (original)
+++ hbase/branches/0.95/hbase-server/src/test/java/org/apache/hadoop/hbase/catalog/TestMetaReaderEditorNoCluster.java Fri May  3 03:58:33 2013
@@ -37,12 +37,12 @@ import org.apache.hadoop.hbase.HRegionLo
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MediumTests;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.ClientProtocol;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -138,7 +138,8 @@ public class TestMetaReaderEditorNoClust
     try {
       // Mock an ClientProtocol. Our mock implementation will fail a few
       // times when we go to open a scanner.
-      final ClientProtocol implementation = Mockito.mock(ClientProtocol.class);
+      final ClientProtos.ClientService.BlockingInterface implementation =
+        Mockito.mock(ClientProtos.ClientService.BlockingInterface.class);
       // When scan called throw IOE 'Server not running' a few times
       // before we return a scanner id.  Whats WEIRD is that these
       // exceptions do not show in the log because they are caught and only