Posted to commits@hbase.apache.org by sy...@apache.org on 2017/01/04 07:39:03 UTC

[01/50] [abbrv] hbase git commit: HBASE-11392 add/remove peer requests should be routed through master

Repository: hbase
Updated Branches:
  refs/heads/hbase-12439 3c35a722d -> 05ab41d1b


http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-protocol-shaded/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index b283ed9..384ac67 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -32,6 +32,7 @@ import "ClusterStatus.proto";
 import "ErrorHandling.proto";
 import "Procedure.proto";
 import "Quota.proto";
+import "Replication.proto";
 
 /* Column-level protobufs */
 
@@ -846,4 +847,12 @@ service MasterService {
   /** returns a list of procedures */
   rpc ListProcedures(ListProceduresRequest)
     returns(ListProceduresResponse);
+
+  /** Add a replication peer */
+  rpc AddReplicationPeer(AddReplicationPeerRequest)
+    returns(AddReplicationPeerResponse);
+
+  /** Remove a replication peer */
+  rpc RemoveReplicationPeer(RemoveReplicationPeerRequest)
+    returns(RemoveReplicationPeerResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-protocol-shaded/src/main/protobuf/Replication.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Replication.proto b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
new file mode 100644
index 0000000..0bdf2c0
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
@@ -0,0 +1,42 @@
+ /**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "ReplicationProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "ZooKeeper.proto";
+
+message AddReplicationPeerRequest {
+  required string peer_id = 1;
+  required ReplicationPeer peer_config = 2;
+}
+
+message AddReplicationPeerResponse {
+}
+
+message RemoveReplicationPeerRequest {
+  required string peer_id = 1;
+}
+
+message RemoveReplicationPeerResponse {
+}
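
For context only (not part of the patch): once Replication.proto is compiled into the shaded ReplicationProtos class, an add/remove peer request can be routed through the master via the new MasterService RPCs roughly as sketched below. The stub variable, cluster key and peer id are made up, and the ZooKeeperProtos.ReplicationPeer builder (with its clusterkey field) is assumed from the imported ZooKeeper.proto.

    // Hedged sketch against the generated shaded protobuf classes and a MasterService blocking stub.
    ZooKeeperProtos.ReplicationPeer peer = ZooKeeperProtos.ReplicationPeer.newBuilder()
        .setClusterkey("zk1.example.com:2181:/hbase")   // cluster key of the slave cluster (made up)
        .build();
    AddReplicationPeerRequest addReq = AddReplicationPeerRequest.newBuilder()
        .setPeerId("1")
        .setPeerConfig(peer)
        .build();
    masterStub.addReplicationPeer(null, addReq);        // handled by MasterRpcServices on the active master
    masterStub.removeReplicationPeer(null,
        RemoveReplicationPeerRequest.newBuilder().setPeerId("1").build());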

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 9abcd52..5067b3b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 
@@ -1827,4 +1828,45 @@ public interface MasterObserver extends Coprocessor {
   void postBalanceRSGroup(final ObserverContext<MasterCoprocessorEnvironment> ctx,
                           String groupName, boolean balancerRan) throws IOException;
 
+  /**
+   * Called before adding a replication peer
+   * @param ctx the environment to interact with the framework and master
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @throws IOException on failure
+   */
+  default void preAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId, ReplicationPeerConfig peerConfig) throws IOException {
+  }
+
+  /**
+   * Called after adding a replication peer
+   * @param ctx the environment to interact with the framework and master
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @throws IOException on failure
+   */
+  default void postAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId, ReplicationPeerConfig peerConfig) throws IOException {
+  }
+
+  /**
+   * Called before removing a replication peer
+   * @param ctx the environment to interact with the framework and master
+   * @param peerId a short name that identifies the peer
+   * @throws IOException on failure
+   */
+  default void preRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+  }
+
+  /**
+   * Called after removing a replication peer
+   * @param ctx the environment to interact with the framework and master
+   * @param peerId a short name that identifies the peer
+   * @throws IOException on failure
+   */
+  default void postRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+  }
 }
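
As an illustration (not from this patch), a deployer-written coprocessor can hook the new default methods to audit or veto peer changes. A minimal sketch follows; it assumes the no-op BaseMasterObserver base class from this code base, and the rejection policy is invented for the example.

    import java.io.IOException;

    import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class PeerGuardObserver extends BaseMasterObserver {
      @Override
      public void preAddReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
          String peerId, ReplicationPeerConfig peerConfig) throws IOException {
        // Example policy: only allow simple numeric peer ids; throwing aborts the add.
        if (!peerId.matches("\\d+")) {
          throw new IOException("Rejecting replication peer id: " + peerId);
        }
      }
    }

Loaded via hbase.coprocessor.master.classes, such an observer runs before HMaster hands the request to ReplicationManager, which is the same point where AccessController plugs in its permission checks further down in this change.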

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index c5c246b..da35da1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -119,6 +119,7 @@ import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure;
 import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
+import org.apache.hadoop.hbase.master.replication.ReplicationManager;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
@@ -138,7 +139,12 @@ import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionSplitPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
+import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
 import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -291,6 +297,9 @@ public class HMaster extends HRegionServer implements MasterServices {
   // manager of assignment nodes in zookeeper
   private AssignmentManager assignmentManager;
 
+  // manager of replication
+  private ReplicationManager replicationManager;
+
   // buffer for "fatal error" notices from region servers
   // in the cluster. This is only used for assisting
   // operations/debugging.
@@ -640,6 +649,8 @@ public class HMaster extends HRegionServer implements MasterServices {
       this.balancer, this.service, this.metricsMaster,
       this.tableLockManager, tableStateManager);
 
+    this.replicationManager = new ReplicationManager(conf, zooKeeper, this);
+
     this.regionServerTracker = new RegionServerTracker(zooKeeper, this, this.serverManager);
     this.regionServerTracker.start();
 
@@ -3135,4 +3146,30 @@ public class HMaster extends HRegionServer implements MasterServices {
   public FavoredNodesManager getFavoredNodesManager() {
     return favoredNodesManager;
   }
+
+  @Override
+  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
+      throws ReplicationException, IOException {
+    if (cpHost != null) {
+      cpHost.preAddReplicationPeer(peerId, peerConfig);
+    }
+    LOG.info(getClientIdAuditPrefix() + " creating replication peer, id=" + peerId + ", config="
+        + peerConfig);
+    this.replicationManager.addReplicationPeer(peerId, peerConfig);
+    if (cpHost != null) {
+      cpHost.postAddReplicationPeer(peerId, peerConfig);
+    }
+  }
+
+  @Override
+  public void removeReplicationPeer(String peerId) throws ReplicationException, IOException {
+    if (cpHost != null) {
+      cpHost.preRemoveReplicationPeer(peerId);
+    }
+    LOG.info(getClientIdAuditPrefix() + " removing replication peer, id=" + peerId);
+    this.replicationManager.removeReplicationPeer(peerId);
+    if (cpHost != null) {
+      cpHost.postRemoveReplicationPeer(peerId);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index a18068d..97fbe67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.security.User;
@@ -1645,4 +1646,45 @@ public class MasterCoprocessorHost
     });
   }
 
+  public void preAddReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.preAddReplicationPeer(ctx, peerId, peerConfig);
+      }
+    });
+  }
+
+  public void postAddReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig)
+      throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.postAddReplicationPeer(ctx, peerId, peerConfig);
+      }
+    });
+  }
+
+  public void preRemoveReplicationPeer(final String peerId) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.preRemoveReplicationPeer(ctx, peerId);
+      }
+    });
+  }
+
+  public void postRemoveReplicationPeer(final String peerId) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.postRemoveReplicationPeer(ctx, peerId);
+      }
+    });
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 2990076..afd807c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
@@ -86,7 +87,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessController;
 import org.apache.hadoop.hbase.security.visibility.VisibilityController;
@@ -1638,4 +1644,27 @@ public class MasterRpcServices extends RSRpcServices
     }
     return null;
   }
+
+  @Override
+  public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
+      AddReplicationPeerRequest request) throws ServiceException {
+    try {
+      master.addReplicationPeer(request.getPeerId(),
+        ReplicationSerDeHelper.convert(request.getPeerConfig()));
+      return AddReplicationPeerResponse.newBuilder().build();
+    } catch (ReplicationException | IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
+      RemoveReplicationPeerRequest request) throws ServiceException {
+    try {
+      master.removeReplicationPeer(request.getPeerId());
+      return RemoveReplicationPeerResponse.newBuilder().build();
+    } catch (ReplicationException | IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 7845101..5fc9d16 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -39,6 +39,8 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 
 import com.google.protobuf.Service;
 
@@ -415,4 +417,18 @@ public interface MasterServices extends Server {
    * @return Favored Nodes Manager
    */
   public FavoredNodesManager getFavoredNodesManager();
+
+  /**
+   * Add a new replication peer for replicating data to a slave cluster
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication slave cluster
+   */
+  void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
+      throws ReplicationException, IOException;
+
+  /**
+   * Removes a peer and stops the replication
+   * @param peerId a short name that identifies the peer
+   */
+  void removeReplicationPeer(String peerId) throws ReplicationException, IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
new file mode 100644
index 0000000..748f7af
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationFactory;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeers;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+
+/**
+ * Manages and performs all replication admin operations.
+ * Used to add/remove a replication peer.
+ */
+@InterfaceAudience.Private
+public class ReplicationManager {
+
+  private final Configuration conf;
+  private final ZooKeeperWatcher zkw;
+  private final ReplicationQueuesClient replicationQueuesClient;
+  private final ReplicationPeers replicationPeers;
+
+  public ReplicationManager(Configuration conf, ZooKeeperWatcher zkw, Abortable abortable)
+      throws IOException {
+    this.conf = conf;
+    this.zkw = zkw;
+    try {
+      this.replicationQueuesClient = ReplicationFactory
+          .getReplicationQueuesClient(new ReplicationQueuesClientArguments(conf, abortable, zkw));
+      this.replicationQueuesClient.init();
+      this.replicationPeers = ReplicationFactory.getReplicationPeers(zkw, conf,
+        this.replicationQueuesClient, abortable);
+      this.replicationPeers.init();
+    } catch (Exception e) {
+      throw new IOException("Failed to construct ReplicationManager", e);
+    }
+  }
+
+  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
+      throws ReplicationException {
+    checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
+      peerConfig.getTableCFsMap());
+    this.replicationPeers.registerPeer(peerId, peerConfig);
+  }
+
+  public void removeReplicationPeer(String peerId) throws ReplicationException {
+    this.replicationPeers.unregisterPeer(peerId);
+  }
+
+  /**
+   * Setting a namespace in the peer config means that all tables in that namespace
+   * will be replicated to the peer cluster.
+   *
+   * 1. If you have already set a namespace in the peer config, then you can't set any table
+   *    of that namespace in the peer config.
+   * 2. If you have already set a table in the peer config, then you can't set that table's
+   *    namespace in the peer config.
+   *
+   * @param namespaces the set of namespaces to be replicated
+   * @param tableCfs the map of replicated tables and their column families
+   * @throws ReplicationException if the namespaces and table-cfs configs conflict
+   */
+  private void checkNamespacesAndTableCfsConfigConflict(Set<String> namespaces,
+      Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
+    if (namespaces == null || namespaces.isEmpty()) {
+      return;
+    }
+    if (tableCfs == null || tableCfs.isEmpty()) {
+      return;
+    }
+    for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
+      TableName table = entry.getKey();
+      if (namespaces.contains(table.getNamespaceAsString())) {
+        throw new ReplicationException(
+            "Table-cfs config conflict with namespaces config in peer");
+      }
+    }
+  }
+}
\ No newline at end of file
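
To make the conflict rule above concrete, here is a small sketch (not from the patch) of a peer config that checkNamespacesAndTableCfsConfigConflict would reject: namespace ns1 is replicated as a whole, so also listing a table from ns1 in the table-cfs map is a conflict. The setters used (setClusterKey, setNamespaces, setTableCFsMap) are assumed from the ReplicationPeerConfig client API; the cluster key is made up.

    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
    rpc.setClusterKey("zk1.example.com:2181:/hbase");        // slave cluster key (made up)
    rpc.setNamespaces(Collections.singleton("ns1"));         // replicate every table in ns1
    Map<TableName, List<String>> tableCfs = new HashMap<>();
    tableCfs.put(TableName.valueOf("ns1:t1"), null);         // a table inside ns1: conflicts
    rpc.setTableCFsMap(tableCfs);
    // addReplicationPeer("1", rpc) now fails with
    // ReplicationException("Table-cfs config conflict with namespaces config in peer")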

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index d9afbc8..0452883 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -105,6 +105,7 @@ import org.apache.hadoop.hbase.regionserver.ScannerContext;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
@@ -2695,4 +2696,16 @@ public class AccessController extends BaseMasterAndRegionObserver
                                 String groupName) throws IOException {
     requirePermission(getActiveUser(ctx), "balanceRSGroup", Action.ADMIN);
   }
+
+  @Override
+  public void preAddReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId, ReplicationPeerConfig peerConfig) throws IOException {
+    requirePermission(getActiveUser(ctx), "addReplicationPeer", Action.ADMIN);
+  }
+
+  @Override
+  public void preRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+    requirePermission(getActiveUser(ctx), "removeReplicationPeer", Action.ADMIN);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 7363fb9..10c73a6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
@@ -76,8 +77,9 @@ public class TestReplicationAdmin {
    */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.startMiniZKCluster();
+    TEST_UTIL.startMiniCluster();
     Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
     admin = new ReplicationAdmin(conf);
   }
 
@@ -86,7 +88,7 @@ public class TestReplicationAdmin {
     if (admin != null) {
       admin.close();
     }
-    TEST_UTIL.shutdownMiniZKCluster();
+    TEST_UTIL.shutdownMiniCluster();
   }
 
   /**
@@ -105,7 +107,7 @@ public class TestReplicationAdmin {
     // try adding the same (fails)
     try {
       admin.addPeer(ID_ONE, rpc1, null);
-    } catch (IllegalArgumentException iae) {
+    } catch (Exception e) {
       // OK!
     }
     assertEquals(1, admin.getPeersCount());
@@ -113,14 +115,14 @@ public class TestReplicationAdmin {
     try {
       admin.removePeer(ID_SECOND);
       fail();
-    } catch (IllegalArgumentException iae) {
+    } catch (Exception iae) {
       // OK!
     }
     assertEquals(1, admin.getPeersCount());
     // Add a second since multi-slave is supported
     try {
       admin.addPeer(ID_SECOND, rpc2, null);
-    } catch (IllegalStateException iae) {
+    } catch (Exception iae) {
       fail();
     }
     assertEquals(2, admin.getPeersCount());
@@ -170,7 +172,7 @@ public class TestReplicationAdmin {
     try {
       admin.addPeer(ID_ONE, rpc1, null);
       fail();
-    } catch (ReplicationException e) {
+    } catch (Exception e) {
       // OK!
     }
     repQueues.removeQueue(ID_ONE);
@@ -181,7 +183,7 @@ public class TestReplicationAdmin {
     try {
       admin.addPeer(ID_ONE, rpc2, null);
       fail();
-    } catch (ReplicationException e) {
+    } catch (Exception e) {
       // OK!
     }
     repQueues.removeAllQueues();
@@ -422,7 +424,7 @@ public class TestReplicationAdmin {
   }
 
   @Test
-  public void testNamespacesAndTableCfsConfigConflict() throws ReplicationException {
+  public void testNamespacesAndTableCfsConfigConflict() throws Exception {
     String ns1 = "ns1";
     String ns2 = "ns2";
     TableName tab1 = TableName.valueOf("ns1:tabl");
@@ -471,7 +473,7 @@ public class TestReplicationAdmin {
   }
 
   @Test
-  public void testPeerBandwidth() throws ReplicationException {
+  public void testPeerBandwidth() throws Exception {
     ReplicationPeerConfig rpc = new ReplicationPeerConfig();
     rpc.setClusterKey(KEY_ONE);
     admin.addPeer(ID_ONE, rpc);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index aec4057..55138a0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -40,6 +40,8 @@ import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -380,4 +382,13 @@ public class MockNoopMasterServices implements MasterServices, Server {
   public MasterProcedureManagerHost getMasterProcedureManagerHost() {
     return null;
   }
+
+  @Override
+  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
+      throws ReplicationException {
+  }
+
+  @Override
+  public void removeReplicationPeer(String peerId) throws ReplicationException {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
index 7f2fb08..2e83c56 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterNoCluster.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.HConnectionTestingUtility;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -275,8 +276,8 @@ public class TestMasterNoCluster {
       void initClusterSchemaService() throws IOException, InterruptedException {}
 
       @Override
-      void initializeZKBasedSystemTrackers() throws IOException,
-      InterruptedException, KeeperException, CoordinatedStateException {
+      void initializeZKBasedSystemTrackers() throws IOException, InterruptedException,
+          KeeperException, CoordinatedStateException {
         super.initializeZKBasedSystemTrackers();
         // Record a newer server in server manager at first
         getServerManager().recordNewServerWithLock(newServer, ServerLoad.EMPTY_SERVERLOAD);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
index b3739fb..474039b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationBase.java
@@ -123,18 +123,18 @@ public class TestReplicationBase {
     utility2 = new HBaseTestingUtility(conf2);
     utility2.setZkCluster(miniZK);
     zkw2 = new ZooKeeperWatcher(conf2, "cluster2", null, true);
-
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(utility2.getClusterKey());
-    admin.addPeer("2", rpc, null);
-
     LOG.info("Setup second Zk");
+
     CONF_WITH_LOCALFS = HBaseConfiguration.create(conf1);
     utility1.startMiniCluster(2);
     // Have a bunch of slave servers, because inter-cluster shipping logic uses number of sinks
     // as a component in deciding maximum number of parallel batches to send to the peer cluster.
     utility2.startMiniCluster(4);
 
+    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+    rpc.setClusterKey(utility2.getClusterKey());
+    admin.addPeer("2", rpc, null);
+
     HTableDescriptor table = new HTableDescriptor(tableName);
     HColumnDescriptor fam = new HColumnDescriptor(famName);
     fam.setMaxVersions(100);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
index af0e357..a680f70 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
@@ -130,14 +130,14 @@ public class TestReplicationWithTags {
     utility2 = new HBaseTestingUtility(conf2);
     utility2.setZkCluster(miniZK);
 
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(utility2.getClusterKey());
-    replicationAdmin.addPeer("2", rpc, null);
-
     LOG.info("Setup second Zk");
     utility1.startMiniCluster(2);
     utility2.startMiniCluster(2);
 
+    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+    rpc.setClusterKey(utility2.getClusterKey());
+    replicationAdmin.addPeer("2", rpc, null);
+
     HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
     HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
     fam.setMaxVersions(3);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
index 6fcccaf..c9f4319 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
@@ -106,14 +106,14 @@ public class TestSerialReplication {
     utility2.setZkCluster(miniZK);
     new ZooKeeperWatcher(conf2, "cluster2", null, true);
 
+    utility1.startMiniCluster(1, 10);
+    utility2.startMiniCluster(1, 1);
+
     ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
     ReplicationPeerConfig rpc = new ReplicationPeerConfig();
     rpc.setClusterKey(utility2.getClusterKey());
     admin1.addPeer("1", rpc, null);
 
-    utility1.startMiniCluster(1, 10);
-    utility2.startMiniCluster(1, 1);
-
     utility1.getHBaseAdmin().setBalancerRunning(false, true);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 33ff094..a0f6f29 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2870,4 +2870,34 @@ public class TestAccessController extends SecureTestUtil {
     verifyAllowed(action1, SUPERUSER, USER_ADMIN);
     verifyDenied(action1, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
+
+  @Test
+  public void testAddReplicationPeer() throws Exception {
+    AccessTestAction action = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preAddReplicationPeer(ObserverContext.createAndPrepare(CP_ENV, null),
+          "test", null);
+        return null;
+      }
+    };
+
+    verifyAllowed(action, SUPERUSER, USER_ADMIN);
+    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
+
+  @Test
+  public void testRemoveReplicationPeer() throws Exception {
+    AccessTestAction action = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preRemoveReplicationPeer(ObserverContext.createAndPrepare(CP_ENV, null),
+          "test");
+        return null;
+      }
+    };
+
+    verifyAllowed(action, SUPERUSER, USER_ADMIN);
+    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
index fafa500..56a7260 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
@@ -128,14 +128,16 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
     TEST_UTIL1 = new HBaseTestingUtility(conf1);
     TEST_UTIL1.setZkCluster(miniZK);
     zkw2 = new ZooKeeperWatcher(conf1, "cluster2", null, true);
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(TEST_UTIL1.getClusterKey());
-    replicationAdmin.addPeer("2", rpc, null);
 
     TEST_UTIL.startMiniCluster(1);
     // Wait for the labels table to become available
     TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
     TEST_UTIL1.startMiniCluster(1);
+
+    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+    rpc.setClusterKey(TEST_UTIL1.getClusterKey());
+    replicationAdmin.addPeer("2", rpc, null);
+
     HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
     HColumnDescriptor desc = new HColumnDescriptor(fam);
     desc.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
index a62a281..31b74fb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
@@ -177,14 +177,16 @@ public class TestVisibilityLabelsReplication {
     TEST_UTIL1 = new HBaseTestingUtility(conf1);
     TEST_UTIL1.setZkCluster(miniZK);
     zkw2 = new ZooKeeperWatcher(conf1, "cluster2", null, true);
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(TEST_UTIL1.getClusterKey());
-    replicationAdmin.addPeer("2", rpc, null);
 
     TEST_UTIL.startMiniCluster(1);
     // Wait for the labels table to become available
     TEST_UTIL.waitTableEnabled(LABELS_TABLE_NAME.getName(), 50000);
     TEST_UTIL1.startMiniCluster(1);
+
+    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+    rpc.setClusterKey(TEST_UTIL1.getClusterKey());
+    replicationAdmin.addPeer("2", rpc, null);
+
     Admin hBaseAdmin = TEST_UTIL.getHBaseAdmin();
     HTableDescriptor table = new HTableDescriptor(TABLE_NAME);
     HColumnDescriptor desc = new HColumnDescriptor(fam);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index e222875..a71d916 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -116,6 +116,8 @@ In case the table goes out of date, the unit tests which check for accuracy of p
 |        | setUserQuota(Table level) | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
 |        | setTableQuota | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
 |        | setNamespaceQuota | superuser\|global(A)
+|        | addReplicationPeer | superuser\|global(A)
+|        | removeReplicationPeer | superuser\|global(A)
 | Region | openRegion | superuser\|global(A)
 |        | closeRegion | superuser\|global(A)
 |        | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)


[07/50] [abbrv] hbase git commit: HBASE-5401 PerformanceEvaluation generates 10x the number of expected mappers (Yi Liang)

Posted by sy...@apache.org.
HBASE-5401 PerformanceEvaluation generates 10x the number of expected mappers (Yi Liang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d787155f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d787155f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d787155f

Branch: refs/heads/hbase-12439
Commit: d787155fd24c576b66663220372dbb7286d5e291
Parents: a1d2ff4
Author: Michael Stack <st...@apache.org>
Authored: Wed Dec 21 11:55:48 2016 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Wed Dec 21 11:55:48 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/PerformanceEvaluation.java     | 24 ++++++++------------
 .../hadoop/hbase/TestPerformanceEvaluation.java |  4 ++--
 2 files changed, 12 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d787155f/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 5439bae..7d94a02 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -509,10 +509,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
   }
 
   /**
-   * Per client, how many tasks will we run?  We divide number of rows by this number and have the
-   * client do the resulting count in a map task.
+   * Each client has one mapper to do the work, and the client does the resulting count in a single map task.
    */
-  static int TASKS_PER_CLIENT = 10;
 
   static String JOB_INPUT_FILENAME = "input.txt";
 
@@ -542,17 +540,15 @@ public class PerformanceEvaluation extends Configured implements Tool {
     Hash h = MurmurHash.getInstance();
     int perClientRows = (opts.totalRows / opts.numClientThreads);
     try {
-      for (int i = 0; i < TASKS_PER_CLIENT; i++) {
-        for (int j = 0; j < opts.numClientThreads; j++) {
-          TestOptions next = new TestOptions(opts);
-          next.startRow = (j * perClientRows) + (i * (perClientRows/10));
-          next.perClientRunRows = perClientRows / 10;
-          String s = MAPPER.writeValueAsString(next);
-          LOG.info("Client=" + j + ", maptask=" + i + ", input=" + s);
-          byte[] b = Bytes.toBytes(s);
-          int hash = h.hash(new ByteArrayHashKey(b, 0, b.length), -1);
-          m.put(hash, s);
-        }
+      for (int j = 0; j < opts.numClientThreads; j++) {
+        TestOptions next = new TestOptions(opts);
+        next.startRow = j * perClientRows;
+        next.perClientRunRows = perClientRows;
+        String s = MAPPER.writeValueAsString(next);
+        LOG.info("Client=" + j + ", input=" + s);
+        byte[] b = Bytes.toBytes(s);
+        int hash = h.hash(new ByteArrayHashKey(b, 0, b.length), -1);
+        m.put(hash, s);
       }
       for (Map.Entry<Integer, String> e: m.entrySet()) {
         out.println(e.getValue());
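
The effect of removing TASKS_PER_CLIENT: each client thread now becomes exactly one mapper input covering its whole contiguous slice of rows, instead of ten smaller slices. As a worked example with made-up numbers, opts.totalRows = 1,000,000 and opts.numClientThreads = 4 give perClientRows = 250,000, so the generated inputs have startRow = 0, 250000, 500000 and 750000 with perClientRunRows = 250,000 each: four map tasks rather than forty, which is the 10x reduction the JIRA subject describes.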

http://git-wip-us.apache.org/repos/asf/hbase/blob/d787155f/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
index f774742..cb7fdad 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java
@@ -90,7 +90,7 @@ public class TestPerformanceEvaluation {
       while (br.readLine() != null) {
         count++;
       }
-      assertEquals(clients * PerformanceEvaluation.TASKS_PER_CLIENT, count);
+      assertEquals(clients, count);
     } finally {
       dis.close();
     }
@@ -183,4 +183,4 @@ public class TestPerformanceEvaluation {
       assertTrue(e.getCause() instanceof NoSuchElementException);
     }
   }
-}
\ No newline at end of file
+}


[21/50] [abbrv] hbase git commit: HBASE-16010 Put draining function through Admin API (Matt Warhaftig)

Posted by sy...@apache.org.
HBASE-16010 Put draining function through Admin API (Matt Warhaftig)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/992e5717
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/992e5717
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/992e5717

Branch: refs/heads/hbase-12439
Commit: 992e5717d4e4deeef46836acea323a312b1e0851
Parents: 8fb9a91
Author: Jerry He <je...@apache.org>
Authored: Fri Dec 23 13:41:36 2016 -0800
Committer: Jerry He <je...@apache.org>
Committed: Fri Dec 23 13:41:36 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/Admin.java   |   19 +
 .../hbase/client/ConnectionImplementation.java  |   25 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   58 +
 .../shaded/protobuf/generated/MasterProtos.java | 3995 +++++++++++++++++-
 .../src/main/protobuf/Master.proto              |   33 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   50 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   51 +
 .../hadoop/hbase/master/MasterServices.java     |   20 +
 .../apache/hadoop/hbase/client/TestAdmin2.java  |   77 +
 .../hbase/master/MockNoopMasterServices.java    |   15 +
 .../hbase/zookeeper/TestZooKeeperACL.java       |   21 +
 11 files changed, 4217 insertions(+), 147 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index e7ea4d9..fe3960f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1858,4 +1858,23 @@ public interface Admin extends Abortable, Closeable {
    */
   default void disableReplicationPeer(final String peerId) throws IOException {
   }
+
+  /**
+   * Mark region servers as draining to prevent additional regions from getting assigned to them.
+   * @param servers List of region servers to drain.
+   */
+  void drainRegionServers(List<ServerName> servers) throws IOException;
+
+  /**
+   * List region servers that are marked as draining and will not get additional regions assigned to them.
+   * @return List of draining region servers.
+   */
+  List<ServerName> listDrainingRegionServers() throws IOException;
+
+  /**
+   * Remove drain from region servers to allow additional region assignments.
+   * @param servers List of region servers to remove drain from.
+   */
+  void removeDrainFromRegionServers(List<ServerName> servers) throws IOException;
+
 }
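
For illustration (not part of the patch), the three new Admin methods could be driven as in the sketch below when decommissioning a node; the configuration, connection handling and server name string are made up, while ConnectionFactory, ServerName.valueOf and the Admin interface above are the existing client API.

    // Hedged sketch: drain one server, inspect the drain list, then undo the drain.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      List<ServerName> toDrain = Collections.singletonList(
          ServerName.valueOf("rs1.example.com,16020,1482530000000"));
      admin.drainRegionServers(toDrain);            // no new regions get assigned to rs1
      System.out.println("Draining now: " + admin.listDrainingRegionServers());
      admin.removeDrainFromRegionServers(toDrain);  // allow assignments to rs1 again
    }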

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index ff939aa..a597be3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -78,12 +78,18 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.BlockingInterface;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
@@ -1669,6 +1675,25 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
           DisableReplicationPeerRequest request) throws ServiceException {
         return stub.disableReplicationPeer(controller, request);
       }
+
+      @Override
+      public ListDrainingRegionServersResponse listDrainingRegionServers(RpcController controller,
+          ListDrainingRegionServersRequest request) throws ServiceException {
+        return stub.listDrainingRegionServers(controller, request);
+      }
+
+      @Override
+      public DrainRegionServersResponse drainRegionServers(RpcController controller,
+          DrainRegionServersRequest request) throws ServiceException {
+        return stub.drainRegionServers(controller, request);
+      }
+
+      @Override
+      public RemoveDrainFromRegionServersResponse removeDrainFromRegionServers(
+          RpcController controller, RemoveDrainFromRegionServersRequest request)
+          throws ServiceException {
+        return stub.removeDrainFromRegionServers(controller, request);
+      }
     };
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 61f7435..ec4a5c1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -120,6 +120,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTabl
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
@@ -140,6 +141,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedur
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
@@ -155,6 +157,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyName
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
@@ -3794,4 +3797,59 @@ public class HBaseAdmin implements Admin {
       }
     });
   }
+
+  @Override
+  public void drainRegionServers(List<ServerName> servers) throws IOException {
+    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>();
+    for (ServerName server : servers) {
+      // Parse to ServerName to do simple validation.
+      ServerName.parseServerName(server.toString());
+      pbServers.add(ProtobufUtil.toServerName(server));
+    }
+
+    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+      @Override
+      public Void rpcCall() throws ServiceException {
+        DrainRegionServersRequest req =
+            DrainRegionServersRequest.newBuilder().addAllServerName(pbServers).build();
+        master.drainRegionServers(getRpcController(), req);
+        return null;
+      }
+    });
+  }
+
+  @Override
+  public List<ServerName> listDrainingRegionServers() throws IOException {
+    return executeCallable(new MasterCallable<List<ServerName>>(getConnection(),
+              getRpcControllerFactory()) {
+      @Override
+      public List<ServerName> rpcCall() throws ServiceException {
+        ListDrainingRegionServersRequest req = ListDrainingRegionServersRequest.newBuilder().build();
+        List<ServerName> servers = new ArrayList<ServerName>();
+        for (HBaseProtos.ServerName server : master.listDrainingRegionServers(null, req)
+            .getServerNameList()) {
+          servers.add(ProtobufUtil.toServerName(server));
+        }
+        return servers;
+      }
+    });
+  }
+
+  @Override
+  public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
+    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>();
+    for (ServerName server : servers) {
+      pbServers.add(ProtobufUtil.toServerName(server));
+    }
+
+    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+      @Override
+      public Void rpcCall() throws ServiceException {
+        RemoveDrainFromRegionServersRequest req = RemoveDrainFromRegionServersRequest.newBuilder()
+            .addAllServerName(pbServers).build();
+        master.removeDrainFromRegionServers(getRpcController(), req);
+        return null;
+      }
+    });
+  }
 }

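A minimal usage sketch (not part of the patch) for the drain API added to HBaseAdmin above, assuming the Admin interface declares these methods as the @Override annotations indicate. The hostname, port and startcode below are placeholders.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DrainRegionServersExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Placeholder region server identity: host, port, startcode.
      ServerName server = ServerName.valueOf("rs1.example.com", 16020, 1483500000000L);

      // Mark the server as draining so no new regions are assigned to it.
      admin.drainRegionServers(Arrays.asList(server));

      // List the servers currently marked as draining.
      List<ServerName> draining = admin.listDrainingRegionServers();
      System.out.println("draining: " + draining);

      // Clear the drain mark again.
      admin.removeDrainFromRegionServers(Arrays.asList(server));
    }
  }
}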

[16/50] [abbrv] hbase git commit: HBASE-17335 enable/disable replication peer requests should be routed through master

Posted by sy...@apache.org.
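A minimal sketch (not part of the patch) of how the request messages generated below can be built and parsed; this is what the master-routed RPC path carries on the wire. The peer id "1" is a placeholder.

import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;

public class ReplicationPeerRequestExample {
  public static void main(String[] args) throws Exception {
    // peer_id is the only (required) field on the enable request.
    EnableReplicationPeerRequest enableReq =
        EnableReplicationPeerRequest.newBuilder().setPeerId("1").build();

    // Round-trip through the wire format, as the RPC layer would.
    byte[] bytes = enableReq.toByteArray();
    EnableReplicationPeerRequest parsed = EnableReplicationPeerRequest.parseFrom(bytes);
    System.out.println("peer_id = " + parsed.getPeerId());

    // The disable request carries the same single required field.
    DisableReplicationPeerRequest disableReq =
        DisableReplicationPeerRequest.newBuilder().setPeerId("1").build();
    System.out.println("initialized = " + disableReq.isInitialized());
  }
}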
http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
index c91796d..7a17985 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
@@ -2075,6 +2075,1854 @@ public final class ReplicationProtos {
 
   }
 
+  public interface EnableReplicationPeerRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.EnableReplicationPeerRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    boolean hasPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    java.lang.String getPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.EnableReplicationPeerRequest}
+   */
+  public  static final class EnableReplicationPeerRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.EnableReplicationPeerRequest)
+      EnableReplicationPeerRequestOrBuilder {
+    // Use EnableReplicationPeerRequest.newBuilder() to construct.
+    private EnableReplicationPeerRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private EnableReplicationPeerRequest() {
+      peerId_ = "";
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private EnableReplicationPeerRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000001;
+              peerId_ = bs;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_EnableReplicationPeerRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_EnableReplicationPeerRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int PEER_ID_FIELD_NUMBER = 1;
+    private volatile java.lang.Object peerId_;
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public boolean hasPeerId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public java.lang.String getPeerId() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          peerId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        peerId_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasPeerId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, peerId_);
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, peerId_);
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest) obj;
+
+      boolean result = true;
+      result = result && (hasPeerId() == other.hasPeerId());
+      if (hasPeerId()) {
+        result = result && getPeerId()
+            .equals(other.getPeerId());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasPeerId()) {
+        hash = (37 * hash) + PEER_ID_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerId().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.EnableReplicationPeerRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.EnableReplicationPeerRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_EnableReplicationPeerRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_EnableReplicationPeerRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        peerId_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_EnableReplicationPeerRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.peerId_ = peerId_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance()) return this;
+        if (other.hasPeerId()) {
+          bitField0_ |= 0x00000001;
+          peerId_ = other.peerId_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasPeerId()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.lang.Object peerId_ = "";
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public boolean hasPeerId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public java.lang.String getPeerId() {
+        java.lang.Object ref = peerId_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            peerId_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getPeerIdBytes() {
+        java.lang.Object ref = peerId_;
+        if (ref instanceof String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          peerId_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerId(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder clearPeerId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        peerId_ = getDefaultInstance().getPeerId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerIdBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.EnableReplicationPeerRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.EnableReplicationPeerRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<EnableReplicationPeerRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<EnableReplicationPeerRequest>() {
+      public EnableReplicationPeerRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new EnableReplicationPeerRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<EnableReplicationPeerRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<EnableReplicationPeerRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface EnableReplicationPeerResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.EnableReplicationPeerResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.EnableReplicationPeerResponse}
+   */
+  public  static final class EnableReplicationPeerResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.EnableReplicationPeerResponse)
+      EnableReplicationPeerResponseOrBuilder {
+    // Use EnableReplicationPeerResponse.newBuilder() to construct.
+    private EnableReplicationPeerResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private EnableReplicationPeerResponse() {
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private EnableReplicationPeerResponse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_EnableReplicationPeerResponse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_EnableReplicationPeerResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.Builder.class);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse) obj;
+
+      boolean result = true;
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.EnableReplicationPeerResponse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.EnableReplicationPeerResponse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_EnableReplicationPeerResponse_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_EnableReplicationPeerResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_EnableReplicationPeerResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.EnableReplicationPeerResponse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.EnableReplicationPeerResponse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<EnableReplicationPeerResponse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<EnableReplicationPeerResponse>() {
+      public EnableReplicationPeerResponse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new EnableReplicationPeerResponse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<EnableReplicationPeerResponse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<EnableReplicationPeerResponse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface DisableReplicationPeerRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.DisableReplicationPeerRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    boolean hasPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    java.lang.String getPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.DisableReplicationPeerRequest}
+   */
+  public  static final class DisableReplicationPeerRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.DisableReplicationPeerRequest)
+      DisableReplicationPeerRequestOrBuilder {
+    // Use DisableReplicationPeerRequest.newBuilder() to construct.
+    private DisableReplicationPeerRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private DisableReplicationPeerRequest() {
+      peerId_ = "";
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private DisableReplicationPeerRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000001;
+              peerId_ = bs;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_DisableReplicationPeerRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_DisableReplicationPeerRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int PEER_ID_FIELD_NUMBER = 1;
+    private volatile java.lang.Object peerId_;
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public boolean hasPeerId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public java.lang.String getPeerId() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          peerId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        peerId_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasPeerId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, peerId_);
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, peerId_);
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest) obj;
+
+      boolean result = true;
+      result = result && (hasPeerId() == other.hasPeerId());
+      if (hasPeerId()) {
+        result = result && getPeerId()
+            .equals(other.getPeerId());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasPeerId()) {
+        hash = (37 * hash) + PEER_ID_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerId().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.DisableReplicationPeerRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.DisableReplicationPeerRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_DisableReplicationPeerRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_DisableReplicationPeerRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        peerId_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_DisableReplicationPeerRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.peerId_ = peerId_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance()) return this;
+        if (other.hasPeerId()) {
+          bitField0_ |= 0x00000001;
+          peerId_ = other.peerId_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasPeerId()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.lang.Object peerId_ = "";
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public boolean hasPeerId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public java.lang.String getPeerId() {
+        java.lang.Object ref = peerId_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            peerId_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getPeerIdBytes() {
+        java.lang.Object ref = peerId_;
+        if (ref instanceof String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          peerId_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerId(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder clearPeerId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        peerId_ = getDefaultInstance().getPeerId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerIdBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.DisableReplicationPeerRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.DisableReplicationPeerRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DisableReplicationPeerRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<DisableReplicationPeerRequest>() {
+      public DisableReplicationPeerRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new DisableReplicationPeerRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DisableReplicationPeerRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DisableReplicationPeerRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface DisableReplicationPeerResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.DisableReplicationPeerResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.DisableReplicationPeerResponse}
+   */
+  public  static final class DisableReplicationPeerResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.DisableReplicationPeerResponse)
+      DisableReplicationPeerResponseOrBuilder {
+    // Use DisableReplicationPeerResponse.newBuilder() to construct.
+    private DisableReplicationPeerResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private DisableReplicationPeerResponse() {
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private DisableReplicationPeerResponse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_DisableReplicationPeerResponse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_DisableReplicationPeerResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.Builder.class);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse) obj;
+
+      boolean result = true;
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.DisableReplicationPeerResponse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.DisableReplicationPeerResponse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_DisableReplicationPeerResponse_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_DisableReplicationPeerResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_DisableReplicationPeerResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.DisableReplicationPeerResponse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.DisableReplicationPeerResponse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DisableReplicationPeerResponse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<DisableReplicationPeerResponse>() {
+      public DisableReplicationPeerResponse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new DisableReplicationPeerResponse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DisableReplicationPeerResponse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DisableReplicationPeerResponse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_AddReplicationPeerRequest_descriptor;
   private static final 
@@ -2095,6 +3943,26 @@ public final class ReplicationProtos {
   private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_RemoveReplicationPeerResponse_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_EnableReplicationPeerRequest_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_EnableReplicationPeerRequest_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_EnableReplicationPeerResponse_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_EnableReplicationPeerResponse_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_DisableReplicationPeerRequest_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Fie

<TRUNCATED>

[41/50] [abbrv] hbase git commit: HBASE-17387 Reduce the overhead of exception report in RegionActionResult for multi()

Posted by sy...@apache.org.
HBASE-17387 Reduce the overhead of exception report in RegionActionResult for multi()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c477b2d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c477b2d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c477b2d

Branch: refs/heads/hbase-12439
Commit: 1c477b2df9f3cb10063d66d8f14ba9eb83bf9f4c
Parents: 7572e96
Author: tedyu <yu...@gmail.com>
Authored: Thu Dec 29 17:54:02 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Thu Dec 29 17:54:02 2016 -0800

----------------------------------------------------------------------
 .../hbase/regionserver/RSRpcServices.java       | 21 +++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c477b2d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 27fef8d..f550267 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -719,8 +719,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
     IOException sizeIOE = null;
     Object lastBlock = null;
+    ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = ResultOrException.newBuilder();
+    boolean hasResultOrException = false;
     for (ClientProtos.Action action : actions.getActionList()) {
-      ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = null;
+      hasResultOrException = false;
+      resultOrExceptionBuilder.clear();
       try {
         Result r = null;
 
@@ -749,8 +752,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           // use it for the response.
           //
           // This will create a copy in the builder.
-          resultOrExceptionBuilder = ResultOrException.newBuilder().
-              setException(ResponseConverter.buildException(sizeIOE));
+          hasResultOrException = true;
+          resultOrExceptionBuilder.setException(ResponseConverter.buildException(sizeIOE));
           resultOrExceptionBuilder.setIndex(action.getIndex());
           builder.addResultOrException(resultOrExceptionBuilder.build());
           if (cellScanner != null) {
@@ -774,7 +777,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             }
           }
         } else if (action.hasServiceCall()) {
-          resultOrExceptionBuilder = ResultOrException.newBuilder();
+          hasResultOrException = true;
           try {
             com.google.protobuf.Message result =
                 execServiceOnRegion(region, action.getServiceCall());
@@ -832,8 +835,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             pbResult = ProtobufUtil.toResult(r);
           }
           lastBlock = addSize(context, r, lastBlock);
-          resultOrExceptionBuilder =
-            ClientProtos.ResultOrException.newBuilder().setResult(pbResult);
+          hasResultOrException = true;
+          resultOrExceptionBuilder.setResult(pbResult);
         }
         // Could get to here and there was no result and no exception.  Presumes we added
         // a Put or Delete to the collecting Mutations List for adding later.  In this
@@ -841,10 +844,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         // down in the doBatchOp method call rather than up here.
       } catch (IOException ie) {
         rpcServer.getMetrics().exception(ie);
-        resultOrExceptionBuilder = ResultOrException.newBuilder().
-          setException(ResponseConverter.buildException(ie));
+        hasResultOrException = true;
+        resultOrExceptionBuilder.setException(ResponseConverter.buildException(ie));
       }
-      if (resultOrExceptionBuilder != null) {
+      if (hasResultOrException) {
         // Propagate index.
         resultOrExceptionBuilder.setIndex(action.getIndex());
         builder.addResultOrException(resultOrExceptionBuilder.build());
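
For illustration only (hypothetical stand-in types, not the HBase shaded protobuf classes): a minimal sketch of the builder-reuse pattern the patch above applies, where a single builder is allocated outside the loop, cleared for each action, and a boolean flag records whether it was populated before it is added to the response.

// Hypothetical builder; only the reuse pattern matters here.
final class ResultOrExceptionSketchBuilder {
  private String payload;
  ResultOrExceptionSketchBuilder clear() { payload = null; return this; }
  ResultOrExceptionSketchBuilder setResult(String r) { payload = "result:" + r; return this; }
  ResultOrExceptionSketchBuilder setException(String e) { payload = "exception:" + e; return this; }
  String build() { return payload; }
}

public class BuilderReuseSketch {
  public static void main(String[] args) {
    ResultOrExceptionSketchBuilder builder = new ResultOrExceptionSketchBuilder(); // allocated once
    boolean hasResultOrException;
    java.util.List<String> responses = new java.util.ArrayList<>();
    for (String action : new String[] { "get:row1", "put:row2", "broken" }) {
      hasResultOrException = false;
      builder.clear();                              // reset instead of creating a new builder per action
      if (action.startsWith("get") || action.startsWith("put")) {
        hasResultOrException = true;
        builder.setResult(action);
      } else {
        hasResultOrException = true;
        builder.setException("unsupported action " + action);
      }
      if (hasResultOrException) {
        responses.add(builder.build());             // only add when the builder was populated
      }
    }
    System.out.println(responses);
  }
}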


[27/50] [abbrv] hbase git commit: HBASE-17345 Implement batch

Posted by sy...@apache.org.
HBASE-17345 Implement batch


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8fa5b0b9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8fa5b0b9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8fa5b0b9

Branch: refs/heads/hbase-12439
Commit: 8fa5b0b946c01516076fa944a310b33224ff21a9
Parents: 8da7366
Author: zhangduo <zh...@apache.org>
Authored: Thu Dec 22 19:42:15 2016 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sun Dec 25 20:36:52 2016 +0800

----------------------------------------------------------------------
 .../client/AsyncBatchRpcRetryingCaller.java     | 476 +++++++++++++++++++
 .../client/AsyncMultiGetRpcRetryingCaller.java  | 407 ----------------
 .../client/AsyncRpcRetryingCallerFactory.java   |  39 +-
 .../AsyncScanSingleRegionRpcRetryingCaller.java |   2 +-
 .../AsyncSingleRequestRpcRetryingCaller.java    |   2 +-
 .../hadoop/hbase/client/AsyncTableBase.java     | 103 +++-
 .../hadoop/hbase/client/AsyncTableImpl.java     |   4 +-
 .../hadoop/hbase/client/ConnectionUtils.java    |  62 ++-
 .../hadoop/hbase/client/RawAsyncTableImpl.java  |   7 +-
 .../hbase/shaded/protobuf/RequestConverter.java |   5 +-
 .../client/AbstractTestAsyncTableScan.java      |  12 +-
 .../hbase/client/TestAsyncGetMultiThread.java   | 150 ------
 .../hbase/client/TestAsyncTableBatch.java       | 236 +++++++++
 .../client/TestAsyncTableGetMultiThreaded.java  | 149 ++++++
 .../hbase/client/TestAsyncTableMultiGet.java    | 163 -------
 15 files changed, 1032 insertions(+), 785 deletions(-)
----------------------------------------------------------------------
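
As a rough orientation before the diff (a hedged sketch with made-up types, not the HBase client API): the new AsyncBatchRpcRetryingCaller below groups the submitted actions by region server, sends one multi-style request per server, and then regroups and retries only the failed actions with backoff, bounded by the per-action attempt limit and the overall operation timeout. The toy example below shows only the group-by-server step.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Hypothetical locator and string "actions"; illustrates only the grouping step, not the real classes.
public class BatchGroupingSketch {

  // Pretend region locator: rows up to "m" live on server1, everything else on server2.
  private static String locateServer(String row) {
    return row.compareTo("m") <= 0 ? "server1" : "server2";
  }

  public static void main(String[] args) {
    List<String> rows = List.of("apple", "zebra", "mango");

    // server -> actions, loosely mirroring the actionsByServer map built in groupAndSend()
    Map<String, List<String>> actionsByServer = new HashMap<>();
    for (String row : rows) {
      actionsByServer.computeIfAbsent(locateServer(row), s -> new ArrayList<>()).add(row);
    }

    // One multi-style request per server; on failure those actions would be regrouped and retried.
    actionsByServer.forEach((server, actions) ->
        System.out.println("multi -> " + server + " : " + actions));
  }
}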


http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
new file mode 100644
index 0000000..6f0b8e9
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
@@ -0,0 +1,476 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.CellUtil.createCellScanner;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
+import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
+
+import io.netty.util.HashedWheelTimer;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.IdentityHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.CellScannable;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
+import org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import org.apache.hadoop.hbase.util.AtomicUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * Retry caller for batch.
+ * <p>
+ * Notice that the {@link #operationTimeoutNs} is the total time limit now, the same as for
+ * other single operations.
+ * <p>
+ * And the {@link #maxAttempts} is logically a limit for each single operation in the batch. In the
+ * implementation, we record a {@code tries} parameter for each operation group, and if a group is
+ * split into several sub groups when retrying, the sub groups inherit the {@code tries}. You can
+ * imagine the whole retrying process as a tree, with {@link #maxAttempts} limiting the depth of
+ * the tree.
+ */
+@InterfaceAudience.Private
+class AsyncBatchRpcRetryingCaller<T> {
+
+  private static final Log LOG = LogFactory.getLog(AsyncBatchRpcRetryingCaller.class);
+
+  private final HashedWheelTimer retryTimer;
+
+  private final AsyncConnectionImpl conn;
+
+  private final TableName tableName;
+
+  private final List<Action> actions;
+
+  private final List<CompletableFuture<T>> futures;
+
+  private final IdentityHashMap<Action, CompletableFuture<T>> action2Future;
+
+  private final IdentityHashMap<Action, List<ThrowableWithExtraContext>> action2Errors;
+
+  private final long pauseNs;
+
+  private final int maxAttempts;
+
+  private final long operationTimeoutNs;
+
+  private final long readRpcTimeoutNs;
+
+  private final long writeRpcTimeoutNs;
+
+  private final int startLogErrorsCnt;
+
+  private final long startNs;
+
+  // We cannot use HRegionLocation as the map key because the hashCode and equals methods of
+  // HRegionLocation only consider serverName.
+  private static final class RegionRequest {
+
+    public final HRegionLocation loc;
+
+    public final ConcurrentLinkedQueue<Action> actions = new ConcurrentLinkedQueue<>();
+
+    public RegionRequest(HRegionLocation loc) {
+      this.loc = loc;
+    }
+  }
+
+  private static final class ServerRequest {
+
+    public final ConcurrentMap<byte[], RegionRequest> actionsByRegion =
+        new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
+
+    public final AtomicLong rpcTimeoutNs;
+
+    public ServerRequest(long defaultRpcTimeoutNs) {
+      this.rpcTimeoutNs = new AtomicLong(defaultRpcTimeoutNs);
+    }
+
+    public void addAction(HRegionLocation loc, Action action, long rpcTimeoutNs) {
+      computeIfAbsent(actionsByRegion, loc.getRegionInfo().getRegionName(),
+        () -> new RegionRequest(loc)).actions.add(action);
+      // try to update the timeout to a larger value
+      if (this.rpcTimeoutNs.get() <= 0) {
+        return;
+      }
+      if (rpcTimeoutNs <= 0) {
+        this.rpcTimeoutNs.set(-1L);
+        return;
+      }
+      AtomicUtils.updateMax(this.rpcTimeoutNs, rpcTimeoutNs);
+    }
+  }
+
+  public AsyncBatchRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn,
+      TableName tableName, List<? extends Row> actions, long pauseNs, int maxRetries,
+      long operationTimeoutNs, long readRpcTimeoutNs, long writeRpcTimeoutNs,
+      int startLogErrorsCnt) {
+    this.retryTimer = retryTimer;
+    this.conn = conn;
+    this.tableName = tableName;
+    this.pauseNs = pauseNs;
+    this.maxAttempts = retries2Attempts(maxRetries);
+    this.operationTimeoutNs = operationTimeoutNs;
+    this.readRpcTimeoutNs = readRpcTimeoutNs;
+    this.writeRpcTimeoutNs = writeRpcTimeoutNs;
+    this.startLogErrorsCnt = startLogErrorsCnt;
+
+    this.actions = new ArrayList<>(actions.size());
+    this.futures = new ArrayList<>(actions.size());
+    this.action2Future = new IdentityHashMap<>(actions.size());
+    for (int i = 0, n = actions.size(); i < n; i++) {
+      Row rawAction = actions.get(i);
+      Action action = new Action(rawAction, i);
+      if (rawAction instanceof Append || rawAction instanceof Increment) {
+        action.setNonce(conn.getNonceGenerator().newNonce());
+      }
+      this.actions.add(action);
+      CompletableFuture<T> future = new CompletableFuture<>();
+      futures.add(future);
+      action2Future.put(action, future);
+    }
+    this.action2Errors = new IdentityHashMap<>();
+    this.startNs = System.nanoTime();
+  }
+
+  private long remainingTimeNs() {
+    return operationTimeoutNs - (System.nanoTime() - startNs);
+  }
+
+  private List<ThrowableWithExtraContext> removeErrors(Action action) {
+    synchronized (action2Errors) {
+      return action2Errors.remove(action);
+    }
+  }
+
+  private void logException(int tries, Supplier<Stream<RegionRequest>> regionsSupplier,
+      Throwable error, ServerName serverName) {
+    if (tries > startLogErrorsCnt) {
+      String regions =
+          regionsSupplier.get().map(r -> "'" + r.loc.getRegionInfo().getRegionNameAsString() + "'")
+              .collect(Collectors.joining(",", "[", "]"));
+      LOG.warn("Process batch for " + regions + " in " + tableName + " from " + serverName
+          + " failed, tries=" + tries,
+        error);
+    }
+  }
+
+  private String getExtraContextForError(ServerName serverName) {
+    return serverName != null ? serverName.getServerName() : "";
+  }
+
+  private void addError(Action action, Throwable error, ServerName serverName) {
+    List<ThrowableWithExtraContext> errors;
+    synchronized (action2Errors) {
+      errors = action2Errors.computeIfAbsent(action, k -> new ArrayList<>());
+    }
+    errors.add(new ThrowableWithExtraContext(error, EnvironmentEdgeManager.currentTime(),
+        getExtraContextForError(serverName)));
+  }
+
+  private void addError(Iterable<Action> actions, Throwable error, ServerName serverName) {
+    actions.forEach(action -> addError(action, error, serverName));
+  }
+
+  private void failOne(Action action, int tries, Throwable error, long currentTime, String extras) {
+    CompletableFuture<T> future = action2Future.get(action);
+    if (future.isDone()) {
+      return;
+    }
+    ThrowableWithExtraContext errorWithCtx =
+        new ThrowableWithExtraContext(error, currentTime, extras);
+    List<ThrowableWithExtraContext> errors = removeErrors(action);
+    if (errors == null) {
+      errors = Collections.singletonList(errorWithCtx);
+    } else {
+      errors.add(errorWithCtx);
+    }
+    future.completeExceptionally(new RetriesExhaustedException(tries - 1, errors));
+  }
+
+  private void failAll(Stream<Action> actions, int tries, Throwable error, ServerName serverName) {
+    long currentTime = EnvironmentEdgeManager.currentTime();
+    String extras = getExtraContextForError(serverName);
+    actions.forEach(action -> failOne(action, tries, error, currentTime, extras));
+  }
+
+  private void failAll(Stream<Action> actions, int tries) {
+    actions.forEach(action -> {
+      CompletableFuture<T> future = action2Future.get(action);
+      if (future.isDone()) {
+        return;
+      }
+      future.completeExceptionally(new RetriesExhaustedException(tries,
+          Optional.ofNullable(removeErrors(action)).orElse(Collections.emptyList())));
+    });
+  }
+
+  private ClientProtos.MultiRequest buildReq(Map<byte[], RegionRequest> actionsByRegion,
+      List<CellScannable> cells) throws IOException {
+    ClientProtos.MultiRequest.Builder multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
+    ClientProtos.RegionAction.Builder regionActionBuilder = ClientProtos.RegionAction.newBuilder();
+    ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
+    ClientProtos.MutationProto.Builder mutationBuilder = ClientProtos.MutationProto.newBuilder();
+    for (Map.Entry<byte[], RegionRequest> entry : actionsByRegion.entrySet()) {
+      // TODO: remove the extra for loop as we will iterate it in mutationBuilder.
+      if (!multiRequestBuilder.hasNonceGroup()) {
+        for (Action action : entry.getValue().actions) {
+          if (action.hasNonce()) {
+            multiRequestBuilder.setNonceGroup(conn.getNonceGenerator().getNonceGroup());
+            break;
+          }
+        }
+      }
+      regionActionBuilder.clear();
+      regionActionBuilder.setRegion(
+        RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, entry.getKey()));
+      regionActionBuilder = RequestConverter.buildNoDataRegionAction(entry.getKey(),
+        entry.getValue().actions, cells, regionActionBuilder, actionBuilder, mutationBuilder);
+      multiRequestBuilder.addRegionAction(regionActionBuilder.build());
+    }
+    return multiRequestBuilder.build();
+  }
+
+  @SuppressWarnings("unchecked")
+  private void onComplete(Action action, RegionRequest regionReq, int tries, ServerName serverName,
+      RegionResult regionResult, List<Action> failedActions) {
+    Object result = regionResult.result.get(action.getOriginalIndex());
+    if (result == null) {
+      LOG.error("Server " + serverName + " sent us neither result nor exception for row '"
+          + Bytes.toStringBinary(action.getAction().getRow()) + "' of "
+          + regionReq.loc.getRegionInfo().getRegionNameAsString());
+      addError(action, new RuntimeException("Invalid response"), serverName);
+      failedActions.add(action);
+    } else if (result instanceof Throwable) {
+      Throwable error = translateException((Throwable) result);
+      logException(tries, () -> Stream.of(regionReq), error, serverName);
+      if (error instanceof DoNotRetryIOException || tries >= maxAttempts) {
+        failOne(action, tries, error, EnvironmentEdgeManager.currentTime(),
+          getExtraContextForError(serverName));
+      } else {
+        failedActions.add(action);
+      }
+    } else {
+      action2Future.get(action).complete((T) result);
+    }
+  }
+
+  private void onComplete(Map<byte[], RegionRequest> actionsByRegion, int tries,
+      ServerName serverName, MultiResponse resp) {
+    List<Action> failedActions = new ArrayList<>();
+    actionsByRegion.forEach((rn, regionReq) -> {
+      RegionResult regionResult = resp.getResults().get(rn);
+      if (regionResult != null) {
+        regionReq.actions.forEach(
+          action -> onComplete(action, regionReq, tries, serverName, regionResult, failedActions));
+      } else {
+        Throwable t = resp.getException(rn);
+        Throwable error;
+        if (t == null) {
+          LOG.error(
+            "Server sent us neither results nor exceptions for " + Bytes.toStringBinary(rn));
+          error = new RuntimeException("Invalid response");
+        } else {
+          error = translateException(t);
+          logException(tries, () -> Stream.of(regionReq), error, serverName);
+          conn.getLocator().updateCachedLocation(regionReq.loc, error);
+          if (error instanceof DoNotRetryIOException || tries >= maxAttempts) {
+            failAll(regionReq.actions.stream(), tries, error, serverName);
+            return;
+          }
+          addError(regionReq.actions, error, serverName);
+          failedActions.addAll(regionReq.actions);
+        }
+      }
+    });
+    if (!failedActions.isEmpty()) {
+      tryResubmit(failedActions.stream(), tries);
+    }
+  }
+
+  private void send(Map<ServerName, ServerRequest> actionsByServer, int tries) {
+    long remainingNs;
+    if (operationTimeoutNs > 0) {
+      remainingNs = remainingTimeNs();
+      if (remainingNs <= 0) {
+        failAll(actionsByServer.values().stream().flatMap(m -> m.actionsByRegion.values().stream())
+            .flatMap(r -> r.actions.stream()),
+          tries);
+        return;
+      }
+    } else {
+      remainingNs = Long.MAX_VALUE;
+    }
+    actionsByServer.forEach((sn, serverReq) -> {
+      ClientService.Interface stub;
+      try {
+        stub = conn.getRegionServerStub(sn);
+      } catch (IOException e) {
+        onError(serverReq.actionsByRegion, tries, e, sn);
+        return;
+      }
+      ClientProtos.MultiRequest req;
+      List<CellScannable> cells = new ArrayList<>();
+      try {
+        req = buildReq(serverReq.actionsByRegion, cells);
+      } catch (IOException e) {
+        onError(serverReq.actionsByRegion, tries, e, sn);
+        return;
+      }
+      HBaseRpcController controller = conn.rpcControllerFactory.newController();
+      resetController(controller, Math.min(serverReq.rpcTimeoutNs.get(), remainingNs));
+      if (!cells.isEmpty()) {
+        controller.setCellScanner(createCellScanner(cells));
+      }
+      stub.multi(controller, req, resp -> {
+        if (controller.failed()) {
+          onError(serverReq.actionsByRegion, tries, controller.getFailed(), sn);
+        } else {
+          try {
+            onComplete(serverReq.actionsByRegion, tries, sn,
+              ResponseConverter.getResults(req, resp, controller.cellScanner()));
+          } catch (Exception e) {
+            onError(serverReq.actionsByRegion, tries, e, sn);
+            return;
+          }
+        }
+      });
+    });
+  }
+
+  private void onError(Map<byte[], RegionRequest> actionsByRegion, int tries, Throwable t,
+      ServerName serverName) {
+    Throwable error = translateException(t);
+    logException(tries, () -> actionsByRegion.values().stream(), error, serverName);
+    if (error instanceof DoNotRetryIOException || tries >= maxAttempts) {
+      failAll(actionsByRegion.values().stream().flatMap(r -> r.actions.stream()), tries, error,
+        serverName);
+      return;
+    }
+    List<Action> copiedActions = actionsByRegion.values().stream().flatMap(r -> r.actions.stream())
+        .collect(Collectors.toList());
+    addError(copiedActions, error, serverName);
+    tryResubmit(copiedActions.stream(), tries);
+  }
+
+  private void tryResubmit(Stream<Action> actions, int tries) {
+    long delayNs;
+    if (operationTimeoutNs > 0) {
+      long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS;
+      if (maxDelayNs <= 0) {
+        failAll(actions, tries);
+        return;
+      }
+      delayNs = Math.min(maxDelayNs, getPauseTime(pauseNs, tries - 1));
+    } else {
+      delayNs = getPauseTime(pauseNs, tries - 1);
+    }
+    retryTimer.newTimeout(t -> groupAndSend(actions, tries + 1), delayNs, TimeUnit.NANOSECONDS);
+  }
+
+  private long getRpcTimeoutNs(Action action) {
+    return action.getAction() instanceof Get ? readRpcTimeoutNs : writeRpcTimeoutNs;
+  }
+
+  private void groupAndSend(Stream<Action> actions, int tries) {
+    long locateTimeoutNs;
+    if (operationTimeoutNs > 0) {
+      locateTimeoutNs = remainingTimeNs();
+      if (locateTimeoutNs <= 0) {
+        failAll(actions, tries);
+        return;
+      }
+    } else {
+      locateTimeoutNs = -1L;
+    }
+    ConcurrentMap<ServerName, ServerRequest> actionsByServer = new ConcurrentHashMap<>();
+    ConcurrentLinkedQueue<Action> locateFailed = new ConcurrentLinkedQueue<>();
+    // Use the smaller of the two as the default timeout value, and increase it if any action in the
+    // group needs a larger timeout value.
+    long defaultRpcTimeoutNs;
+    if (readRpcTimeoutNs > 0) {
+      defaultRpcTimeoutNs =
+          writeRpcTimeoutNs > 0 ? Math.min(readRpcTimeoutNs, writeRpcTimeoutNs) : readRpcTimeoutNs;
+    } else {
+      defaultRpcTimeoutNs = writeRpcTimeoutNs > 0 ? writeRpcTimeoutNs : -1L;
+    }
+    CompletableFuture.allOf(actions
+        .map(action -> conn.getLocator().getRegionLocation(tableName, action.getAction().getRow(),
+          RegionLocateType.CURRENT, locateTimeoutNs).whenComplete((loc, error) -> {
+            if (error != null) {
+              error = translateException(error);
+              if (error instanceof DoNotRetryIOException) {
+                failOne(action, tries, error, EnvironmentEdgeManager.currentTime(), "");
+                return;
+              }
+              addError(action, error, null);
+              locateFailed.add(action);
+            } else {
+              computeIfAbsent(actionsByServer, loc.getServerName(),
+                () -> new ServerRequest(defaultRpcTimeoutNs)).addAction(loc, action,
+                  getRpcTimeoutNs(action));
+            }
+          }))
+        .toArray(CompletableFuture[]::new)).whenComplete((v, r) -> {
+          if (!actionsByServer.isEmpty()) {
+            send(actionsByServer, tries);
+          }
+          if (!locateFailed.isEmpty()) {
+            tryResubmit(locateFailed.stream(), tries);
+          }
+        });
+  }
+
+  public List<CompletableFuture<T>> call() {
+    groupAndSend(actions.stream(), 1);
+    return futures;
+  }
+}
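
Because call() hands back one CompletableFuture per submitted action, failures stay isolated: an action that exhausts its retries (or hits a DoNotRetryIOException) completes its own future exceptionally with a RetriesExhaustedException while the remaining actions can still succeed. A minimal consumption sketch, assuming Result values and a caller obtained through the factory below (variable names are illustrative only):

    List<CompletableFuture<Result>> futures = caller.call();
    CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).whenComplete((v, err) -> {
      for (CompletableFuture<Result> future : futures) {
        if (future.isCompletedExceptionally()) {
          // This single action ran out of retries; the other futures are unaffected.
        } else {
          Result result = future.getNow(null); // safe here, allOf has already completed
        }
      }
    });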

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMultiGetRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMultiGetRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMultiGetRpcRetryingCaller.java
deleted file mode 100644
index e1208c2..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMultiGetRpcRetryingCaller.java
+++ /dev/null
@@ -1,407 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
-import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
-
-import io.netty.util.HashedWheelTimer;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.IdentityHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HRegionLocation;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.MultiResponse.RegionResult;
-import org.apache.hadoop.hbase.client.RetriesExhaustedException.ThrowableWithExtraContext;
-import org.apache.hadoop.hbase.ipc.HBaseRpcController;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-/**
- * Retry caller for multi get.
- * <p>
- * Notice that, the {@link #operationTimeoutNs} is the total time limit now which is the same with
- * other single operations
- * <p>
- * And the {@link #maxAttempts} is a limit for each single get in the batch logically. In the
- * implementation, we will record a {@code tries} parameter for each operation group, and if it is
- * split to several groups when retrying, the sub groups will inherit {@code tries}. You can imagine
- * that the whole retrying process is a tree, and the {@link #maxAttempts} is the limit of the depth
- * of the tree.
- */
-@InterfaceAudience.Private
-class AsyncMultiGetRpcRetryingCaller {
-
-  private static final Log LOG = LogFactory.getLog(AsyncMultiGetRpcRetryingCaller.class);
-
-  private final HashedWheelTimer retryTimer;
-
-  private final AsyncConnectionImpl conn;
-
-  private final TableName tableName;
-
-  private final List<Get> gets;
-
-  private final List<CompletableFuture<Result>> futures;
-
-  private final IdentityHashMap<Get, CompletableFuture<Result>> get2Future;
-
-  private final IdentityHashMap<Get, List<ThrowableWithExtraContext>> get2Errors;
-
-  private final long pauseNs;
-
-  private final int maxAttempts;
-
-  private final long operationTimeoutNs;
-
-  private final long rpcTimeoutNs;
-
-  private final int startLogErrorsCnt;
-
-  private final long startNs;
-
-  // we can not use HRegionLocation as the map key because the hashCode and equals method of
-  // HRegionLocation only consider serverName.
-  private static final class RegionRequest {
-
-    public final HRegionLocation loc;
-
-    public final ConcurrentLinkedQueue<Get> gets = new ConcurrentLinkedQueue<>();
-
-    public RegionRequest(HRegionLocation loc) {
-      this.loc = loc;
-    }
-  }
-
-  public AsyncMultiGetRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn,
-      TableName tableName, List<Get> gets, long pauseNs, int maxRetries, long operationTimeoutNs,
-      long rpcTimeoutNs, int startLogErrorsCnt) {
-    this.retryTimer = retryTimer;
-    this.conn = conn;
-    this.tableName = tableName;
-    this.gets = gets;
-    this.pauseNs = pauseNs;
-    this.maxAttempts = retries2Attempts(maxRetries);
-    this.operationTimeoutNs = operationTimeoutNs;
-    this.rpcTimeoutNs = rpcTimeoutNs;
-    this.startLogErrorsCnt = startLogErrorsCnt;
-
-    this.futures = new ArrayList<>(gets.size());
-    this.get2Future = new IdentityHashMap<>(gets.size());
-    gets.forEach(
-      get -> futures.add(get2Future.computeIfAbsent(get, k -> new CompletableFuture<>())));
-    this.get2Errors = new IdentityHashMap<>();
-    this.startNs = System.nanoTime();
-  }
-
-  private long remainingTimeNs() {
-    return operationTimeoutNs - (System.nanoTime() - startNs);
-  }
-
-  private List<ThrowableWithExtraContext> removeErrors(Get get) {
-    synchronized (get2Errors) {
-      return get2Errors.remove(get);
-    }
-  }
-
-  private void logException(int tries, Supplier<Stream<RegionRequest>> regionsSupplier,
-      Throwable error, ServerName serverName) {
-    if (tries > startLogErrorsCnt) {
-      String regions =
-          regionsSupplier.get().map(r -> "'" + r.loc.getRegionInfo().getRegionNameAsString() + "'")
-              .collect(Collectors.joining(",", "[", "]"));
-      LOG.warn("Get data for " + regions + " in " + tableName + " from " + serverName
-          + " failed, tries=" + tries,
-        error);
-    }
-  }
-
-  private String getExtras(ServerName serverName) {
-    return serverName != null ? serverName.getServerName() : "";
-  }
-
-  private void addError(Get get, Throwable error, ServerName serverName) {
-    List<ThrowableWithExtraContext> errors;
-    synchronized (get2Errors) {
-      errors = get2Errors.computeIfAbsent(get, k -> new ArrayList<>());
-    }
-    errors.add(new ThrowableWithExtraContext(error, EnvironmentEdgeManager.currentTime(),
-        serverName != null ? serverName.toString() : ""));
-  }
-
-  private void addError(Iterable<Get> gets, Throwable error, ServerName serverName) {
-    gets.forEach(get -> addError(get, error, serverName));
-  }
-
-  private void failOne(Get get, int tries, Throwable error, long currentTime, String extras) {
-    CompletableFuture<Result> future = get2Future.get(get);
-    if (future.isDone()) {
-      return;
-    }
-    ThrowableWithExtraContext errorWithCtx =
-        new ThrowableWithExtraContext(error, currentTime, extras);
-    List<ThrowableWithExtraContext> errors = removeErrors(get);
-    if (errors == null) {
-      errors = Collections.singletonList(errorWithCtx);
-    } else {
-      errors.add(errorWithCtx);
-    }
-    future.completeExceptionally(new RetriesExhaustedException(tries, errors));
-  }
-
-  private void failAll(Stream<Get> gets, int tries, Throwable error, ServerName serverName) {
-    long currentTime = System.currentTimeMillis();
-    String extras = getExtras(serverName);
-    gets.forEach(get -> failOne(get, tries, error, currentTime, extras));
-  }
-
-  private void failAll(Stream<Get> gets, int tries) {
-    gets.forEach(get -> {
-      CompletableFuture<Result> future = get2Future.get(get);
-      if (future.isDone()) {
-        return;
-      }
-      future.completeExceptionally(new RetriesExhaustedException(tries,
-          Optional.ofNullable(removeErrors(get)).orElse(Collections.emptyList())));
-    });
-  }
-
-  private ClientProtos.MultiRequest buildReq(Map<byte[], RegionRequest> getsByRegion)
-      throws IOException {
-    ClientProtos.MultiRequest.Builder multiRequestBuilder = ClientProtos.MultiRequest.newBuilder();
-    for (Map.Entry<byte[], RegionRequest> entry : getsByRegion.entrySet()) {
-      ClientProtos.RegionAction.Builder regionActionBuilder =
-          ClientProtos.RegionAction.newBuilder().setRegion(
-            RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, entry.getKey()));
-      int index = 0;
-      for (Get get : entry.getValue().gets) {
-        regionActionBuilder.addAction(
-          ClientProtos.Action.newBuilder().setIndex(index).setGet(ProtobufUtil.toGet(get)));
-        index++;
-      }
-      multiRequestBuilder.addRegionAction(regionActionBuilder);
-    }
-    return multiRequestBuilder.build();
-  }
-
-  private void onComplete(Map<byte[], RegionRequest> getsByRegion, int tries, ServerName serverName,
-      MultiResponse resp) {
-    List<Get> failedGets = new ArrayList<>();
-    getsByRegion.forEach((rn, regionReq) -> {
-      RegionResult regionResult = resp.getResults().get(rn);
-      if (regionResult != null) {
-        int index = 0;
-        for (Get get : regionReq.gets) {
-          Object result = regionResult.result.get(index);
-          if (result == null) {
-            LOG.error("Server sent us neither result nor exception for row '"
-                + Bytes.toStringBinary(get.getRow()) + "' of " + Bytes.toStringBinary(rn));
-            addError(get, new RuntimeException("Invalid response"), serverName);
-            failedGets.add(get);
-          } else if (result instanceof Throwable) {
-            Throwable error = translateException((Throwable) result);
-            logException(tries, () -> Stream.of(regionReq), error, serverName);
-            if (error instanceof DoNotRetryIOException || tries >= maxAttempts) {
-              failOne(get, tries, error, EnvironmentEdgeManager.currentTime(),
-                getExtras(serverName));
-            } else {
-              failedGets.add(get);
-            }
-          } else {
-            get2Future.get(get).complete((Result) result);
-          }
-          index++;
-        }
-      } else {
-        Throwable t = resp.getException(rn);
-        Throwable error;
-        if (t == null) {
-          LOG.error(
-            "Server sent us neither results nor exceptions for " + Bytes.toStringBinary(rn));
-          error = new RuntimeException("Invalid response");
-        } else {
-          error = translateException(t);
-          logException(tries, () -> Stream.of(regionReq), error, serverName);
-          conn.getLocator().updateCachedLocation(regionReq.loc, error);
-          if (error instanceof DoNotRetryIOException || tries >= maxAttempts) {
-            failAll(regionReq.gets.stream(), tries, error, serverName);
-            return;
-          }
-          addError(regionReq.gets, error, serverName);
-          failedGets.addAll(regionReq.gets);
-        }
-      }
-    });
-    if (!failedGets.isEmpty()) {
-      tryResubmit(failedGets.stream(), tries);
-    }
-  }
-
-  private void send(Map<ServerName, ? extends Map<byte[], RegionRequest>> getsByServer, int tries) {
-    long callTimeoutNs;
-    if (operationTimeoutNs > 0) {
-      long remainingNs = remainingTimeNs();
-      if (remainingNs <= 0) {
-        failAll(getsByServer.values().stream().flatMap(m -> m.values().stream())
-            .flatMap(r -> r.gets.stream()),
-          tries);
-        return;
-      }
-      callTimeoutNs = Math.min(remainingNs, rpcTimeoutNs);
-    } else {
-      callTimeoutNs = rpcTimeoutNs;
-    }
-    getsByServer.forEach((sn, getsByRegion) -> {
-      ClientService.Interface stub;
-      try {
-        stub = conn.getRegionServerStub(sn);
-      } catch (IOException e) {
-        onError(getsByRegion, tries, e, sn);
-        return;
-      }
-      ClientProtos.MultiRequest req;
-      try {
-        req = buildReq(getsByRegion);
-      } catch (IOException e) {
-        onError(getsByRegion, tries, e, sn);
-        return;
-      }
-      HBaseRpcController controller = conn.rpcControllerFactory.newController();
-      resetController(controller, callTimeoutNs);
-      stub.multi(controller, req, resp -> {
-        if (controller.failed()) {
-          onError(getsByRegion, tries, controller.getFailed(), sn);
-        } else {
-          try {
-            onComplete(getsByRegion, tries, sn,
-              ResponseConverter.getResults(req, resp, controller.cellScanner()));
-          } catch (Exception e) {
-            onError(getsByRegion, tries, e, sn);
-            return;
-          }
-        }
-      });
-    });
-  }
-
-  private void onError(Map<byte[], RegionRequest> getsByRegion, int tries, Throwable t,
-      ServerName serverName) {
-    Throwable error = translateException(t);
-    logException(tries, () -> getsByRegion.values().stream(), error, serverName);
-    if (error instanceof DoNotRetryIOException || tries >= maxAttempts) {
-      failAll(getsByRegion.values().stream().flatMap(r -> r.gets.stream()), tries, error,
-        serverName);
-      return;
-    }
-    List<Get> copiedGets =
-        getsByRegion.values().stream().flatMap(r -> r.gets.stream()).collect(Collectors.toList());
-    addError(copiedGets, error, serverName);
-    tryResubmit(copiedGets.stream(), tries);
-  }
-
-  private void tryResubmit(Stream<Get> gets, int tries) {
-    long delayNs;
-    if (operationTimeoutNs > 0) {
-      long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS;
-      if (maxDelayNs <= 0) {
-        failAll(gets, tries);
-        return;
-      }
-      delayNs = Math.min(maxDelayNs, getPauseTime(pauseNs, tries - 1));
-    } else {
-      delayNs = getPauseTime(pauseNs, tries - 1);
-    }
-    retryTimer.newTimeout(t -> groupAndSend(gets, tries + 1), delayNs, TimeUnit.NANOSECONDS);
-  }
-
-  private void groupAndSend(Stream<Get> gets, int tries) {
-    long locateTimeoutNs;
-    if (operationTimeoutNs > 0) {
-      locateTimeoutNs = remainingTimeNs();
-      if (locateTimeoutNs <= 0) {
-        failAll(gets, tries);
-        return;
-      }
-    } else {
-      locateTimeoutNs = -1L;
-    }
-    ConcurrentMap<ServerName, ConcurrentMap<byte[], RegionRequest>> getsByServer =
-        new ConcurrentHashMap<>();
-    ConcurrentLinkedQueue<Get> locateFailed = new ConcurrentLinkedQueue<>();
-    CompletableFuture.allOf(gets.map(get -> conn.getLocator()
-        .getRegionLocation(tableName, get.getRow(), RegionLocateType.CURRENT, locateTimeoutNs)
-        .whenComplete((loc, error) -> {
-          if (error != null) {
-            error = translateException(error);
-            if (error instanceof DoNotRetryIOException) {
-              failOne(get, tries, error, EnvironmentEdgeManager.currentTime(), "");
-              return;
-            }
-            addError(get, error, null);
-            locateFailed.add(get);
-          } else {
-            ConcurrentMap<byte[], RegionRequest> getsByRegion = computeIfAbsent(getsByServer,
-              loc.getServerName(), () -> new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR));
-            computeIfAbsent(getsByRegion, loc.getRegionInfo().getRegionName(),
-              () -> new RegionRequest(loc)).gets.add(get);
-          }
-        })).toArray(CompletableFuture[]::new)).whenComplete((v, r) -> {
-          if (!getsByServer.isEmpty()) {
-            send(getsByServer, tries);
-          }
-          if (!locateFailed.isEmpty()) {
-            tryResubmit(locateFailed.stream(), tries);
-          }
-        });
-  }
-
-  public List<CompletableFuture<Result>> call() {
-    groupAndSend(gets.stream(), 1);
-    return futures;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
index d240fab..c90bee2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
@@ -258,48 +258,55 @@ class AsyncRpcRetryingCallerFactory {
     return new ScanSingleRegionCallerBuilder();
   }
 
-  public class MultiGetCallerBuilder {
+  public class BatchCallerBuilder {
 
     private TableName tableName;
 
-    private List<Get> gets;
+    private List<? extends Row> actions;
 
     private long operationTimeoutNs = -1L;
 
-    private long rpcTimeoutNs = -1L;
+    private long readRpcTimeoutNs = -1L;
+
+    private long writeRpcTimeoutNs = -1L;
 
-    public MultiGetCallerBuilder table(TableName tableName) {
+    public BatchCallerBuilder table(TableName tableName) {
       this.tableName = tableName;
       return this;
     }
 
-    public MultiGetCallerBuilder gets(List<Get> gets) {
-      this.gets = gets;
+    public BatchCallerBuilder actions(List<? extends Row> actions) {
+      this.actions = actions;
       return this;
     }
 
-    public MultiGetCallerBuilder operationTimeout(long operationTimeout, TimeUnit unit) {
+    public BatchCallerBuilder operationTimeout(long operationTimeout, TimeUnit unit) {
       this.operationTimeoutNs = unit.toNanos(operationTimeout);
       return this;
     }
 
-    public MultiGetCallerBuilder rpcTimeout(long rpcTimeout, TimeUnit unit) {
-      this.rpcTimeoutNs = unit.toNanos(rpcTimeout);
+    public BatchCallerBuilder readRpcTimeout(long rpcTimeout, TimeUnit unit) {
+      this.readRpcTimeoutNs = unit.toNanos(rpcTimeout);
+      return this;
+    }
+
+    public BatchCallerBuilder writeRpcTimeout(long rpcTimeout, TimeUnit unit) {
+      this.writeRpcTimeoutNs = unit.toNanos(rpcTimeout);
       return this;
     }
 
-    public AsyncMultiGetRpcRetryingCaller build() {
-      return new AsyncMultiGetRpcRetryingCaller(retryTimer, conn, tableName, gets,
+    public <T> AsyncBatchRpcRetryingCaller<T> build() {
+      return new AsyncBatchRpcRetryingCaller<T>(retryTimer, conn, tableName, actions,
           conn.connConf.getPauseNs(), conn.connConf.getMaxRetries(), operationTimeoutNs,
-          rpcTimeoutNs, conn.connConf.getStartLogErrorsCnt());
+          readRpcTimeoutNs, writeRpcTimeoutNs, conn.connConf.getStartLogErrorsCnt());
     }
 
-    public List<CompletableFuture<Result>> call() {
-      return build().call();
+    public <T> List<CompletableFuture<T>> call() {
+      return this.<T> build().call();
     }
   }
 
-  public MultiGetCallerBuilder multiGet() {
-    return new MultiGetCallerBuilder();
+  public BatchCallerBuilder batch() {
+    return new BatchCallerBuilder();
   }
 }
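
RawAsyncTableImpl.batch (further down in this commit) drives the new builder; a condensed sketch of that call pattern, where the connection, table name, action list and timeout values are stand-in assumptions:

    List<CompletableFuture<Result>> futures = conn.callerFactory.batch()
        .table(tableName)
        .actions(gets) // any List<? extends Row>
        .operationTimeout(30, TimeUnit.SECONDS)
        .readRpcTimeout(10, TimeUnit.SECONDS)
        .writeRpcTimeout(10, TimeUnit.SECONDS)
        .call();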

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
index 81c806f..5bf6195 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
@@ -161,7 +161,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
     if (closeScanner) {
       closeScanner();
     }
-    future.completeExceptionally(new RetriesExhaustedException(tries, exceptions));
+    future.completeExceptionally(new RetriesExhaustedException(tries - 1, exceptions));
   }
 
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NONNULL_PARAM_VIOLATION",

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
index 0b4add1..04e69af 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
@@ -120,7 +120,7 @@ class AsyncSingleRequestRpcRetryingCaller<T> {
   }
 
   private void completeExceptionally() {
-    future.completeExceptionally(new RetriesExhaustedException(tries, exceptions));
+    future.completeExceptionally(new RetriesExhaustedException(tries - 1, exceptions));
   }
 
   private void onError(Throwable error, Supplier<String> errMsg,

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
index a2b5247..19a22c0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
@@ -17,12 +17,16 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static java.util.stream.Collectors.toList;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.toCheckExistenceOnly;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.voidBatch;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.voidBatchAll;
+
 import com.google.common.base.Preconditions;
 
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
@@ -30,7 +34,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ReflectionUtils;
 
 /**
  * The base interface for asynchronous version of Table. Obtain an instance from a
@@ -126,11 +129,7 @@ public interface AsyncTableBase {
    *         be wrapped by a {@link CompletableFuture}.
    */
   default CompletableFuture<Boolean> exists(Get get) {
-    if (!get.isCheckExistenceOnly()) {
-      get = ReflectionUtils.newInstance(get.getClass(), get);
-      get.setCheckExistenceOnly(true);
-    }
-    return get(get).thenApply(r -> r.getExists());
+    return get(toCheckExistenceOnly(get)).thenApply(r -> r.getExists());
   }
 
   /**
@@ -362,7 +361,9 @@ public interface AsyncTableBase {
    * @param gets The objects that specify what data to fetch and from which rows.
    * @return A list of {@link CompletableFuture}s that represent the result for each get.
    */
-  List<CompletableFuture<Result>> get(List<Get> gets);
+  default List<CompletableFuture<Result>> get(List<Get> gets) {
+    return batch(gets);
+  }
 
   /**
    * A simple version for batch get. It will fail if there are any failures and you will get the
@@ -371,8 +372,90 @@ public interface AsyncTableBase {
    * @return A {@link CompletableFuture} that wrapper the result list.
    */
   default CompletableFuture<List<Result>> getAll(List<Get> gets) {
-    List<CompletableFuture<Result>> futures = get(gets);
+    return batchAll(gets);
+  }
+
+  /**
+   * Test for the existence of columns in the table, as specified by the Gets.
+   * <p>
+   * This will return a list of booleans. Each value will be true if the related Get matches one or
+   * more keys, false if not.
+   * <p>
+   * This is a server-side call so it prevents any data from being transferred to the client.
+   * @param gets the Gets
+   * @return A list of {@link CompletableFuture}s that represent the existence for each get.
+   */
+  default List<CompletableFuture<Boolean>> exists(List<Get> gets) {
+    return get(toCheckExistenceOnly(gets)).stream().map(f -> f.thenApply(r -> r.getExists()))
+        .collect(toList());
+  }
+
+  /**
+   * A simple version for batch exists. It will fail if there are any failures and you will get the
+   * whole result boolean list at once if the operation succeeds.
+   * @param gets the Gets
+   * @return A {@link CompletableFuture} that wraps the result boolean list.
+   */
+  default CompletableFuture<List<Boolean>> existsAll(List<Get> gets) {
+    return getAll(toCheckExistenceOnly(gets))
+        .thenApply(l -> l.stream().map(r -> r.getExists()).collect(toList()));
+  }
+
+  /**
+   * Puts some data in the table, in batch.
+   * @param puts The list of mutations to apply.
+   * @return A list of {@link CompletableFuture}s that represent the result for each put.
+   */
+  default List<CompletableFuture<Void>> put(List<Put> puts) {
+    return voidBatch(this, puts);
+  }
+
+  /**
+   * A simple version of batch put. It will fail if there are any failures.
+   * @param puts The list of mutations to apply.
+   * @return A {@link CompletableFuture} that always returns null when it completes normally.
+   */
+  default CompletableFuture<Void> putAll(List<Put> puts) {
+    return voidBatchAll(this, puts);
+  }
+
+  /**
+   * Deletes the specified cells/rows in bulk.
+   * @param deletes list of things to delete.
+   * @return A list of {@link CompletableFuture}s that represent the result for each delete.
+   */
+  default List<CompletableFuture<Void>> delete(List<Delete> deletes) {
+    return voidBatch(this, deletes);
+  }
+
+  /**
+   * A simple version of batch delete. It will fail if there are any failures.
+   * @param deletes list of things to delete.
+   * @return A {@link CompletableFuture} that always returns null when it completes normally.
+   */
+  default CompletableFuture<Void> deleteAll(List<Delete> deletes) {
+    return voidBatchAll(this, deletes);
+  }
+
+  /**
+   * Method that does a batch call on Deletes, Gets, Puts, Increments and Appends. The ordering of
+   * execution of the actions is not defined. This means that if you do a Put and a Get in the same
+   * {@link #batch} call, the Get is not guaranteed to return what the Put had put.
+   * @param actions list of Get, Put, Delete, Increment, Append objects
+   * @return A list of {@link CompletableFuture}s that represent the result for each action.
+   */
+  <T> List<CompletableFuture<T>> batch(List<? extends Row> actions);
+
+  /**
+   * A simple version of batch. It will fail if there are any failures and you will get the whole
+   * result list at once if the operation succeeds.
+   * @param actions list of Get, Put, Delete, Increment, Append objects
+   * @return A list of the results for the actions, wrapped by a {@link CompletableFuture}.
+   */
+  default <T> CompletableFuture<List<T>> batchAll(List<? extends Row> actions) {
+    List<CompletableFuture<T>> futures = batch(actions);
     return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
-        .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(Collectors.toList()));
+        .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList()));
   }
 }
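
Taken together, these defaults give the async table a bulk API comparable to Table's batch calls, all funnelled through the single abstract batch method. A short usage sketch against a raw table (table, family and row names are made-up examples; the pattern mirrors the new TestAsyncTableBatch test at the end of this commit):

    RawAsyncTable table = conn.getRawTable(TableName.valueOf("test"));
    byte[] cf = Bytes.toBytes("cf");
    byte[] cq = Bytes.toBytes("cq");
    table.putAll(Arrays.asList(
        new Put(Bytes.toBytes("row1")).addColumn(cf, cq, Bytes.toBytes(1L)),
        new Put(Bytes.toBytes("row2")).addColumn(cf, cq, Bytes.toBytes(2L)))).get();
    List<Result> results = table.getAll(Arrays.asList(
        new Get(Bytes.toBytes("row1")), new Get(Bytes.toBytes("row2")))).get();
    CompletableFuture<List<Boolean>> exists =
        table.existsAll(Arrays.asList(new Get(Bytes.toBytes("row3"))));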

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
index 6cc2551..7281185 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
@@ -194,7 +194,7 @@ class AsyncTableImpl implements AsyncTable {
   }
 
   @Override
-  public List<CompletableFuture<Result>> get(List<Get> gets) {
-    return rawTable.get(gets).stream().map(this::wrap).collect(Collectors.toList());
+  public <T> List<CompletableFuture<T>> batch(List<? extends Row> actions) {
+    return rawTable.<T> batch(actions).stream().map(this::wrap).collect(Collectors.toList());
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index cc27992..4355182 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static java.util.stream.Collectors.toList;
 import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
 import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
 
@@ -28,6 +29,8 @@ import java.lang.reflect.UndeclaredThrowableException;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
@@ -49,6 +52,7 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.ipc.RemoteException;
 
 /**
@@ -59,11 +63,11 @@ public final class ConnectionUtils {
 
   private static final Log LOG = LogFactory.getLog(ConnectionUtils.class);
 
-  private ConnectionUtils() {}
+  private ConnectionUtils() {
+  }
 
   /**
-   * Calculate pause time.
-   * Built on {@link HConstants#RETRY_BACKOFF}.
+   * Calculate pause time. Built on {@link HConstants#RETRY_BACKOFF}.
    * @param pause time to pause
    * @param tries amount of tries
    * @return How long to wait after <code>tries</code> retries
@@ -83,7 +87,6 @@ public final class ConnectionUtils {
     return normalPause + jitter;
   }
 
-
   /**
    * Adds / subs an up to 50% jitter to a pause time. Minimum is 1.
    * @param pause the expected pause.
@@ -103,24 +106,23 @@ public final class ConnectionUtils {
    * @param cnm Replaces the nonce generator used, for testing.
    * @return old nonce generator.
    */
-  public static NonceGenerator injectNonceGeneratorForTesting(
-      ClusterConnection conn, NonceGenerator cnm) {
+  public static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn,
+      NonceGenerator cnm) {
     return ConnectionImplementation.injectNonceGeneratorForTesting(conn, cnm);
   }
 
   /**
-   * Changes the configuration to set the number of retries needed when using Connection
-   * internally, e.g. for  updating catalog tables, etc.
-   * Call this method before we create any Connections.
+   * Changes the configuration to set the number of retries needed when using Connection internally,
+   * e.g. for updating catalog tables, etc. Call this method before we create any Connections.
    * @param c The Configuration instance to set the retries into.
    * @param log Used to log what we set in here.
    */
-  public static void setServerSideHConnectionRetriesConfig(
-      final Configuration c, final String sn, final Log log) {
+  public static void setServerSideHConnectionRetriesConfig(final Configuration c, final String sn,
+      final Log log) {
     // TODO: Fix this. Not all connections from server side should have 10 times the retries.
     int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
       HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-    // Go big.  Multiply by 10.  If we can't get to meta after this many retries
+    // Go big. Multiply by 10. If we can't get to meta after this many retries
     // then something seriously wrong.
     int serversideMultiplier = c.getInt("hbase.client.serverside.retries.multiplier", 10);
     int retries = hcRetries * serversideMultiplier;
@@ -141,9 +143,9 @@ public final class ConnectionUtils {
    * @throws IOException if IO failure occurred
    */
   public static ClusterConnection createShortCircuitConnection(final Configuration conf,
-    ExecutorService pool, User user, final ServerName serverName,
-    final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client)
-    throws IOException {
+      ExecutorService pool, User user, final ServerName serverName,
+      final AdminService.BlockingInterface admin, final ClientService.BlockingInterface client)
+      throws IOException {
     if (user == null) {
       user = UserProvider.instantiate(conf).getCurrent();
     }
@@ -166,8 +168,7 @@ public final class ConnectionUtils {
    */
   @VisibleForTesting
   public static void setupMasterlessConnection(Configuration conf) {
-    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL,
-      MasterlessConnection.class.getName());
+    conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, MasterlessConnection.class.getName());
   }
 
   /**
@@ -175,8 +176,7 @@ public final class ConnectionUtils {
    * region re-lookups.
    */
   static class MasterlessConnection extends ConnectionImplementation {
-    MasterlessConnection(Configuration conf,
-      ExecutorService pool, User user) throws IOException {
+    MasterlessConnection(Configuration conf, ExecutorService pool, User user) throws IOException {
       super(conf, pool, user);
     }
 
@@ -197,8 +197,7 @@ public final class ConnectionUtils {
   /**
    * Get a unique key for the rpc stub to the given server.
    */
-  static String getStubKey(String serviceName, ServerName serverName,
-      boolean hostnameCanChange) {
+  static String getStubKey(String serviceName, ServerName serverName, boolean hostnameCanChange) {
     // Sometimes, servers go down and they come back up with the same hostname but a different
     // IP address. Force a resolution of the rsHostname by trying to instantiate an
     // InetSocketAddress, and this way we will rightfully get a new stubKey.
@@ -327,4 +326,25 @@ public final class ConnectionUtils {
 
   // Add a delta to avoid timeout immediately after a retry sleeping.
   static final long SLEEP_DELTA_NS = TimeUnit.MILLISECONDS.toNanos(1);
+
+  static Get toCheckExistenceOnly(Get get) {
+    if (get.isCheckExistenceOnly()) {
+      return get;
+    }
+    return ReflectionUtils.newInstance(get.getClass(), get).setCheckExistenceOnly(true);
+  }
+
+  static List<Get> toCheckExistenceOnly(List<Get> gets) {
+    return gets.stream().map(ConnectionUtils::toCheckExistenceOnly).collect(toList());
+  }
+
+  static List<CompletableFuture<Void>> voidBatch(AsyncTableBase table,
+      List<? extends Row> actions) {
+    return table.<Object> batch(actions).stream().map(f -> f.<Void> thenApply(r -> null))
+        .collect(toList());
+  }
+
+  static CompletableFuture<Void> voidBatchAll(AsyncTableBase table, List<? extends Row> actions) {
+    return table.<Object> batchAll(actions).thenApply(r -> null);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
index 6fad0da..347c85b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
@@ -407,9 +407,10 @@ class RawAsyncTableImpl implements RawAsyncTable {
   }
 
   @Override
-  public List<CompletableFuture<Result>> get(List<Get> gets) {
-    return conn.callerFactory.multiGet().table(tableName).gets(gets)
+  public <T> List<CompletableFuture<T>> batch(List<? extends Row> actions) {
+    return conn.callerFactory.batch().table(tableName).actions(actions)
         .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
-        .rpcTimeout(readRpcTimeoutNs, TimeUnit.NANOSECONDS).call();
+        .readRpcTimeout(readRpcTimeoutNs, TimeUnit.NANOSECONDS)
+        .writeRpcTimeout(writeRpcTimeoutNs, TimeUnit.NANOSECONDS).call();
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 446cd89..424d578 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -52,9 +52,8 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
@@ -670,7 +669,7 @@ public final class RequestConverter {
    * @throws IOException
    */
   public static RegionAction.Builder buildNoDataRegionAction(final byte[] regionName,
-      final List<Action> actions, final List<CellScannable> cells,
+      final Iterable<Action> actions, final List<CellScannable> cells,
       final RegionAction.Builder regionActionBuilder,
       final ClientProtos.Action.Builder actionBuilder,
       final MutationProto.Builder mutationBuilder) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
index 3028111..5614d8e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
@@ -22,10 +22,8 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.io.UncheckedIOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.concurrent.CompletableFuture;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
@@ -62,12 +60,10 @@ public abstract class AbstractTestAsyncTableScan {
     TEST_UTIL.createTable(TABLE_NAME, FAMILY, splitKeys);
     TEST_UTIL.waitTableAvailable(TABLE_NAME);
     ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration());
-    RawAsyncTable table = ASYNC_CONN.getRawTable(TABLE_NAME);
-    List<CompletableFuture<?>> futures = new ArrayList<>();
-    IntStream.range(0, COUNT).forEach(
-      i -> futures.add(table.put(new Put(Bytes.toBytes(String.format("%03d", i)))
-          .addColumn(FAMILY, CQ1, Bytes.toBytes(i)).addColumn(FAMILY, CQ2, Bytes.toBytes(i * i)))));
-    CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0])).get();
+    ASYNC_CONN.getRawTable(TABLE_NAME).putAll(IntStream.range(0, COUNT)
+        .mapToObj(i -> new Put(Bytes.toBytes(String.format("%03d", i)))
+            .addColumn(FAMILY, CQ1, Bytes.toBytes(i)).addColumn(FAMILY, CQ2, Bytes.toBytes(i * i)))
+        .collect(Collectors.toList())).get();
   }
 
   @AfterClass

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncGetMultiThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncGetMultiThread.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncGetMultiThread.java
deleted file mode 100644
index d24501d..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncGetMultiThread.java
+++ /dev/null
@@ -1,150 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT;
-import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_RETRIES_NUMBER;
-import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_READ_TIMEOUT_KEY;
-import static org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.TABLES_ON_MASTER;
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.stream.IntStream;
-
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.io.ByteBufferPool;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Threads;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Will split the table, and move region randomly when testing.
- */
-@Category({ LargeTests.class, ClientTests.class })
-public class TestAsyncGetMultiThread {
-  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  private static TableName TABLE_NAME = TableName.valueOf("async");
-
-  private static byte[] FAMILY = Bytes.toBytes("cf");
-
-  private static byte[] QUALIFIER = Bytes.toBytes("cq");
-
-  private static int COUNT = 1000;
-
-  private static AsyncConnection CONN;
-
-  private static byte[][] SPLIT_KEYS;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    TEST_UTIL.getConfiguration().set(TABLES_ON_MASTER, "none");
-    TEST_UTIL.getConfiguration().setLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 60000L);
-    TEST_UTIL.getConfiguration().setLong(HBASE_RPC_READ_TIMEOUT_KEY, 1000L);
-    TEST_UTIL.getConfiguration().setInt(HBASE_CLIENT_RETRIES_NUMBER, 1000);
-    TEST_UTIL.getConfiguration().setInt(ByteBufferPool.MAX_POOL_SIZE_KEY, 100);
-    TEST_UTIL.startMiniCluster(5);
-    SPLIT_KEYS = new byte[8][];
-    for (int i = 111; i < 999; i += 111) {
-      SPLIT_KEYS[i / 111 - 1] = Bytes.toBytes(String.format("%03d", i));
-    }
-    TEST_UTIL.createTable(TABLE_NAME, FAMILY);
-    TEST_UTIL.waitTableAvailable(TABLE_NAME);
-    CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration());
-    RawAsyncTable table = CONN.getRawTable(TABLE_NAME);
-    List<CompletableFuture<?>> futures = new ArrayList<>();
-    IntStream.range(0, COUNT)
-        .forEach(i -> futures.add(table.put(new Put(Bytes.toBytes(String.format("%03d", i)))
-            .addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i)))));
-    CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0])).get();
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    IOUtils.closeQuietly(CONN);
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  private void run(AtomicBoolean stop) throws InterruptedException, ExecutionException {
-    while (!stop.get()) {
-      int i = ThreadLocalRandom.current().nextInt(COUNT);
-      assertEquals(i,
-        Bytes.toInt(
-          CONN.getRawTable(TABLE_NAME).get(new Get(Bytes.toBytes(String.format("%03d", i)))).get()
-              .getValue(FAMILY, QUALIFIER)));
-    }
-  }
-
-  @Test
-  public void test() throws IOException, InterruptedException, ExecutionException {
-    int numThreads = 20;
-    AtomicBoolean stop = new AtomicBoolean(false);
-    ExecutorService executor =
-        Executors.newFixedThreadPool(numThreads, Threads.newDaemonThreadFactory("TestAsyncGet-"));
-    List<Future<?>> futures = new ArrayList<>();
-    IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> {
-      run(stop);
-      return null;
-    })));
-    Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
-    Admin admin = TEST_UTIL.getAdmin();
-    for (byte[] splitPoint : SPLIT_KEYS) {
-      admin.split(TABLE_NAME, splitPoint);
-      for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME)) {
-        region.compact(true);
-      }
-      Thread.sleep(5000);
-      admin.balancer(true);
-      Thread.sleep(5000);
-      ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
-      ServerName newMetaServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
-          .map(t -> t.getRegionServer().getServerName()).filter(s -> !s.equals(metaServer))
-          .findAny().get();
-      admin.move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
-        Bytes.toBytes(newMetaServer.getServerName()));
-      Thread.sleep(5000);
-    }
-    stop.set(true);
-    executor.shutdown();
-    for (Future<?> future : futures) {
-      future.get();
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java
new file mode 100644
index 0000000..308b9e5
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableBatch.java
@@ -0,0 +1,236 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ForkJoinPool;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameter;
+import org.junit.runners.Parameterized.Parameters;
+
+@RunWith(Parameterized.class)
+@Category({ LargeTests.class, ClientTests.class })
+public class TestAsyncTableBatch {
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private static TableName TABLE_NAME = TableName.valueOf("async");
+
+  private static byte[] FAMILY = Bytes.toBytes("cf");
+
+  private static byte[] CQ = Bytes.toBytes("cq");
+
+  private static int COUNT = 1000;
+
+  private static AsyncConnection CONN;
+
+  private static byte[][] SPLIT_KEYS;
+
+  @Parameter(0)
+  public String tableType;
+
+  @Parameter(1)
+  public Function<TableName, AsyncTableBase> tableGetter;
+
+  private static RawAsyncTable getRawTable(TableName tableName) {
+    return CONN.getRawTable(tableName);
+  }
+
+  private static AsyncTable getTable(TableName tableName) {
+    return CONN.getTable(tableName, ForkJoinPool.commonPool());
+  }
+
+  @Parameters(name = "{index}: type={0}")
+  public static List<Object[]> params() {
+    Function<TableName, AsyncTableBase> rawTableGetter = TestAsyncTableBatch::getRawTable;
+    Function<TableName, AsyncTableBase> tableGetter = TestAsyncTableBatch::getTable;
+    return Arrays.asList(new Object[] { "raw", rawTableGetter },
+      new Object[] { "normal", tableGetter });
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL.startMiniCluster(3);
+    SPLIT_KEYS = new byte[8][];
+    for (int i = 111; i < 999; i += 111) {
+      SPLIT_KEYS[i / 111 - 1] = Bytes.toBytes(String.format("%03d", i));
+    }
+    CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration());
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    CONN.close();
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setUpBeforeTest() throws IOException, InterruptedException {
+    TEST_UTIL.createTable(TABLE_NAME, FAMILY, SPLIT_KEYS);
+    TEST_UTIL.waitTableAvailable(TABLE_NAME);
+  }
+
+  @After
+  public void tearDownAfterTest() throws IOException {
+    Admin admin = TEST_UTIL.getAdmin();
+    if (admin.isTableEnabled(TABLE_NAME)) {
+      admin.disableTable(TABLE_NAME);
+    }
+    admin.deleteTable(TABLE_NAME);
+  }
+
+  private byte[] getRow(int i) {
+    return Bytes.toBytes(String.format("%03d", i));
+  }
+
+  @Test
+  public void test() throws InterruptedException, ExecutionException, IOException {
+    AsyncTableBase table = tableGetter.apply(TABLE_NAME);
+    table.putAll(IntStream.range(0, COUNT)
+        .mapToObj(i -> new Put(getRow(i)).addColumn(FAMILY, CQ, Bytes.toBytes(i)))
+        .collect(Collectors.toList())).get();
+    List<Result> results =
+        table
+            .getAll(IntStream.range(0, COUNT)
+                .mapToObj(
+                  i -> Arrays.asList(new Get(getRow(i)), new Get(Arrays.copyOf(getRow(i), 4))))
+                .flatMap(l -> l.stream()).collect(Collectors.toList()))
+            .get();
+    assertEquals(2 * COUNT, results.size());
+    for (int i = 0; i < COUNT; i++) {
+      assertEquals(i, Bytes.toInt(results.get(2 * i).getValue(FAMILY, CQ)));
+      assertTrue(results.get(2 * i + 1).isEmpty());
+    }
+    Admin admin = TEST_UTIL.getAdmin();
+    admin.flush(TABLE_NAME);
+    TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME).forEach(r -> {
+      byte[] startKey = r.getRegionInfo().getStartKey();
+      int number = startKey.length == 0 ? 55 : Integer.parseInt(Bytes.toString(startKey));
+      byte[] splitPoint = Bytes.toBytes(String.format("%03d", number + 55));
+      try {
+        admin.splitRegion(r.getRegionInfo().getRegionName(), splitPoint);
+      } catch (IOException e) {
+        throw new UncheckedIOException(e);
+      }
+    });
+    // we are not going to test the function of split so no assertion here. Just wait for a while
+    // and then start our work.
+    Thread.sleep(5000);
+    table.deleteAll(
+      IntStream.range(0, COUNT).mapToObj(i -> new Delete(getRow(i))).collect(Collectors.toList()))
+        .get();
+    results = table
+        .getAll(
+          IntStream.range(0, COUNT).mapToObj(i -> new Get(getRow(i))).collect(Collectors.toList()))
+        .get();
+    assertEquals(COUNT, results.size());
+    results.forEach(r -> assertTrue(r.isEmpty()));
+  }
+
+  @Test
+  public void testMixed() throws InterruptedException, ExecutionException {
+    AsyncTableBase table = tableGetter.apply(TABLE_NAME);
+    table.putAll(IntStream.range(0, 5)
+        .mapToObj(i -> new Put(Bytes.toBytes(i)).addColumn(FAMILY, CQ, Bytes.toBytes((long) i)))
+        .collect(Collectors.toList())).get();
+    List<Row> actions = new ArrayList<>();
+    actions.add(new Get(Bytes.toBytes(0)));
+    actions.add(new Put(Bytes.toBytes(1)).addColumn(FAMILY, CQ, Bytes.toBytes((long) 2)));
+    actions.add(new Delete(Bytes.toBytes(2)));
+    actions.add(new Increment(Bytes.toBytes(3)).addColumn(FAMILY, CQ, 1));
+    actions.add(new Append(Bytes.toBytes(4)).add(FAMILY, CQ, Bytes.toBytes(4)));
+    List<Object> results = table.batchAll(actions).get();
+    assertEquals(5, results.size());
+    Result getResult = (Result) results.get(0);
+    assertEquals(0, Bytes.toLong(getResult.getValue(FAMILY, CQ)));
+    assertEquals(2, Bytes.toLong(table.get(new Get(Bytes.toBytes(1))).get().getValue(FAMILY, CQ)));
+    assertTrue(table.get(new Get(Bytes.toBytes(2))).get().isEmpty());
+    Result incrementResult = (Result) results.get(3);
+    assertEquals(4, Bytes.toLong(incrementResult.getValue(FAMILY, CQ)));
+    Result appendResult = (Result) results.get(4);
+    byte[] appendValue = appendResult.getValue(FAMILY, CQ);
+    assertEquals(12, appendValue.length);
+    assertEquals(4, Bytes.toLong(appendValue));
+    assertEquals(4, Bytes.toInt(appendValue, 8));
+  }
+
+  public static final class ErrorInjectObserver extends BaseRegionObserver {
+
+    @Override
+    public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> e, Get get,
+        List<Cell> results) throws IOException {
+      if (e.getEnvironment().getRegionInfo().getEndKey().length == 0) {
+        throw new DoNotRetryRegionException("Inject Error");
+      }
+    }
+  }
+
+  @Test
+  public void testPartialSuccess() throws IOException, InterruptedException, ExecutionException {
+    Admin admin = TEST_UTIL.getAdmin();
+    HTableDescriptor htd = admin.getTableDescriptor(TABLE_NAME);
+    htd.addCoprocessor(ErrorInjectObserver.class.getName());
+    admin.modifyTable(TABLE_NAME, htd);
+    AsyncTableBase table = tableGetter.apply(TABLE_NAME);
+    table.putAll(Arrays.asList(SPLIT_KEYS).stream().map(k -> new Put(k).addColumn(FAMILY, CQ, k))
+        .collect(Collectors.toList())).get();
+    List<CompletableFuture<Result>> futures = table
+        .get(Arrays.asList(SPLIT_KEYS).stream().map(k -> new Get(k)).collect(Collectors.toList()));
+    for (int i = 0; i < SPLIT_KEYS.length - 1; i++) {
+      assertArrayEquals(SPLIT_KEYS[i], futures.get(i).get().getValue(FAMILY, CQ));
+    }
+    try {
+      futures.get(SPLIT_KEYS.length - 1).get();
+    } catch (ExecutionException e) {
+      assertThat(e.getCause(), instanceOf(RetriesExhaustedException.class));
+    }
+  }
+}
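
For quick orientation, a minimal usage sketch (not from the patch) of the batch calls the new test above exercises: each *All method takes a list of operations and returns a single CompletableFuture. The table, family and qualifier names below are placeholders, and the table is assumed to already exist with a 'cf' family.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RawAsyncTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

// Minimal sketch of the batch API exercised by the test above: one future per batch call.
// Table/family/qualifier names are placeholders; error handling is omitted for brevity.
public class AsyncBatchSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf);
    try {
      RawAsyncTable table = conn.getRawTable(TableName.valueOf("async"));
      byte[] cf = Bytes.toBytes("cf");
      byte[] cq = Bytes.toBytes("cq");

      // Batch of puts: the single future completes when all puts are done.
      table.putAll(Arrays.asList(
          new Put(Bytes.toBytes("row1")).addColumn(cf, cq, Bytes.toBytes(1)),
          new Put(Bytes.toBytes("row2")).addColumn(cf, cq, Bytes.toBytes(2)))).get();

      // Batch of gets: results come back in the same order as the requests.
      List<Result> results = table.getAll(Arrays.asList(
          new Get(Bytes.toBytes("row1")),
          new Get(Bytes.toBytes("row2")))).get();
      results.forEach(r -> System.out.println(Bytes.toInt(r.getValue(cf, cq))));
    } finally {
      conn.close();
    }
  }
}

deleteAll and batchAll follow the same pattern; batchAll additionally allows mixing Get, Put, Delete, Increment and Append in a single call, as testMixed above shows.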

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
new file mode 100644
index 0000000..da8141b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT;
+import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_RETRIES_NUMBER;
+import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_READ_TIMEOUT_KEY;
+import static org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.TABLES_ON_MASTER;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.io.ByteBufferPool;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Will split the table, and move region randomly when testing.
+ */
+@Category({ LargeTests.class, ClientTests.class })
+public class TestAsyncTableGetMultiThreaded {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private static TableName TABLE_NAME = TableName.valueOf("async");
+
+  private static byte[] FAMILY = Bytes.toBytes("cf");
+
+  private static byte[] QUALIFIER = Bytes.toBytes("cq");
+
+  private static int COUNT = 1000;
+
+  private static AsyncConnection CONN;
+
+  private static byte[][] SPLIT_KEYS;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    TEST_UTIL.getConfiguration().set(TABLES_ON_MASTER, "none");
+    TEST_UTIL.getConfiguration().setLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 60000L);
+    TEST_UTIL.getConfiguration().setLong(HBASE_RPC_READ_TIMEOUT_KEY, 1000L);
+    TEST_UTIL.getConfiguration().setInt(HBASE_CLIENT_RETRIES_NUMBER, 1000);
+    TEST_UTIL.getConfiguration().setInt(ByteBufferPool.MAX_POOL_SIZE_KEY, 100);
+    TEST_UTIL.startMiniCluster(5);
+    SPLIT_KEYS = new byte[8][];
+    for (int i = 111; i < 999; i += 111) {
+      SPLIT_KEYS[i / 111 - 1] = Bytes.toBytes(String.format("%03d", i));
+    }
+    TEST_UTIL.createTable(TABLE_NAME, FAMILY);
+    TEST_UTIL.waitTableAvailable(TABLE_NAME);
+    CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration());
+    CONN.getRawTable(TABLE_NAME)
+        .putAll(
+          IntStream.range(0, COUNT).mapToObj(i -> new Put(Bytes.toBytes(String.format("%03d", i)))
+              .addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i))).collect(Collectors.toList()))
+        .get();
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    IOUtils.closeQuietly(CONN);
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  private void run(AtomicBoolean stop) throws InterruptedException, ExecutionException {
+    while (!stop.get()) {
+      int i = ThreadLocalRandom.current().nextInt(COUNT);
+      assertEquals(i,
+        Bytes.toInt(
+          CONN.getRawTable(TABLE_NAME).get(new Get(Bytes.toBytes(String.format("%03d", i)))).get()
+              .getValue(FAMILY, QUALIFIER)));
+    }
+  }
+
+  @Test
+  public void test() throws IOException, InterruptedException, ExecutionException {
+    int numThreads = 20;
+    AtomicBoolean stop = new AtomicBoolean(false);
+    ExecutorService executor =
+        Executors.newFixedThreadPool(numThreads, Threads.newDaemonThreadFactory("TestAsyncGet-"));
+    List<Future<?>> futures = new ArrayList<>();
+    IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> {
+      run(stop);
+      return null;
+    })));
+    Collections.shuffle(Arrays.asList(SPLIT_KEYS), new Random(123));
+    Admin admin = TEST_UTIL.getAdmin();
+    for (byte[] splitPoint : SPLIT_KEYS) {
+      admin.split(TABLE_NAME, splitPoint);
+      for (HRegion region : TEST_UTIL.getHBaseCluster().getRegions(TABLE_NAME)) {
+        region.compact(true);
+      }
+      Thread.sleep(5000);
+      admin.balancer(true);
+      Thread.sleep(5000);
+      ServerName metaServer = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
+      ServerName newMetaServer = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
+          .map(t -> t.getRegionServer().getServerName()).filter(s -> !s.equals(metaServer))
+          .findAny().get();
+      admin.move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
+        Bytes.toBytes(newMetaServer.getServerName()));
+      Thread.sleep(5000);
+    }
+    stop.set(true);
+    executor.shutdown();
+    for (Future<?> future : futures) {
+      future.get();
+    }
+  }
+}


[33/50] [abbrv] hbase git commit: HBASE-17068 Procedure v2 - inherit region locks (Matteo Bertozzi)

Posted by sy...@apache.org.
HBASE-17068 Procedure v2 - inherit region locks (Matteo Bertozzi)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/306ef83c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/306ef83c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/306ef83c

Branch: refs/heads/hbase-12439
Commit: 306ef83c9cde9730ae2268db3814d59b936de4c1
Parents: 319ecd8
Author: Michael Stack <st...@apache.org>
Authored: Tue Dec 27 16:17:45 2016 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Tue Dec 27 16:17:45 2016 -0800

----------------------------------------------------------------------
 .../procedure/MasterProcedureScheduler.java     | 67 ++++++++++++--------
 .../procedure/TestMasterProcedureScheduler.java | 43 +++++++++++++
 2 files changed, 85 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
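
Illustrative only, not part of the patch: a minimal, self-contained sketch of the lock-inheritance rule this commit applies to the scheduler's region (and other queue) exclusive locks. The Proc class and its fields are hypothetical stand-ins for the real Procedure API; only the owner/parent/root check mirrors the RegionEvent change in the diff that follows.

// A child procedure may re-enter an exclusive lock already held by its parent or root,
// and only the procedure that actually acquired the lock may release it.
public class InheritedLockSketch {

  private static final long NO_OWNER = Long.MIN_VALUE;

  static final class Proc {
    final long procId;
    final Long parentProcId; // null when the procedure has no parent
    final Long rootProcId;   // null when the procedure has no parent

    Proc(long procId, Long parentProcId, Long rootProcId) {
      this.procId = procId;
      this.parentProcId = parentProcId;
      this.rootProcId = rootProcId;
    }

    boolean hasParent() {
      return parentProcId != null;
    }
  }

  private long exclusiveLockOwner = NO_OWNER;

  private boolean isLockOwner(long procId) {
    return exclusiveLockOwner == procId;
  }

  private boolean hasParentLock(Proc proc) {
    return proc.hasParent()
        && (isLockOwner(proc.parentProcId) || isLockOwner(proc.rootProcId));
  }

  /** Grant the lock if it is free, or if the caller (or its parent/root) already owns it. */
  public boolean tryExclusiveLock(Proc proc) {
    if (exclusiveLockOwner != NO_OWNER) {
      return isLockOwner(proc.procId) || hasParentLock(proc);
    }
    exclusiveLockOwner = proc.procId;
    return true;
  }

  /** Only the owning procedure releases the lock; an inheriting child does not. */
  public boolean releaseExclusiveLock(Proc proc) {
    if (isLockOwner(proc.procId)) {
      exclusiveLockOwner = NO_OWNER;
      return true;
    }
    return false;
  }

  public static void main(String[] args) {
    InheritedLockSketch region = new InheritedLockSketch();
    Proc root = new Proc(1, null, null);
    Proc child = new Proc(2, 1L, 1L);
    Proc other = new Proc(3, null, null);
    System.out.println(region.tryExclusiveLock(root));      // true: proc 1 takes the lock
    System.out.println(region.tryExclusiveLock(child));     // true: inherited from parent
    System.out.println(region.tryExclusiveLock(other));     // false: unrelated procedure
    System.out.println(region.releaseExclusiveLock(child)); // false: child never owned it
    System.out.println(region.releaseExclusiveLock(root));  // true: owner releases
  }
}

A child procedure can therefore proceed on a region its parent already holds exclusively, while an unrelated procedure (like proc-3 in the new testInheritedRegionXLock further down) has to wait until both parent and child are done.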


http://git-wip-us.apache.org/repos/asf/hbase/blob/306ef83c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 691442c..3f588ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -412,15 +412,27 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       return exclusiveLockProcIdOwner == procId;
     }
 
-    public boolean tryExclusiveLock(final long procIdOwner) {
-      assert procIdOwner != Long.MIN_VALUE;
-      if (hasExclusiveLock() && !isLockOwner(procIdOwner)) return false;
-      exclusiveLockProcIdOwner = procIdOwner;
+    public boolean hasParentLock(final Procedure proc) {
+      return proc.hasParent() &&
+        (isLockOwner(proc.getParentProcId()) || isLockOwner(proc.getRootProcId()));
+    }
+
+    public boolean hasLockAccess(final Procedure proc) {
+      return isLockOwner(proc.getProcId()) || hasParentLock(proc);
+    }
+
+    public boolean tryExclusiveLock(final Procedure proc) {
+      if (hasExclusiveLock()) return hasLockAccess(proc);
+      exclusiveLockProcIdOwner = proc.getProcId();
       return true;
     }
 
-    private void releaseExclusiveLock() {
-      exclusiveLockProcIdOwner = Long.MIN_VALUE;
+    public boolean releaseExclusiveLock(final Procedure proc) {
+      if (isLockOwner(proc.getProcId())) {
+        exclusiveLockProcIdOwner = Long.MIN_VALUE;
+        return true;
+      }
+      return false;
     }
 
     public HRegionInfo getRegionInfo() {
@@ -443,7 +455,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   public static class TableQueue extends QueueImpl<TableName> {
     private final NamespaceQueue namespaceQueue;
 
-    private HashMap<HRegionInfo, RegionEvent> regionEventMap;
+    private HashMap<String, RegionEvent> regionEventMap;
     private TableLock tableLock = null;
 
     public TableQueue(TableName tableName, NamespaceQueue namespaceQueue, int priority) {
@@ -476,18 +488,18 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
 
     public synchronized RegionEvent getRegionEvent(final HRegionInfo regionInfo) {
       if (regionEventMap == null) {
-        regionEventMap = new HashMap<HRegionInfo, RegionEvent>();
+        regionEventMap = new HashMap<String, RegionEvent>();
       }
-      RegionEvent event = regionEventMap.get(regionInfo);
+      RegionEvent event = regionEventMap.get(regionInfo.getEncodedName());
       if (event == null) {
         event = new RegionEvent(regionInfo);
-        regionEventMap.put(regionInfo, event);
+        regionEventMap.put(regionInfo.getEncodedName(), event);
       }
       return event;
     }
 
     public synchronized void removeRegionEvent(final RegionEvent event) {
-      regionEventMap.remove(event.getRegionInfo());
+      regionEventMap.remove(event.getRegionInfo().getEncodedName());
       if (regionEventMap.isEmpty()) {
         regionEventMap = null;
       }
@@ -675,7 +687,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       hasXLock = queue.tryZkExclusiveLock(lockManager, procedure.toString());
       if (!hasXLock) {
         schedLock();
-        if (!hasParentLock) queue.releaseExclusiveLock();
+        if (!hasParentLock) queue.releaseExclusiveLock(procedure);
         queue.getNamespaceQueue().releaseSharedLock();
         addToRunQueue(tableRunQueue, queue);
         schedUnlock();
@@ -699,7 +711,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     }
 
     schedLock();
-    if (!hasParentLock) queue.releaseExclusiveLock();
+    if (!hasParentLock) queue.releaseExclusiveLock(procedure);
     queue.getNamespaceQueue().releaseSharedLock();
     addToRunQueue(tableRunQueue, queue);
     schedUnlock();
@@ -846,11 +858,11 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
         assert i == 0 || regionInfo[i] != regionInfo[i-1] : "duplicate region: " + regionInfo[i];
 
         event[i] = queue.getRegionEvent(regionInfo[i]);
-        if (!event[i].tryExclusiveLock(procedure.getProcId())) {
+        if (!event[i].tryExclusiveLock(procedure)) {
           suspendProcedure(event[i], procedure);
           hasLock = false;
           while (i-- > 0) {
-            event[i].releaseExclusiveLock();
+            event[i].releaseExclusiveLock(procedure);
           }
           break;
         }
@@ -892,12 +904,13 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
         assert i == 0 || regionInfo[i] != regionInfo[i-1] : "duplicate region: " + regionInfo[i];
 
         RegionEvent event = queue.getRegionEvent(regionInfo[i]);
-        event.releaseExclusiveLock();
-        if (event.hasWaitingProcedures()) {
-          // release one procedure at the time since regions has an xlock
-          nextProcs[numProcs++] = event.popWaitingProcedure(true);
-        } else {
-          queue.removeRegionEvent(event);
+        if (event.releaseExclusiveLock(procedure)) {
+          if (event.hasWaitingProcedures()) {
+            // release one procedure at the time since regions has an xlock
+            nextProcs[numProcs++] = event.popWaitingProcedure(true);
+          } else {
+            queue.removeRegionEvent(event);
+          }
         }
       }
     }
@@ -960,7 +973,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       final TableQueue tableQueue = getTableQueue(TableName.NAMESPACE_TABLE_NAME);
       final NamespaceQueue queue = getNamespaceQueue(nsName);
 
-      queue.releaseExclusiveLock();
+      queue.releaseExclusiveLock(procedure);
       if (tableQueue.releaseSharedLock()) {
         addToRunQueue(tableRunQueue, tableQueue);
       }
@@ -1005,7 +1018,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     schedLock();
     try {
       ServerQueue queue = getServerQueue(serverName);
-      queue.releaseExclusiveLock();
+      queue.releaseExclusiveLock(procedure);
       addToRunQueue(serverRunQueue, queue);
     } finally {
       schedUnlock();
@@ -1135,8 +1148,12 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       return true;
     }
 
-    public synchronized void releaseExclusiveLock() {
-      exclusiveLockProcIdOwner = Long.MIN_VALUE;
+    public synchronized boolean releaseExclusiveLock(final Procedure proc) {
+      if (isLockOwner(proc.getProcId())) {
+        exclusiveLockProcIdOwner = Long.MIN_VALUE;
+        return true;
+      }
+      return false;
     }
 
     // This should go away when we have the new AM and its events

http://git-wip-us.apache.org/repos/asf/hbase/blob/306ef83c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 776416f..7397168 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -543,6 +543,49 @@ public class TestMasterProcedureScheduler {
   }
 
   @Test
+  public void testInheritedRegionXLock() {
+    final TableName tableName = TableName.valueOf("testInheritedRegionXLock");
+    final HRegionInfo region = new HRegionInfo(tableName, Bytes.toBytes("a"), Bytes.toBytes("b"));
+
+    queue.addBack(new TestRegionProcedure(1, tableName,
+        TableProcedureInterface.TableOperationType.SPLIT, region));
+    queue.addBack(new TestRegionProcedure(1, 2, tableName,
+        TableProcedureInterface.TableOperationType.UNASSIGN, region));
+    queue.addBack(new TestRegionProcedure(3, tableName,
+        TableProcedureInterface.TableOperationType.REGION_EDIT, region));
+
+    // fetch the root proc and take the lock on the region
+    Procedure rootProc = queue.poll();
+    assertEquals(1, rootProc.getProcId());
+    assertEquals(false, queue.waitRegion(rootProc, region));
+
+    // fetch the sub-proc and take the lock on the region (inherited lock)
+    Procedure childProc = queue.poll();
+    assertEquals(2, childProc.getProcId());
+    assertEquals(false, queue.waitRegion(childProc, region));
+
+    // proc-3 will be fetched but it can't take the lock
+    Procedure proc = queue.poll();
+    assertEquals(3, proc.getProcId());
+    assertEquals(true, queue.waitRegion(proc, region));
+
+    // release the child lock
+    queue.wakeRegion(childProc, region);
+
+    // nothing in the queue (proc-3 is suspended)
+    assertEquals(null, queue.poll(0));
+
+    // release the root lock
+    queue.wakeRegion(rootProc, region);
+
+    // proc-3 should be now available
+    proc = queue.poll();
+    assertEquals(3, proc.getProcId());
+    assertEquals(false, queue.waitRegion(proc, region));
+    queue.wakeRegion(proc, region);
+  }
+
+  @Test
   public void testSuspendedProcedure() throws Exception {
     final TableName tableName = TableName.valueOf("testSuspendedProcedure");
 


[32/50] [abbrv] hbase git commit: HBASE-16524 Procedure v2 - Compute WALs cleanup on wal modification and not on every sync (Matteo Bertozzi)

Posted by sy...@apache.org.
HBASE-16524 Procedure v2 - Compute WALs cleanup on wal modification and not on every sync (Matteo Bertozzi)

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/319ecd86
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/319ecd86
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/319ecd86

Branch: refs/heads/hbase-12439
Commit: 319ecd867a2903c4ce03c38f6ffec62ada1a6049
Parents: ccb8d67
Author: Michael Stack <st...@apache.org>
Authored: Tue Dec 27 13:53:43 2016 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Tue Dec 27 16:12:45 2016 -0800

----------------------------------------------------------------------
 .../procedure2/store/ProcedureStoreTracker.java | 195 +++++++++----------
 .../store/wal/ProcedureWALFormatReader.java     |  24 ++-
 .../procedure2/store/wal/WALProcedureStore.java | 146 +++++++++-----
 .../store/TestProcedureStoreTracker.java        | 109 +++--------
 .../store/wal/TestWALProcedureStore.java        | 117 ++++++++---
 5 files changed, 322 insertions(+), 269 deletions(-)
----------------------------------------------------------------------
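
Illustrative only, not part of the patch: a rough sketch of the holding-cleanup idea, using plain HashSets in place of the bitmap-based ProcedureStoreTracker that the change below actually uses; all class and method names here are hypothetical.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.Set;

// Each WAL remembers which proc ids it touched; a "holding" set tracks the procedures
// whose only record is still in the oldest WAL. Every insert/update/delete removes ids
// from that set, and the oldest WAL is dropped once nothing holds it any more.
public class WalCleanupSketch {

  static final class WalFile {
    final Set<Long> touchedProcIds = new HashSet<>();
  }

  private final Deque<WalFile> logs = new ArrayDeque<>();
  private final Set<Long> activeProcIds = new HashSet<>(); // live procedures (store tracker)
  private final Set<Long> holding = new HashSet<>();       // procs pinning the oldest WAL

  public WalCleanupSketch() {
    logs.addLast(new WalFile());
  }

  public void insertOrUpdate(long procId) {
    activeProcIds.add(procId);
    logs.getLast().touchedProcIds.add(procId);
    holding.remove(procId); // cleanup bookkeeping happens on modification...
  }

  public void delete(long procId) {
    activeProcIds.remove(procId);
    logs.getLast().touchedProcIds.add(procId);
    holding.remove(procId);
  }

  public void rollWriter() {
    logs.addLast(new WalFile());
    rebuildHolding();
    removeInactiveLogs();   // ...and on WAL roll, not on every sync
  }

  private void rebuildHolding() {
    holding.clear();
    if (logs.size() <= 1) {
      return;
    }
    // Procedures written to the oldest WAL still pin it only if they are alive and
    // have not been rewritten to a newer WAL since.
    holding.addAll(logs.getFirst().touchedProcIds);
    holding.retainAll(activeProcIds);
    logs.stream().skip(1).forEach(w -> holding.removeAll(w.touchedProcIds));
  }

  private void removeInactiveLogs() {
    while (logs.size() > 1 && holding.isEmpty()) {
      logs.removeFirst();
      rebuildHolding();
    }
  }
}

The oldest WAL is dropped as soon as no live procedure has its only record there; buildHoldingCleanupTracker() and removeInactiveLogs() in the WALProcedureStore diff below implement the same idea with trackers instead of sets.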


http://git-wip-us.apache.org/repos/asf/hbase/blob/319ecd86/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 7ba72f2..0899767 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -156,11 +156,18 @@ public class ProcedureStoreTracker {
       partial = false;
     }
 
-    public BitSetNode(BitSetNode other) {
+    public BitSetNode(final BitSetNode other, final boolean resetDelete) {
       this.start = other.start;
       this.partial = other.partial;
       this.updated = other.updated.clone();
-      this.deleted = other.deleted.clone();
+      if (resetDelete) {
+        this.deleted = new long[other.deleted.length];
+        for (int i = 0; i < this.deleted.length; ++i) {
+          this.deleted[i] = ~(other.updated[i]);
+        }
+      } else {
+        this.deleted = other.deleted.clone();
+      }
     }
 
     public void update(final long procId) {
@@ -171,11 +178,11 @@ public class ProcedureStoreTracker {
       updateState(procId, true);
     }
 
-    public Long getStart() {
+    public long getStart() {
       return start;
     }
 
-    public Long getEnd() {
+    public long getEnd() {
       return start + (updated.length << ADDRESS_BITS_PER_WORD) - 1;
     }
 
@@ -250,33 +257,6 @@ public class ProcedureStoreTracker {
     }
 
     /**
-     * If an active (non-deleted) procedure in current BitSetNode has been updated in {@code other}
-     * BitSetNode, then delete it from current node.
-     * @return true if node changed, i.e. some procedure(s) from {@code other} was subtracted from
-     * current node.
-     */
-    public boolean subtract(BitSetNode other) {
-      // Assert that other node intersects with this node.
-      assert !(other.getEnd() < this.start) && !(this.getEnd() < other.start);
-      int thisOffset = 0, otherOffset = 0;
-      if (this.start < other.start) {
-        thisOffset = (int) (other.start - this.start) / BITS_PER_WORD;
-      } else {
-        otherOffset = (int) (this.start - other.start) / BITS_PER_WORD;
-      }
-      int size = Math.min(this.updated.length - thisOffset, other.updated.length - otherOffset);
-      boolean nonZeroIntersect = false;
-      for (int i = 0; i < size; i++) {
-        long intersect = ~this.deleted[thisOffset + i] & other.updated[otherOffset + i];
-        if (intersect != 0) {
-          this.deleted[thisOffset + i] |= intersect;
-          nonZeroIntersect = true;
-        }
-      }
-      return nonZeroIntersect;
-    }
-
-    /**
      * Convert to
      * org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker.TrackerNode
      * protobuf.
@@ -292,7 +272,6 @@ public class ProcedureStoreTracker {
       return builder.build();
     }
 
-
     // ========================================================================
     //  Grow/Merge Helpers
     // ========================================================================
@@ -461,20 +440,22 @@ public class ProcedureStoreTracker {
   /**
    * Resets internal state to same as given {@code tracker}. Does deep copy of the bitmap.
    */
-  public void resetTo(ProcedureStoreTracker tracker) {
+  public void resetTo(final ProcedureStoreTracker tracker) {
+    resetTo(tracker, false);
+  }
+
+  public void resetTo(final ProcedureStoreTracker tracker, final boolean resetDelete) {
     this.partial = tracker.partial;
     this.minUpdatedProcId = tracker.minUpdatedProcId;
     this.maxUpdatedProcId = tracker.maxUpdatedProcId;
     this.keepDeletes = tracker.keepDeletes;
     for (Map.Entry<Long, BitSetNode> entry : tracker.map.entrySet()) {
-      map.put(entry.getKey(), new BitSetNode(entry.getValue()));
+      map.put(entry.getKey(), new BitSetNode(entry.getValue(), resetDelete));
     }
   }
 
   public void insert(long procId) {
-    BitSetNode node = getOrCreateNode(procId);
-    node.update(procId);
-    trackProcIds(procId);
+    insert(null, procId);
   }
 
   public void insert(final long[] procIds) {
@@ -484,46 +465,108 @@ public class ProcedureStoreTracker {
   }
 
   public void insert(final long procId, final long[] subProcIds) {
-    update(procId);
+    BitSetNode node = null;
+    node = update(node, procId);
     for (int i = 0; i < subProcIds.length; ++i) {
-      insert(subProcIds[i]);
+      node = insert(node, subProcIds[i]);
     }
   }
 
+  private BitSetNode insert(BitSetNode node, final long procId) {
+    if (node == null || !node.contains(procId)) {
+      node = getOrCreateNode(procId);
+    }
+    node.update(procId);
+    trackProcIds(procId);
+    return node;
+  }
+
   public void update(long procId) {
-    Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
-    assert entry != null : "expected node to update procId=" + procId;
+    update(null, procId);
+  }
 
-    BitSetNode node = entry.getValue();
-    assert node.contains(procId);
+  private BitSetNode update(BitSetNode node, final long procId) {
+    node = lookupClosestNode(node, procId);
+    assert node != null : "expected node to update procId=" + procId;
+    assert node.contains(procId) : "expected procId=" + procId + " in the node";
     node.update(procId);
     trackProcIds(procId);
+    return node;
   }
 
   public void delete(long procId) {
-    Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
-    assert entry != null : "expected node to delete procId=" + procId;
+    delete(null, procId);
+  }
 
-    BitSetNode node = entry.getValue();
-    assert node.contains(procId) : "expected procId in the node";
-    node.delete(procId);
+  public void delete(final long[] procIds) {
+    Arrays.sort(procIds);
+    BitSetNode node = null;
+    for (int i = 0; i < procIds.length; ++i) {
+      node = delete(node, procIds[i]);
+    }
+  }
 
+  private BitSetNode delete(BitSetNode node, final long procId) {
+    node = lookupClosestNode(node, procId);
+    assert node != null : "expected node to delete procId=" + procId;
+    assert node.contains(procId) : "expected procId=" + procId + " in the node";
+    node.delete(procId);
     if (!keepDeletes && node.isEmpty()) {
       // TODO: RESET if (map.size() == 1)
-      map.remove(entry.getKey());
+      map.remove(node.getStart());
     }
 
     trackProcIds(procId);
+    return node;
   }
 
-  public void delete(long[] procIds) {
-    // TODO: optimize
-    Arrays.sort(procIds);
-    for (int i = 0; i < procIds.length; ++i) {
-      delete(procIds[i]);
+  @InterfaceAudience.Private
+  public void setDeleted(final long procId, final boolean isDeleted) {
+    BitSetNode node = getOrCreateNode(procId);
+    assert node.contains(procId) : "expected procId=" + procId + " in the node=" + node;
+    node.updateState(procId, isDeleted);
+    trackProcIds(procId);
+  }
+
+  public void setDeletedIfSet(final long... procId) {
+    BitSetNode node = null;
+    for (int i = 0; i < procId.length; ++i) {
+      node = lookupClosestNode(node, procId[i]);
+      if (node != null && node.isUpdated(procId[i])) {
+        node.delete(procId[i]);
+      }
     }
   }
 
+  public void setDeletedIfSet(final ProcedureStoreTracker tracker) {
+    BitSetNode trackerNode = null;
+    for (BitSetNode node: map.values()) {
+      final long minProcId = node.getStart();
+      final long maxProcId = node.getEnd();
+      for (long procId = minProcId; procId <= maxProcId; ++procId) {
+        if (!node.isUpdated(procId)) continue;
+
+        trackerNode = tracker.lookupClosestNode(trackerNode, procId);
+        if (trackerNode == null || !trackerNode.contains(procId) || trackerNode.isUpdated(procId)) {
+          // the procedure was removed or updated
+          node.delete(procId);
+        }
+      }
+    }
+  }
+
+  /**
+   * Lookup the node containing the specified procId.
+   * @param node cached node to check before doing a lookup
+   * @param procId the procId to lookup
+   * @return the node that may contain the procId, or null
+   */
+  private BitSetNode lookupClosestNode(final BitSetNode node, final long procId) {
+    if (node != null && node.contains(procId)) return node;
+    final Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
+    return entry != null ? entry.getValue() : null;
+  }
+
   private void trackProcIds(long procId) {
     minUpdatedProcId = Math.min(minUpdatedProcId, procId);
     maxUpdatedProcId = Math.max(maxUpdatedProcId, procId);
@@ -537,14 +580,6 @@ public class ProcedureStoreTracker {
     return maxUpdatedProcId;
   }
 
-  @InterfaceAudience.Private
-  public void setDeleted(final long procId, final boolean isDeleted) {
-    BitSetNode node = getOrCreateNode(procId);
-    assert node.contains(procId) : "expected procId=" + procId + " in the node=" + node;
-    node.updateState(procId, isDeleted);
-    trackProcIds(procId);
-  }
-
   public void reset() {
     this.keepDeletes = false;
     this.partial = false;
@@ -632,11 +667,6 @@ public class ProcedureStoreTracker {
     return true;
   }
 
-  public boolean isTracking(long minId, long maxId) {
-    // TODO: we can make it more precise, instead of looking just at the block
-    return map.floorEntry(minId) != null || map.floorEntry(maxId) != null;
-  }
-
   /**
    * Clears the list of updated procedure ids. This doesn't affect global list of active
    * procedure ids.
@@ -737,37 +767,6 @@ public class ProcedureStoreTracker {
     }
   }
 
-  /**
-   * Iterates over
-   * {@link BitSetNode}s in this.map and subtracts with corresponding ones from {@code other}
-   * tracker.
-   * @return true if tracker changed, i.e. some procedure from {@code other} were subtracted from
-   * current tracker.
-   */
-  public boolean subtract(ProcedureStoreTracker other) {
-    // Can not intersect partial bitmap.
-    assert !partial && !other.partial;
-    boolean nonZeroIntersect = false;
-    for (Map.Entry<Long, BitSetNode> currentEntry : map.entrySet()) {
-      BitSetNode currentBitSetNode = currentEntry.getValue();
-      Map.Entry<Long, BitSetNode> otherTrackerEntry = other.map.floorEntry(currentEntry.getKey());
-      if (otherTrackerEntry == null  // No node in other map with key <= currentEntry.getKey().
-          // First entry in other map doesn't intersect with currentEntry.
-          || otherTrackerEntry.getValue().getEnd() < currentEntry.getKey()) {
-        otherTrackerEntry = other.map.ceilingEntry(currentEntry.getKey());
-        if (otherTrackerEntry == null || !currentBitSetNode.contains(otherTrackerEntry.getKey())) {
-          // No node in other map intersects with currentBitSetNode's range.
-          continue;
-        }
-      }
-      do {
-        nonZeroIntersect |= currentEntry.getValue().subtract(otherTrackerEntry.getValue());
-        otherTrackerEntry = other.map.higherEntry(otherTrackerEntry.getKey());
-      } while (otherTrackerEntry != null && currentBitSetNode.contains(otherTrackerEntry.getKey()));
-    }
-    return nonZeroIntersect;
-  }
-
   // ========================================================================
   //  Convert to/from Protocol Buffer.
   // ========================================================================

http://git-wip-us.apache.org/repos/asf/hbase/blob/319ecd86/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
index e5c8fca..aeae569 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java
@@ -101,8 +101,18 @@ public class ProcedureWALFormatReader {
   private final WalProcedureMap localProcedureMap = new WalProcedureMap(1024);
   private final WalProcedureMap procedureMap = new WalProcedureMap(1024);
 
-  // private long compactionLogId;
-  private long maxProcId = 0;
+  private final ProcedureWALFormat.Loader loader;
+
+  /**
+   * Global tracker that will be used by the WALProcedureStore after load.
+   * If the last WAL was closed cleanly we already have a full tracker ready to be used.
+   * If the last WAL was truncated (e.g. master killed) the tracker will be empty
+   * and the 'partial' flag will be set. In this case on WAL replay we are going
+   * to rebuild the tracker.
+   */
+  private final ProcedureStoreTracker tracker;
+  // private final boolean hasFastStartSupport;
+
   /**
    * If tracker for a log file is partial (see {@link ProcedureStoreTracker#partial}), we
    * re-build the list of procedures updated in that WAL because we need it for log cleaning
@@ -113,13 +123,9 @@ public class ProcedureWALFormatReader {
    * {@link ProcedureStoreTracker.BitSetNode#subtract(ProcedureStoreTracker.BitSetNode)}).
    */
   private ProcedureStoreTracker localTracker;
-  private final ProcedureWALFormat.Loader loader;
-  /**
-   * Global tracker. If set to partial, it will be updated as procedures are loaded from wals,
-   * otherwise not.
-   */
-  private final ProcedureStoreTracker tracker;
-  // private final boolean hasFastStartSupport;
+
+  // private long compactionLogId;
+  private long maxProcId = 0;
 
   public ProcedureWALFormatReader(final ProcedureStoreTracker tracker,
       ProcedureWALFormat.Loader loader) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/319ecd86/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 3884e39..922b681 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -72,6 +72,14 @@ public class WALProcedureStore extends ProcedureStoreBase {
     void recoverFileLease(FileSystem fs, Path path) throws IOException;
   }
 
+  public static final String WAL_COUNT_WARN_THRESHOLD_CONF_KEY =
+    "hbase.procedure.store.wal.warn.threshold";
+  private static final int DEFAULT_WAL_COUNT_WARN_THRESHOLD = 64;
+
+  public static final String EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY =
+    "hbase.procedure.store.wal.exec.cleanup.on.load";
+  private static final boolean DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY = true;
+
   public static final String MAX_RETRIES_BEFORE_ROLL_CONF_KEY =
     "hbase.procedure.store.wal.max.retries.before.roll";
   private static final int DEFAULT_MAX_RETRIES_BEFORE_ROLL = 3;
@@ -106,6 +114,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
   private static final int DEFAULT_SYNC_STATS_COUNT = 10;
 
   private final LinkedList<ProcedureWALFile> logs = new LinkedList<>();
+  private final ProcedureStoreTracker holdingCleanupTracker = new ProcedureStoreTracker();
   private final ProcedureStoreTracker storeTracker = new ProcedureStoreTracker();
   private final ReentrantLock lock = new ReentrantLock();
   private final Condition waitCond = lock.newCondition();
@@ -132,6 +141,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
   private Thread syncThread;
   private ByteSlot[] slots;
 
+  private int walCountWarnThreshold;
   private int maxRetriesBeforeRoll;
   private int maxSyncFailureRoll;
   private int waitBeforeRoll;
@@ -195,6 +205,8 @@ public class WALProcedureStore extends ProcedureStoreBase {
     }
 
     // Tunings
+    walCountWarnThreshold =
+      conf.getInt(WAL_COUNT_WARN_THRESHOLD_CONF_KEY, DEFAULT_WAL_COUNT_WARN_THRESHOLD);
     maxRetriesBeforeRoll =
       conf.getInt(MAX_RETRIES_BEFORE_ROLL_CONF_KEY, DEFAULT_MAX_RETRIES_BEFORE_ROLL);
     maxSyncFailureRoll = conf.getInt(MAX_SYNC_FAILURE_ROLL_CONF_KEY, DEFAULT_MAX_SYNC_FAILURE_ROLL);
@@ -257,6 +269,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
       log.close();
     }
     logs.clear();
+    loading.set(true);
   }
 
   private void sendStopSignal() {
@@ -335,24 +348,25 @@ public class WALProcedureStore extends ProcedureStoreBase {
 
   @Override
   public void load(final ProcedureLoader loader) throws IOException {
-    if (logs.isEmpty()) {
-      throw new RuntimeException("recoverLease() must be called before loading data");
-    }
+    lock.lock();
+    try {
+      if (logs.isEmpty()) {
+        throw new RuntimeException("recoverLease() must be called before loading data");
+      }
 
-    // Nothing to do, If we have only the current log.
-    if (logs.size() == 1) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("No state logs to replay.");
+      // Nothing to do, If we have only the current log.
+      if (logs.size() == 1) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("No state logs to replay.");
+        }
+        loader.setMaxProcId(0);
+        return;
       }
-      loader.setMaxProcId(0);
-      loading.set(false);
-      return;
-    }
 
-    // Load the old logs
-    Iterator<ProcedureWALFile> it = logs.descendingIterator();
-    it.next(); // Skip the current log
-    try {
+      // Load the old logs
+      final Iterator<ProcedureWALFile> it = logs.descendingIterator();
+      it.next(); // Skip the current log
+
       ProcedureWALFormat.load(it, storeTracker, new ProcedureWALFormat.Loader() {
         @Override
         public void setMaxProcId(long maxProcId) {
@@ -379,7 +393,32 @@ public class WALProcedureStore extends ProcedureStoreBase {
         }
       });
     } finally {
-      loading.set(false);
+      try {
+        // try to cleanup inactive wals and complete the operation
+        buildHoldingCleanupTracker();
+        tryCleanupLogsOnLoad();
+        loading.set(false);
+      } finally {
+        lock.unlock();
+      }
+    }
+  }
+
+  private void tryCleanupLogsOnLoad() {
+    // nothing to cleanup.
+    if (logs.size() <= 1) return;
+
+    // the config says to not cleanup wals on load.
+    if (!conf.getBoolean(EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY,
+      DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY)) {
+      LOG.debug("WALs cleanup on load is not enabled: " + getActiveLogs());
+      return;
+    }
+
+    try {
+      periodicRoll();
+    } catch (IOException e) {
+      LOG.warn("unable to cleanup logs on load: " + e.getMessage(), e);
     }
   }
 
@@ -634,16 +673,20 @@ public class WALProcedureStore extends ProcedureStoreBase {
           storeTracker.insert(subProcIds);
         } else {
           storeTracker.insert(procId, subProcIds);
+          holdingCleanupTracker.setDeletedIfSet(procId);
         }
         break;
       case UPDATE:
         storeTracker.update(procId);
+        holdingCleanupTracker.setDeletedIfSet(procId);
         break;
       case DELETE:
         if (subProcIds != null && subProcIds.length > 0) {
           storeTracker.delete(subProcIds);
+          holdingCleanupTracker.setDeletedIfSet(subProcIds);
         } else {
           storeTracker.delete(procId);
+          holdingCleanupTracker.setDeletedIfSet(procId);
         }
         break;
       default:
@@ -948,6 +991,15 @@ public class WALProcedureStore extends ProcedureStoreBase {
     lastRollTs.set(rollTs);
     logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos, rollTs));
 
+    // if this is the first extra WAL being added, build the holding cleanup tracker
+    if (logs.size() == 2) {
+      buildHoldingCleanupTracker();
+    } else if (logs.size() > walCountWarnThreshold) {
+      LOG.warn("procedure WALs count=" + logs.size() +
+        " above the warning threshold " + walCountWarnThreshold +
+        ". Check running procedures to see if something is stuck.");
+    }
+
     if (LOG.isDebugEnabled()) {
       LOG.debug("Roll new state log: " + logId);
     }
@@ -976,38 +1028,33 @@ public class WALProcedureStore extends ProcedureStoreBase {
   // ==========================================================================
   //  Log Files cleaner helpers
   // ==========================================================================
-
-  /**
-   * Iterates over log files from latest (ignoring currently active one) to oldest, deleting the
-   * ones which don't contain anything useful for recovery.
-   * @throws IOException
-   */
   private void removeInactiveLogs() throws IOException {
-    // TODO: can we somehow avoid first iteration (starting from newest log) and still figure out
-    // efficient way to cleanup old logs.
-    // Alternatively, a complex and maybe more efficient method would be using this iteration to
-    // rewrite latest states of active procedures to a new log file and delete all old ones.
-    if (logs.size() <= 1) return;
-    ProcedureStoreTracker runningTracker = new ProcedureStoreTracker();
-    runningTracker.resetTo(storeTracker);
-    List<ProcedureWALFile> logsToBeDeleted = new ArrayList<>();
-    for (int i = logs.size() - 2; i >= 0; i--) {
-      ProcedureWALFile log = logs.get(i);
-      // If nothing was subtracted, delete the log file since it doesn't contain any useful proc
-      // states.
-      if (!runningTracker.subtract(log.getTracker())) {
-        logsToBeDeleted.add(log);
-      }
+    // We keep track of which procedures are holding the oldest WAL in 'holdingCleanupTracker'.
+    // Once there is nothing holding the oldest WAL, we can remove it.
+    while (logs.size() > 1 && holdingCleanupTracker.isEmpty()) {
+      removeLogFile(logs.getFirst());
+      buildHoldingCleanupTracker();
     }
-    // Delete the logs from oldest to newest and stop at first log that can't be deleted to avoid
-    // holes in the log file sequence (for better debug capability).
-    while (true) {
-      ProcedureWALFile log = logs.getFirst();
-      if (logsToBeDeleted.contains(log)) {
-        removeLogFile(log);
-      } else {
-        break;
-      }
+
+    // TODO: In case we are holding up a lot of logs for a long time we should
+    // rewrite old procedures (in theory parent procs) to the new WAL.
+  }
+
+  private void buildHoldingCleanupTracker() {
+    if (logs.size() <= 1) {
+      // we only have one wal, so nothing to do
+      holdingCleanupTracker.reset();
+      return;
+    }
+
+    // compute the holding tracker.
+    //  - the first WAL is used for the 'updates'
+    //  - the other WALs are scanned to remove procs already in other wals.
+    // TODO: exit early if holdingCleanupTracker.isEmpty()
+    holdingCleanupTracker.resetTo(logs.getFirst().getTracker(), true);
+    holdingCleanupTracker.setDeletedIfSet(storeTracker);
+    for (int i = 1, size = logs.size() - 1; i < size; ++i) {
+      holdingCleanupTracker.setDeletedIfSet(logs.get(i).getTracker());
     }
   }
 
@@ -1020,12 +1067,19 @@ public class WALProcedureStore extends ProcedureStoreBase {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Remove all state logs with ID less than " + lastLogId);
     }
+
+    boolean removed = false;
     while (logs.size() > 1) {
       ProcedureWALFile log = logs.getFirst();
       if (lastLogId < log.getLogId()) {
         break;
       }
       removeLogFile(log);
+      removed = true;
+    }
+
+    if (removed) {
+      buildHoldingCleanupTracker();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/319ecd86/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
index 76fd2c5..550116e 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureStoreTracker.java
@@ -106,32 +106,6 @@ public class TestProcedureStoreTracker {
   }
 
   @Test
-  public void testIsTracking() {
-    long[][] procIds = new long[][] {{4, 7}, {1024, 1027}, {8192, 8194}};
-    long[][] checkIds = new long[][] {{2, 8}, {1023, 1025}, {8193, 8191}};
-
-    ProcedureStoreTracker tracker = new ProcedureStoreTracker();
-    for (int i = 0; i < procIds.length; ++i) {
-      long[] seq = procIds[i];
-      tracker.insert(seq[0]);
-      tracker.insert(seq[1]);
-    }
-
-    for (int i = 0; i < procIds.length; ++i) {
-      long[] check = checkIds[i];
-      long[] seq = procIds[i];
-      assertTrue(tracker.isTracking(seq[0], seq[1]));
-      assertTrue(tracker.isTracking(check[0], check[1]));
-      tracker.delete(seq[0]);
-      tracker.delete(seq[1]);
-      assertFalse(tracker.isTracking(seq[0], seq[1]));
-      assertFalse(tracker.isTracking(check[0], check[1]));
-    }
-
-    assertTrue(tracker.isEmpty());
-  }
-
-  @Test
   public void testBasicCRUD() {
     ProcedureStoreTracker tracker = new ProcedureStoreTracker();
     assertTrue(tracker.isEmpty());
@@ -287,64 +261,31 @@ public class TestProcedureStoreTracker {
   }
 
   @Test
-  public void testBitSetNodeSubtract() {
-    // 1 not updated in n2, nothing to subtract
-    BitSetNode n1 = buildBitSetNode(new long[]{ 1L }, new long[]{ 1L }, new long[]{ });
-    BitSetNode n2 = buildBitSetNode(new long[]{ 1L }, new long[]{}, new long[]{});
-    assertFalse(n1.subtract(n2));
-
-    // 1 updated in n2, and not deleted in n1, should subtract.
-    n1 = buildBitSetNode(new long[]{ 1L }, new long[]{ 1L }, new long[]{});
-    n2 = buildBitSetNode(new long[]{ 1L }, new long[]{ 1L }, new long[]{});
-    assertTrue(n1.subtract(n2));
-
-    // 1 updated in n2, but deleted in n1, should not subtract
-    n1 = buildBitSetNode(new long[]{ 1L }, new long[]{ 1L }, new long[]{ 1L });
-    n2 = buildBitSetNode(new long[]{ 1L }, new long[]{ 1L }, new long[]{});
-    assertFalse(n1.subtract(n2));
-
-    // 1 updated in n2, but not deleted in n1, should subtract.
-    n1 = buildBitSetNode(new long[]{ 1L }, new long[]{ 1L }, new long[]{});
-    n2 = buildBitSetNode(new long[]{ 1L }, new long[]{ 1L }, new long[]{ 1L });
-    assertTrue(n1.subtract(n2));
-
-    // all four cases together.
-    n1 = buildBitSetNode(new long[]{ 0L, 10L, 20L, 30L  }, new long[]{ 0L, 10L, 20L, 30L  },
-        new long[]{ 20L });
-    n2 = buildBitSetNode(new long[]{ 0L, 10L, 20L, 30L  }, new long[]{ 0L, 20L, 30L },
-        new long[]{ 0L });
-    assertTrue(n1.subtract(n2));
-  }
+  public void testSetDeletedIfSet() {
+    final ProcedureStoreTracker tracker = new ProcedureStoreTracker();
+    final long[] procIds = new long[] { 1, 3, 7, 152, 512, 1024, 1025 };
 
-  @Test
-  // The structure is same as testBitSetNodeSubtract() but the ids are bigger so that internally
-  // there are many BitSetNodes.
-  public void testTrackerSubtract() {
-    // not updated in n2, nothing to subtract
-    ProcedureStoreTracker n1 = buildTracker(new long[]{ 1L, 1000L }, new long[]{ 1L, 1000L },
-        new long[]{ });
-    ProcedureStoreTracker n2 = buildTracker(new long[]{ 1L, 1000L }, new long[]{}, new long[]{});
-    assertFalse(n1.subtract(n2));
-
-    // updated in n2, and not deleted in n1, should subtract.
-    n1 = buildTracker(new long[]{ 1L, 1000L }, new long[]{ 1L, 1000L }, new long[]{});
-    n2 = buildTracker(new long[]{ 1L, 1000L }, new long[]{ 1L, 1000L }, new long[]{});
-    assertTrue(n1.subtract(n2));
-
-    // updated in n2, but also deleted in n1, should not subtract
-    n1 = buildTracker(new long[]{ 1L, 1000L }, new long[]{ 1L, 1000L }, new long[]{ 1L, 1000L });
-    n2 = buildTracker(new long[]{ 1L, 1000L }, new long[]{ 1L }, new long[]{});
-    assertFalse(n1.subtract(n2));
-
-    // updated in n2, but not deleted in n1, should subtract.
-    n1 = buildTracker(new long[]{ 1L, 1000L }, new long[]{ 1L, 1000L }, new long[]{});
-    n2 = buildTracker(new long[]{ 1L, 1000L }, new long[]{ 1L }, new long[]{ 1L, 1000L });
-    assertFalse(n1.subtract(n2));
-
-    n1 = buildTracker(new long[]{ 0L, 100L, 200L, 300L }, new long[]{ 0L, 100L, 200L, 300L },
-        new long[]{ 200L });
-    n2 = buildTracker(new long[]{ 0L, 100L, 200L, 300L }, new long[]{ 0L, 200L, 300L },
-        new long[]{ 0L });
-    assertTrue(n1.subtract(n2));
+    // test single proc
+    for (int i = 0; i < procIds.length; ++i) {
+      tracker.insert(procIds[i]);
+    }
+    assertEquals(false, tracker.isEmpty());
+
+    for (int i = 0; i < procIds.length; ++i) {
+      tracker.setDeletedIfSet(procIds[i] - 1);
+      tracker.setDeletedIfSet(procIds[i]);
+      tracker.setDeletedIfSet(procIds[i] + 1);
+    }
+    assertEquals(true, tracker.isEmpty());
+
+    // test batch
+    tracker.reset();
+    for (int i = 0; i < procIds.length; ++i) {
+      tracker.insert(procIds[i]);
+    }
+    assertEquals(false, tracker.isEmpty());
+
+    tracker.setDeletedIfSet(procIds);
+    assertEquals(true, tracker.isEmpty());
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/319ecd86/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
index 83f481c..f8c3486 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
@@ -31,6 +31,7 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -72,6 +73,10 @@ public class TestWALProcedureStore {
   private Path testDir;
   private Path logDir;
 
+  private void setupConfig(final Configuration conf) {
+    conf.setBoolean(WALProcedureStore.EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, true);
+  }
+
   @Before
   public void setUp() throws IOException {
     htu = new HBaseCommonTestingUtility();
@@ -79,6 +84,7 @@ public class TestWALProcedureStore {
     fs = testDir.getFileSystem(htu.getConfiguration());
     assertTrue(testDir.depth() > 1);
 
+    setupConfig(htu.getConfiguration());
     logDir = new Path(testDir, "proc-logs");
     procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), fs, logDir);
     procStore.start(PROCEDURE_STORE_SLOTS);
@@ -101,6 +107,19 @@ public class TestWALProcedureStore {
     for (int i = 0; i < 10; ++i) {
       procStore.periodicRollForTesting();
     }
+    assertEquals(1, procStore.getActiveLogs().size());
+    FileStatus[] status = fs.listStatus(logDir);
+    assertEquals(1, status.length);
+  }
+
+  @Test
+  public void testRestartWithoutData() throws Exception {
+    for (int i = 0; i < 10; ++i) {
+      final LoadCounter loader = new LoadCounter();
+      storeRestart(loader);
+    }
+    LOG.info("ACTIVE WALs " + procStore.getActiveLogs());
+    assertEquals(1, procStore.getActiveLogs().size());
     FileStatus[] status = fs.listStatus(logDir);
     assertEquals(1, status.length);
   }
@@ -126,13 +145,13 @@ public class TestWALProcedureStore {
 
   @Test
   public void testWalCleanerSequentialClean() throws Exception {
-    int NUM = 5;
-    List<Procedure> procs = new ArrayList<>();
+    final Procedure[] procs = new Procedure[5];
     ArrayList<ProcedureWALFile> logs = null;
+
     // Insert procedures and roll wal after every insert.
-    for (int i = 0; i < NUM; i++) {
-      procs.add(new TestSequentialProcedure());
-      procStore.insert(procs.get(i), null);
+    for (int i = 0; i < procs.length; i++) {
+      procs[i] = new TestSequentialProcedure();
+      procStore.insert(procs[i], null);
       procStore.rollWriterForTesting();
       logs = procStore.getActiveLogs();
       assertEquals(logs.size(), i + 2);  // Extra 1 for current ongoing wal.
@@ -140,12 +159,13 @@ public class TestWALProcedureStore {
 
     // Delete procedures in sequential order to make sure that only the corresponding wal is
     // deleted from the logs list.
-    int[] deleteOrder = new int[]{ 0, 1, 2, 3, 4};
+    final int[] deleteOrder = new int[] { 0, 1, 2, 3, 4 };
     for (int i = 0; i < deleteOrder.length; i++) {
-      procStore.delete(procs.get(deleteOrder[i]).getProcId());
+      procStore.delete(procs[deleteOrder[i]].getProcId());
       procStore.removeInactiveLogsForTesting();
-      assertFalse(procStore.getActiveLogs().contains(logs.get(deleteOrder[i])));
-      assertEquals(procStore.getActiveLogs().size(), NUM - i );
+      assertFalse(logs.get(deleteOrder[i]).toString(),
+        procStore.getActiveLogs().contains(logs.get(deleteOrder[i])));
+      assertEquals(procStore.getActiveLogs().size(), procs.length - i);
     }
   }
 
@@ -154,30 +174,29 @@ public class TestWALProcedureStore {
   // they are in the starting of the list.
   @Test
   public void testWalCleanerNoHoles() throws Exception {
-    int NUM = 5;
-    List<Procedure> procs = new ArrayList<>();
+    final Procedure[] procs = new Procedure[5];
     ArrayList<ProcedureWALFile> logs = null;
     // Insert procedures and roll wal after every insert.
-    for (int i = 0; i < NUM; i++) {
-      procs.add(new TestSequentialProcedure());
-      procStore.insert(procs.get(i), null);
+    for (int i = 0; i < procs.length; i++) {
+      procs[i] = new TestSequentialProcedure();
+      procStore.insert(procs[i], null);
       procStore.rollWriterForTesting();
       logs = procStore.getActiveLogs();
-      assertEquals(logs.size(), i + 2);  // Extra 1 for current ongoing wal.
+      assertEquals(i + 2, logs.size());  // Extra 1 for current ongoing wal.
     }
 
-    for (int i = 1; i < NUM; i++) {
-      procStore.delete(procs.get(i).getProcId());
+    for (int i = 1; i < procs.length; i++) {
+      procStore.delete(procs[i].getProcId());
     }
-    assertEquals(procStore.getActiveLogs().size(), NUM + 1);
-    procStore.delete(procs.get(0).getProcId());
-    assertEquals(procStore.getActiveLogs().size(), 1);
+    assertEquals(procs.length + 1, procStore.getActiveLogs().size());
+    procStore.delete(procs[0].getProcId());
+    assertEquals(1, procStore.getActiveLogs().size());
   }
 
   @Test
   public void testWalCleanerUpdates() throws Exception {
-    TestSequentialProcedure p1 = new TestSequentialProcedure(),
-        p2 = new TestSequentialProcedure();
+    TestSequentialProcedure p1 = new TestSequentialProcedure();
+    TestSequentialProcedure p2 = new TestSequentialProcedure();
     procStore.insert(p1, null);
     procStore.insert(p2, null);
     procStore.rollWriterForTesting();
@@ -192,8 +211,8 @@ public class TestWALProcedureStore {
 
   @Test
   public void testWalCleanerUpdatesDontLeaveHoles() throws Exception {
-    TestSequentialProcedure p1 = new TestSequentialProcedure(),
-        p2 = new TestSequentialProcedure();
+    TestSequentialProcedure p1 = new TestSequentialProcedure();
+    TestSequentialProcedure p2 = new TestSequentialProcedure();
     procStore.insert(p1, null);
     procStore.insert(p2, null);
     procStore.rollWriterForTesting();  // generates first log with p1 + p2
@@ -214,6 +233,36 @@ public class TestWALProcedureStore {
   }
 
   @Test
+  public void testWalCleanerWithEmptyRolls() throws Exception {
+    final Procedure[] procs = new Procedure[3];
+    for (int i = 0; i < procs.length; ++i) {
+      procs[i] = new TestSequentialProcedure();
+      procStore.insert(procs[i], null);
+    }
+    assertEquals(1, procStore.getActiveLogs().size());
+    procStore.rollWriterForTesting();
+    assertEquals(2, procStore.getActiveLogs().size());
+    procStore.rollWriterForTesting();
+    assertEquals(3, procStore.getActiveLogs().size());
+
+    for (int i = 0; i < procs.length; ++i) {
+      procStore.update(procs[i]);
+      procStore.rollWriterForTesting();
+      procStore.rollWriterForTesting();
+      if (i < (procs.length - 1)) {
+        assertEquals(3 + ((i + 1) * 2), procStore.getActiveLogs().size());
+      }
+    }
+    assertEquals(7, procStore.getActiveLogs().size());
+
+    for (int i = 0; i < procs.length; ++i) {
+      procStore.delete(procs[i].getProcId());
+      assertEquals(7 - ((i + 1) * 2), procStore.getActiveLogs().size());
+    }
+    assertEquals(1, procStore.getActiveLogs().size());
+  }
+
+  @Test
   public void testEmptyLogLoad() throws Exception {
     LoadCounter loader = new LoadCounter();
     storeRestart(loader);
@@ -294,6 +343,8 @@ public class TestWALProcedureStore {
     }
 
     // Test Load 1
+    // Restart the store (avoid cleaning up the files, to check the rebuilt trackers)
+    htu.getConfiguration().setBoolean(WALProcedureStore.EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, false);
     LoadCounter loader = new LoadCounter();
     storeRestart(loader);
     assertEquals(1, loader.getLoadedCount());
@@ -360,8 +411,8 @@ public class TestWALProcedureStore {
     assertEquals(0, loader.getCorruptedCount());
   }
 
-  void assertUpdated(final ProcedureStoreTracker tracker, Procedure[] procs,
-      int[] updatedProcs, int[] nonUpdatedProcs) {
+  private static void assertUpdated(final ProcedureStoreTracker tracker,
+      final Procedure[] procs, final int[] updatedProcs, final int[] nonUpdatedProcs) {
     for (int index : updatedProcs) {
       long procId = procs[index].getProcId();
       assertTrue("Procedure id : " + procId, tracker.isUpdated(procId));
@@ -372,8 +423,8 @@ public class TestWALProcedureStore {
     }
   }
 
-  void assertDeleted(final ProcedureStoreTracker tracker, Procedure[] procs,
-      int[] deletedProcs, int[] nonDeletedProcs) {
+  private static void assertDeleted(final ProcedureStoreTracker tracker,
+      final Procedure[] procs, final int[] deletedProcs, final int[] nonDeletedProcs) {
     for (int index : deletedProcs) {
       long procId = procs[index].getProcId();
       assertEquals("Procedure id : " + procId,
@@ -423,7 +474,8 @@ public class TestWALProcedureStore {
       corruptLog(logs[i], 4);
     }
 
-    // Restart the store
+    // Restart the store (avoid cleaning up the files, to check the rebuilt trackers)
+    htu.getConfiguration().setBoolean(WALProcedureStore.EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, false);
     final LoadCounter loader = new LoadCounter();
     storeRestart(loader);
     assertEquals(3, loader.getLoadedCount());  // procs 1, 3 and 5
@@ -431,6 +483,7 @@ public class TestWALProcedureStore {
 
     // Check the Trackers
     final ArrayList<ProcedureWALFile> walFiles = procStore.getActiveLogs();
+    LOG.info("WALs " + walFiles);
     assertEquals(4, walFiles.size());
     LOG.info("Checking wal " + walFiles.get(0));
     assertUpdated(walFiles.get(0).getTracker(), procs, new int[]{0, 1, 2, 3}, new int[] {4, 5});
@@ -660,7 +713,7 @@ public class TestWALProcedureStore {
 
   @Test
   public void testFileNotFoundDuringLeaseRecovery() throws IOException {
-    TestProcedure[] procs = new TestProcedure[3];
+    final TestProcedure[] procs = new TestProcedure[3];
     for (int i = 0; i < procs.length; ++i) {
       procs[i] = new TestProcedure(i + 1, 0);
       procStore.insert(procs[i], null);
@@ -673,7 +726,7 @@ public class TestWALProcedureStore {
     procStore.stop(false);
 
     FileStatus[] status = fs.listStatus(logDir);
-    assertEquals(procs.length + 2, status.length);
+    assertEquals(procs.length + 1, status.length);
 
     // simulate another active master removing the wals
     procStore = new WALProcedureStore(htu.getConfiguration(), fs, logDir,
@@ -696,7 +749,7 @@ public class TestWALProcedureStore {
     procStore.recoverLease();
     procStore.load(loader);
     assertEquals(procs.length, loader.getMaxProcId());
-    assertEquals(procs.length - 1, loader.getRunnableCount());
+    assertEquals(1, loader.getRunnableCount());
     assertEquals(0, loader.getCompletedCount());
     assertEquals(0, loader.getCorruptedCount());
   }
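
The WALProcedureStore.EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY knob toggled in the hunks above controls whether old WAL files are cleaned up while the store loads. A rough standalone sketch of the pattern the test relies on, reusing the test helpers (ProcedureTestingUtility and its LoadCounter) and placed in the same package as the test so the key is visible; the directory layout and single-slot start are illustrative:

package org.apache.hadoop.hbase.procedure2.store.wal;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.LoadCounter;

public class WalCleanupOnLoadSketch {
  public static void main(String[] args) throws Exception {
    HBaseCommonTestingUtility htu = new HBaseCommonTestingUtility();
    Configuration conf = htu.getConfiguration();
    Path testDir = htu.getDataTestDir();
    FileSystem fs = testDir.getFileSystem(conf);
    Path logDir = new Path(testDir, "proc-logs");

    // Keep the old WALs around on load so the rebuilt per-WAL trackers can be inspected;
    // with the key set to true (the test's default) unneeded WALs are removed during load.
    conf.setBoolean(WALProcedureStore.EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, false);

    WALProcedureStore store = ProcedureTestingUtility.createWalStore(conf, fs, logDir);
    store.start(1);
    store.recoverLease();
    store.load(new LoadCounter());
    System.out.println("active WALs after load: " + store.getActiveLogs());
    store.stop(false);
  }
}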


[26/50] [abbrv] hbase git commit: HBASE-17345 Implement batch

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/8fa5b0b9/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableMultiGet.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableMultiGet.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableMultiGet.java
deleted file mode 100644
index 612e830..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableMultiGet.java
+++ /dev/null
@@ -1,163 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.client;
-
-import static org.junit.Assert.assertEquals;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ForkJoinPool;
-import java.util.function.BiFunction;
-import java.util.function.Supplier;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.junit.runners.Parameterized.Parameter;
-import org.junit.runners.Parameterized.Parameters;
-
-@RunWith(Parameterized.class)
-@Category({ MediumTests.class, ClientTests.class })
-public class TestAsyncTableMultiGet {
-
-  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-  private static TableName TABLE_NAME = TableName.valueOf("async");
-
-  private static byte[] FAMILY = Bytes.toBytes("cf");
-
-  private static byte[] CQ = Bytes.toBytes("cq");
-
-  private static int COUNT = 100;
-
-  private static AsyncConnection ASYNC_CONN;
-
-  @Parameter
-  public Supplier<AsyncTableBase> getTable;
-
-  private static RawAsyncTable getRawTable() {
-    return ASYNC_CONN.getRawTable(TABLE_NAME);
-  }
-
-  private static AsyncTable getTable() {
-    return ASYNC_CONN.getTable(TABLE_NAME, ForkJoinPool.commonPool());
-  }
-
-  @Parameters
-  public static List<Object[]> params() {
-    return Arrays.asList(new Supplier<?>[] { TestAsyncTableMultiGet::getRawTable },
-      new Supplier<?>[] { TestAsyncTableMultiGet::getTable });
-  }
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    TEST_UTIL.startMiniCluster(3);
-    byte[][] splitKeys = new byte[8][];
-    for (int i = 11; i < 99; i += 11) {
-      splitKeys[i / 11 - 1] = Bytes.toBytes(String.format("%02d", i));
-    }
-    TEST_UTIL.createTable(TABLE_NAME, FAMILY, splitKeys);
-    TEST_UTIL.waitTableAvailable(TABLE_NAME);
-    TEST_UTIL.getAdmin().setBalancerRunning(false, true);
-    ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration());
-    RawAsyncTable table = ASYNC_CONN.getRawTable(TABLE_NAME);
-    List<CompletableFuture<?>> futures = new ArrayList<>();
-    IntStream.range(0, COUNT).forEach(i -> futures.add(table.put(
-      new Put(Bytes.toBytes(String.format("%02d", i))).addColumn(FAMILY, CQ, Bytes.toBytes(i)))));
-    CompletableFuture.allOf(futures.toArray(new CompletableFuture<?>[0])).get();
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    ASYNC_CONN.close();
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  private void move() throws IOException, InterruptedException {
-    HRegionServer src = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME);
-    HRegionServer dst = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
-        .map(t -> t.getRegionServer()).filter(r -> r != src).findAny().get();
-    Region region = src.getOnlineRegions(TABLE_NAME).stream().findAny().get();
-    TEST_UTIL.getAdmin().move(region.getRegionInfo().getEncodedNameAsBytes(),
-      Bytes.toBytes(dst.getServerName().getServerName()));
-    Thread.sleep(1000);
-  }
-
-  private void test(BiFunction<AsyncTableBase, List<Get>, List<Result>> getFunc)
-      throws IOException, InterruptedException {
-    AsyncTableBase table = getTable.get();
-    List<Get> gets =
-        IntStream.range(0, COUNT).mapToObj(i -> new Get(Bytes.toBytes(String.format("%02d", i))))
-            .collect(Collectors.toList());
-    List<Result> results = getFunc.apply(table, gets);
-    assertEquals(COUNT, results.size());
-    for (int i = 0; i < COUNT; i++) {
-      Result result = results.get(i);
-      assertEquals(i, Bytes.toInt(result.getValue(FAMILY, CQ)));
-    }
-    // test basic failure recovery
-    move();
-    results = getFunc.apply(table, gets);
-    assertEquals(COUNT, results.size());
-    for (int i = 0; i < COUNT; i++) {
-      Result result = results.get(i);
-      assertEquals(i, Bytes.toInt(result.getValue(FAMILY, CQ)));
-    }
-  }
-
-  @Test
-  public void testGet() throws InterruptedException, IOException {
-    test((table, gets) -> {
-      return table.get(gets).stream().map(f -> {
-        try {
-          return f.get();
-        } catch (InterruptedException | ExecutionException e) {
-          throw new RuntimeException(e);
-        }
-      }).collect(Collectors.toList());
-    });
-
-  }
-
-  @Test
-  public void testGetAll() throws InterruptedException, IOException {
-    test((table, gets) -> {
-      try {
-        return table.getAll(gets).get();
-      } catch (InterruptedException | ExecutionException e) {
-        throw new RuntimeException(e);
-      }
-    });
-  }
-}
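
The deleted test covered the two batch-get shapes on the async table: per-Get futures from table.get(gets) and one combined future from table.getAll(gets); its coverage moves under the new batch work. A minimal caller-side sketch of the getAll() variant, assuming a table named "async" that is already populated much like the old test populated it:

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RawAsyncTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncMultiGetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // At this point on the branch createAsyncConnection() returns the connection directly.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf)) {
      RawAsyncTable table = conn.getRawTable(TableName.valueOf("async"));
      List<Get> gets = IntStream.range(0, 100)
          .mapToObj(i -> new Get(Bytes.toBytes(String.format("%02d", i))))
          .collect(Collectors.toList());
      // getAll() hands back one future that completes once every Get has its Result.
      List<Result> results = table.getAll(gets).get();
      System.out.println("fetched " + results.size() + " rows in one batch");
    }
  }
}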


[50/50] [abbrv] hbase git commit: HBASE-17397 AggregationClient cleanup; Reapplied with proper JIRA number (spotted by Duo Zhang)

Posted by sy...@apache.org.
HBASE-17397 AggregationClient cleanup; Reapplied with proper JIRA number (spotted by Duo Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/05ab41d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/05ab41d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/05ab41d1

Branch: refs/heads/hbase-12439
Commit: 05ab41d1bea53295d2c0790fba71c441ff85a6a5
Parents: 0583d79
Author: Michael Stack <st...@apache.org>
Authored: Tue Jan 3 19:17:17 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Tue Jan 3 19:17:17 2017 -0800

----------------------------------------------------------------------
 .../client/coprocessor/AggregationClient.java   | 94 +++++++++++++++-----
 1 file changed, 71 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/05ab41d1/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
index cde7d41..d236342 100644
--- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse;
@@ -59,6 +58,8 @@ import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.Message;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
 
 /**
  * This client class is for invoking the aggregate functions deployed on the
@@ -81,13 +82,60 @@ import com.google.protobuf.Message;
  * </ul>
  * <p>Call {@link #close()} when done.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.Public
 public class AggregationClient implements Closeable {
   // TODO: This class is not used.  Move to examples?
   private static final Log log = LogFactory.getLog(AggregationClient.class);
   private final Connection connection;
 
   /**
+   * An RpcController implementation for use here in this endpoint.
+   */
+  static class AggregationClientRpcController implements RpcController {
+    private String errorText;
+    private boolean cancelled = false;
+    private boolean failed = false;
+
+    @Override
+    public String errorText() {
+      return this.errorText;
+    }
+
+    @Override
+    public boolean failed() {
+      return this.failed;
+    }
+
+    @Override
+    public boolean isCanceled() {
+      return this.cancelled;
+    }
+
+    @Override
+    public void notifyOnCancel(RpcCallback<Object> arg0) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void reset() {
+      this.errorText = null;
+      this.cancelled = false;
+      this.failed = false;
+    }
+
+    @Override
+    public void setFailed(String errorText) {
+      this.failed = true;
+      this.errorText = errorText;
+    }
+
+    @Override
+    public void startCancel() {
+      this.cancelled = true;
+    }
+  }
+
+  /**
    * Constructor with Conf object
    * @param cfg
    */
@@ -160,13 +208,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, R>() {
           @Override
           public R call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getMax(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             if (response.getFirstPartCount() > 0) {
               ByteString b = response.getFirstPart(0);
@@ -248,13 +296,13 @@ public class AggregationClient implements Closeable {
 
           @Override
           public R call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getMin(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             if (response.getFirstPartCount() > 0) {
               ByteString b = response.getFirstPart(0);
@@ -323,13 +371,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, Long>() {
           @Override
           public Long call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getRowNum(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             byte[] bytes = getBytesFromResponse(response.getFirstPart(0));
             ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);
@@ -388,14 +436,14 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, S>() {
           @Override
           public S call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             // Not sure what is going on here why I have to do these casts. TODO.
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getSum(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             if (response.getFirstPartCount() == 0) {
               return null;
@@ -456,13 +504,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, Pair<S, Long>>() {
           @Override
           public Pair<S, Long> call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getAvg(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             Pair<S, Long> pair = new Pair<S, Long>(null, 0L);
             if (response.getFirstPartCount() == 0) {
@@ -560,13 +608,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, Pair<List<S>, Long>>() {
           @Override
           public Pair<List<S>, Long> call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getStd(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             Pair<List<S>, Long> pair = new Pair<List<S>, Long>(new ArrayList<S>(), 0L);
             if (response.getFirstPartCount() == 0) {
@@ -676,13 +724,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, List<S>>() {
           @Override
           public List<S> call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getMedian(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
 
             List<S> list = new ArrayList<S>();
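
For callers the visible change is the @InterfaceAudience.Public marking plus failures now surfacing as IOException built from the controller's error text; the aggregate methods themselves are unchanged. A rough usage sketch (table, family and qualifier names are made up, and the table is assumed to have the AggregateImplementation endpoint deployed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class AggregationClientSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("metrics");               // made-up table
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("value"));  // made-up family/qualifier

    // AggregationClient is Closeable, so close it when done (here via try-with-resources).
    try (AggregationClient client = new AggregationClient(conf)) {
      long rows = client.rowCount(table, new LongColumnInterpreter(), scan);
      Long max = client.max(table, new LongColumnInterpreter(), scan);
      System.out.println("rows=" + rows + ", max=" + max);
    }
  }
}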


[36/50] [abbrv] hbase git commit: HBASE-17149 Procedure V2 - Fix nonce submission to avoid unnecessary calling coprocessor multiple times; ADDENDUM by Stephen Yuan Jiang

Posted by sy...@apache.org.
HBASE-17149 Procedure V2 - Fix nonce submission to avoid unnecessary calling coprocessor multiple times; ADDENDUM by Stephen Yuan Jiang


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a3e0e0df
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a3e0e0df
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a3e0e0df

Branch: refs/heads/hbase-12439
Commit: a3e0e0df0d0957fc02723aa349f96ba45bda3c7f
Parents: 79e5efd
Author: Michael Stack <st...@apache.org>
Authored: Wed Dec 28 11:10:07 2016 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Wed Dec 28 11:10:19 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/master/HMaster.java | 20 ++++++++++----------
 .../procedure/TestCloneSnapshotProcedure.java   |  6 ------
 .../procedure/TestCreateNamespaceProcedure.java |  1 -
 .../TestDeleteColumnFamilyProcedure.java        |  4 ----
 .../procedure/TestDeleteNamespaceProcedure.java |  1 -
 .../TestMergeTableRegionsProcedure.java         |  6 ------
 .../procedure/TestModifyNamespaceProcedure.java |  1 -
 .../procedure/TestRestoreSnapshotProcedure.java |  1 -
 .../procedure/TestTableDDLProcedureBase.java    |  6 ------
 .../procedure/TestTruncateTableProcedure.java   |  1 -
 10 files changed, 10 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e0e0df/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 613c5c1..ecaaa16 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -142,9 +142,6 @@ import org.apache.hadoop.hbase.regionserver.compactions.FIFOCompactionPolicy;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeers;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
 import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -162,7 +159,6 @@ import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.IdLock;
 import org.apache.hadoop.hbase.util.ModifyRegionUtils;
-import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.VersionInfo;
@@ -2108,7 +2104,8 @@ public class HMaster extends HRegionServer implements MasterServices {
 
         LOG.info(getClientIdAuditPrefix() + " modify " + descriptor);
 
-        // Execute the operation synchronously - wait for the operation to complete before continuing.
+        // Execute the operation synchronously - wait for the operation to complete before
+        // continuing.
         ProcedurePrepareLatch latch = ProcedurePrepareLatch.createLatch(2, 0);
         submitProcedure(new ModifyColumnFamilyProcedure(procedureExecutor.getEnvironment(),
             tableName, descriptor, latch));
@@ -2750,14 +2747,15 @@ public class HMaster extends HRegionServer implements MasterServices {
           throw new BypassCoprocessorException();
         }
         LOG.info(getClientIdAuditPrefix() + " creating " + namespaceDescriptor);
-        // Execute the operation synchronously - wait for the operation to complete before continuing.
+        // Execute the operation synchronously - wait for the operation to complete before
+        // continuing.
         setProcId(getClusterSchema().createNamespace(namespaceDescriptor, getNonceKey()));
         getMaster().getMasterCoprocessorHost().postCreateNamespace(namespaceDescriptor);
       }
 
       @Override
       protected String getDescription() {
-        return "CreateTableProcedure";
+        return "CreateNamespaceProcedure";
       }
     });
   }
@@ -2783,14 +2781,15 @@ public class HMaster extends HRegionServer implements MasterServices {
           throw new BypassCoprocessorException();
         }
         LOG.info(getClientIdAuditPrefix() + " modify " + namespaceDescriptor);
-        // Execute the operation synchronously - wait for the operation to complete before continuing.
+        // Execute the operation synchronously - wait for the operation to complete before
+        // continuing.
         setProcId(getClusterSchema().modifyNamespace(namespaceDescriptor, getNonceKey()));
         getMaster().getMasterCoprocessorHost().postModifyNamespace(namespaceDescriptor);
       }
 
       @Override
       protected String getDescription() {
-        return "CreateTableProcedure";
+        return "ModifyNamespaceProcedure";
       }
     });
   }
@@ -2814,7 +2813,8 @@ public class HMaster extends HRegionServer implements MasterServices {
           throw new BypassCoprocessorException();
         }
         LOG.info(getClientIdAuditPrefix() + " delete " + name);
-        // Execute the operation synchronously - wait for the operation to complete before continuing.
+        // Execute the operation synchronously - wait for the operation to complete before
+        // continuing.
         setProcId(getClusterSchema().deleteNamespace(name, getNonceKey()));
         getMaster().getMasterCoprocessorHost().postDeleteNamespace(name);
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e0e0df/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
index b0ac627..4304438 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCloneSnapshotProcedure.java
@@ -22,10 +22,7 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.TableExistsException;
@@ -42,9 +39,6 @@ import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e0e0df/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java
index 2a47a62..b219bd0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestCreateNamespaceProcedure.java
@@ -28,7 +28,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceExistException;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e0e0df/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
index 625729a..89ffcb0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteColumnFamilyProcedure.java
@@ -22,11 +22,7 @@ import static org.junit.Assert.assertTrue;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.TableName;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e0e0df/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java
index 666c0ab..a34d6d7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDeleteNamespaceProcedure.java
@@ -28,7 +28,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e0e0df/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
index a16df9a..c2f68a6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
@@ -25,7 +25,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
@@ -51,8 +50,6 @@ public class TestMergeTableRegionsProcedure {
   private static final Log LOG = LogFactory.getLog(TestMergeTableRegionsProcedure.class);
 
   protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static long nonceGroup = HConstants.NO_NONCE;
-  private static long nonce = HConstants.NO_NONCE;
 
   private static final int initialRegionCount = 4;
   private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
@@ -87,9 +84,6 @@ public class TestMergeTableRegionsProcedure {
   @Before
   public void setup() throws Exception {
     resetProcExecutorTestingKillFlag();
-    nonceGroup =
-        MasterProcedureTestingUtility.generateNonceGroup(UTIL.getHBaseCluster().getMaster());
-    nonce = MasterProcedureTestingUtility.generateNonce(UTIL.getHBaseCluster().getMaster());
     // Turn off balancer so it doesn't cut in and mess up our placements.
     UTIL.getHBaseAdmin().setBalancerRunning(false, true);
     // Turn off the meta scanner so it doesn't remove the parent on us.

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e0e0df/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java
index 34ae31a..2c17089 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestModifyNamespaceProcedure.java
@@ -25,7 +25,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NamespaceNotFoundException;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e0e0df/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
index 066a160..4b5ff89 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestRestoreSnapshotProcedure.java
@@ -23,7 +23,6 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ProcedureInfo;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e0e0df/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java
index 6b0f083..a0b69b9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTableDDLProcedureBase.java
@@ -35,9 +35,6 @@ public abstract class TestTableDDLProcedureBase {
   private static final Log LOG = LogFactory.getLog(TestTableDDLProcedureBase.class);
   protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
 
-  protected static long nonceGroup;
-  protected static long nonce;
-
   private static void setupConf(Configuration conf) {
     conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
   }
@@ -60,9 +57,6 @@ public abstract class TestTableDDLProcedureBase {
   @Before
   public void setup() throws Exception {
     resetProcExecutorTestingKillFlag();
-    nonceGroup =
-        MasterProcedureTestingUtility.generateNonceGroup(UTIL.getHBaseCluster().getMaster());
-    nonce = MasterProcedureTestingUtility.generateNonce(UTIL.getHBaseCluster().getMaster());
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3e0e0df/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
index 3cc90f5..7d88a85 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestTruncateTableProcedure.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.TruncateTableState;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;


[47/50] [abbrv] hbase git commit: HBASE-17401 Removed unnecessary semicolons in hbase-annotations

Posted by sy...@apache.org.
HBASE-17401 Removed unnecessary semicolons in hbase-annotations

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c3d5f268
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c3d5f268
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c3d5f268

Branch: refs/heads/hbase-12439
Commit: c3d5f268cfb3220808b3aad04e26da8f1339aa75
Parents: 0a93241
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sun Jan 1 15:04:37 2017 +0100
Committer: Michael Stack <st...@apache.org>
Committed: Sun Jan 1 17:31:44 2017 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/classification/InterfaceAudience.java  | 6 +++---
 .../apache/hadoop/hbase/classification/InterfaceStability.java | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/c3d5f268/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceAudience.java
----------------------------------------------------------------------
diff --git a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceAudience.java b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceAudience.java
index 6e67758..506ef56 100644
--- a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceAudience.java
+++ b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceAudience.java
@@ -50,7 +50,7 @@ public final class InterfaceAudience {
    */
   @Documented
   @Retention(RetentionPolicy.RUNTIME)
-  public @interface Public {};
+  public @interface Public {}
 
   /**
    * Intended only for the project(s) specified in the annotation.
@@ -60,14 +60,14 @@ public final class InterfaceAudience {
   @Retention(RetentionPolicy.RUNTIME)
   public @interface LimitedPrivate {
     String[] value();
-  };
+  }
 
   /**
    * Intended for use only within Hadoop itself.
    */
   @Documented
   @Retention(RetentionPolicy.RUNTIME)
-  public @interface Private {};
+  public @interface Private {}
 
   private InterfaceAudience() {} // Audience can't exist on its own
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c3d5f268/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
----------------------------------------------------------------------
diff --git a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
index 338b375..ac20f3a 100644
--- a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
+++ b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java
@@ -47,14 +47,14 @@ public class InterfaceStability {
    */
   @Documented
   @Retention(RetentionPolicy.RUNTIME)
-  public @interface Stable {};
+  public @interface Stable {}
 
   /**
    * Evolving, but can break compatibility at minor release (i.e. m.x)
    */
   @Documented
   @Retention(RetentionPolicy.RUNTIME)
-  public @interface Evolving {};
+  public @interface Evolving {}
 
   /**
    * No guarantee is provided as to reliability or stability across any
@@ -62,5 +62,5 @@ public class InterfaceStability {
    */
   @Documented
   @Retention(RetentionPolicy.RUNTIME)
-  public @interface Unstable {};
+  public @interface Unstable {}
 }
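
For context, a small made-up example of how these markers read when applied to a class:

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;

/** ExampleClient is a made-up class, shown only to illustrate the annotations in use. */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ExampleClient {
  // The annotations are retained at runtime (see @Retention above) but carry no behaviour;
  // they only document who may depend on this class and how stable its API is meant to be.
}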


[09/50] [abbrv] hbase git commit: HBASE-17262 Refactor RpcServer so as to make it extendable and/or pluggable

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java
new file mode 100644
index 0000000..7d91a2c
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.ipc;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
+
+@InterfaceAudience.Private
+public class RpcServerFactory {
+
+  public static final Log LOG = LogFactory.getLog(RpcServerFactory.class);
+
+  public static final String CUSTOM_RPC_SERVER_IMPL_CONF_KEY = "hbase.rpc.server.impl";
+
+  /**
+   * Private Constructor
+   */
+  private RpcServerFactory() {
+  }
+
+  public static RpcServer createRpcServer(final Server server, final String name,
+      final List<BlockingServiceAndInterface> services,
+      final InetSocketAddress bindAddress, Configuration conf,
+      RpcScheduler scheduler) throws IOException {
+    String rpcServerClass = conf.get(CUSTOM_RPC_SERVER_IMPL_CONF_KEY,
+        SimpleRpcServer.class.getName());
+    LOG.info("Use " + rpcServerClass + " rpc server");
+    return ReflectionUtils.instantiateWithCustomCtor(rpcServerClass,
+        new Class[] { Server.class, String.class, List.class,
+            InetSocketAddress.class, Configuration.class, RpcScheduler.class },
+        new Object[] { server, name, services, bindAddress, conf, scheduler });
+  }
+
+}
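
A short sketch of how the factory might be pointed at a non-default server. The helper below and the "org.example.MyRpcServer" class name are hypothetical; whatever class the key names must extend RpcServer and expose the six-argument constructor createRpcServer() reflects on above:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ipc.RpcScheduler;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
import org.apache.hadoop.hbase.ipc.RpcServerFactory;

public class PluggableRpcServerSketch {
  /**
   * Builds an RpcServer through the factory. The caller supplies the same arguments a region
   * server would; "org.example.MyRpcServer" is a placeholder for a custom implementation.
   */
  static RpcServer build(Server server, String name, List<BlockingServiceAndInterface> services,
      InetSocketAddress bindAddress, Configuration conf, RpcScheduler scheduler)
      throws IOException {
    // Without this key the factory falls back to SimpleRpcServer.
    conf.set(RpcServerFactory.CUSTOM_RPC_SERVER_IMPL_CONF_KEY, "org.example.MyRpcServer");
    return RpcServerFactory.createRpcServer(server, name, services, bindAddress, conf, scheduler);
  }
}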

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
new file mode 100644
index 0000000..01d45cd
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -0,0 +1,1997 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.ipc;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.BindException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketException;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.nio.channels.CancelledKeyException;
+import java.nio.channels.Channels;
+import java.nio.channels.ClosedChannelException;
+import java.nio.channels.GatheringByteChannel;
+import java.nio.channels.ReadableByteChannel;
+import java.nio.channels.SelectionKey;
+import java.nio.channels.Selector;
+import java.nio.channels.ServerSocketChannel;
+import java.nio.channels.SocketChannel;
+import java.nio.channels.WritableByteChannel;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedDeque;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.LongAdder;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.VersionInfoUtil;
+import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
+import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
+import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.security.AccessDeniedException;
+import org.apache.hadoop.hbase.security.AuthMethod;
+import org.apache.hadoop.hbase.security.HBasePolicyProvider;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler;
+import org.apache.hadoop.hbase.security.SaslStatus;
+import org.apache.hadoop.hbase.security.SaslUtil;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHeader;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.htrace.TraceInfo;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * The RPC server with a native java NIO implementation deriving from Hadoop to
+ * host protobuf described Services. It's the original one before HBASE-17262,
+ * and the default RPC server for now.
+ *
+ * An RpcServer instance has a Listener that hosts the socket.  The Listener has a fixed number
+ * of Readers in an ExecutorPool, 10 by default.  The Listener does an accept and then a Reader
+ * is chosen round robin to do the read.  The Reader is registered on a Selector.  The read does
+ * a total read off the channel and the parse from which it makes a Call.  The Call is wrapped
+ * in a CallRunner and passed to the scheduler to be run.  The Reader goes back to see if more
+ * is to be done and loops till done.
+ *
+ * <p>The Scheduler can be variously implemented, but the default simple scheduler has handlers
+ * to which it has given the queues into which calls (i.e. CallRunner instances) are inserted.
+ * Handlers run taking from the queue.  They run the CallRunner#run method on each item taken
+ * from the queue and keep taking while the server is up.
+ *
+ * CallRunner#run executes the call.  When done, it asks the included Call to put itself on a new
+ * queue for the Responder to pull from and return the result to the client.
+ *
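+ * <p>A minimal construction sketch (an illustrative example only: it assumes a {@link Server}
+ * instance, a services list, a Configuration and an {@link RpcScheduler} are already at hand,
+ * and uses the start/stop lifecycle inherited from RpcServer):
+ * <pre>
+ * SimpleRpcServer rpcServer = new SimpleRpcServer(server, "myServer", services,
+ *     new InetSocketAddress("0.0.0.0", 0), conf, scheduler);
+ * rpcServer.start();
+ * // ... serve requests ...
+ * rpcServer.stop();
+ * </pre>
+ *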
+ * @see BlockingRpcClient
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class SimpleRpcServer extends RpcServer {
+
+  protected int port;                             // port we listen on
+  protected InetSocketAddress address;            // inet address we listen on
+  private int readThreads;                        // number of read threads
+
+  protected int socketSendBufferSize;
+  protected final long purgeTimeout;    // in milliseconds
+
+  // maintains the set of client connections and handles idle timeouts
+  private ConnectionManager connectionManager;
+  private Listener listener = null;
+  protected Responder responder = null;
+
+  /**
+   * Datastructure that holds everything necessary for a method invocation and then, afterward,
+   * carries the result.
+   */
+  @InterfaceStability.Evolving
+  public class Call extends RpcServer.Call {
+
+    protected Responder responder;
+
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
+        justification="Can't figure why this complaint is happening... see below")
+    Call(int id, final BlockingService service, final MethodDescriptor md,
+        RequestHeader header, Message param, CellScanner cellScanner,
+        Connection connection, Responder responder, long size, TraceInfo tinfo,
+        final InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) {
+      super(id, service, md, header, param, cellScanner, connection, size,
+          tinfo, remoteAddress, timeout, reqCleanup);
+      this.responder = responder;
+    }
+
+    /**
+     * Call is done. Execution happened and we returned results to the client. It is now safe to
+     * clean up.
+     */
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+        justification="Presume the lock on processing request held by caller is protection enough")
+    void done() {
+      super.done();
+      this.getConnection().decRpcCount(); // Say that we're done with this call.
+    }
+
+    @Override
+    public long disconnectSince() {
+      if (!getConnection().isConnectionOpen()) {
+        return System.currentTimeMillis() - timestamp;
+      } else {
+        return -1L;
+      }
+    }
+
+    public synchronized void sendResponseIfReady() throws IOException {
+      // set param null to reduce memory pressure
+      this.param = null;
+      this.responder.doRespond(this);
+    }
+
+    Connection getConnection() {
+      return (Connection) this.connection;
+    }
+
+  }
+
+  /** Listens on the socket. Creates jobs for the handler threads*/
+  private class Listener extends Thread {
+
+    private ServerSocketChannel acceptChannel = null; //the accept channel
+    private Selector selector = null; //the selector that we use for the server
+    private Reader[] readers = null;
+    private int currentReader = 0;
+    private final int readerPendingConnectionQueueLength;
+
+    private ExecutorService readPool;
+
+    public Listener(final String name) throws IOException {
+      super(name);
+      // The backlog of requests that we will have the serversocket carry.
+      int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
+      readerPendingConnectionQueueLength =
+          conf.getInt("hbase.ipc.server.read.connection-queue.size", 100);
+      // Create a new server socket and set to non blocking mode
+      acceptChannel = ServerSocketChannel.open();
+      acceptChannel.configureBlocking(false);
+
+      // Bind the server socket to the binding address (can be different from the default interface)
+      bind(acceptChannel.socket(), bindAddress, backlogLength);
+      port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
+      address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
+      // create a selector;
+      selector = Selector.open();
+
+      readers = new Reader[readThreads];
+      // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it
+      // has an advantage in that it is easy to shutdown the pool.
+      readPool = Executors.newFixedThreadPool(readThreads,
+        new ThreadFactoryBuilder().setNameFormat(
+          "RpcServer.reader=%d,bindAddress=" + bindAddress.getHostName() +
+          ",port=" + port).setDaemon(true)
+        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
+      for (int i = 0; i < readThreads; ++i) {
+        Reader reader = new Reader();
+        readers[i] = reader;
+        readPool.execute(reader);
+      }
+      LOG.info(getName() + ": started " + readThreads + " reader(s) listening on port=" + port);
+
+      // Register accepts on the server socket with the selector.
+      acceptChannel.register(selector, SelectionKey.OP_ACCEPT);
+      this.setName("RpcServer.listener,port=" + port);
+      this.setDaemon(true);
+    }
+
+
+    private class Reader implements Runnable {
+      final private LinkedBlockingQueue<Connection> pendingConnections;
+      private final Selector readSelector;
+
+      Reader() throws IOException {
+        this.pendingConnections =
+          new LinkedBlockingQueue<Connection>(readerPendingConnectionQueueLength);
+        this.readSelector = Selector.open();
+      }
+
+      @Override
+      public void run() {
+        try {
+          doRunLoop();
+        } finally {
+          try {
+            readSelector.close();
+          } catch (IOException ioe) {
+            LOG.error(getName() + ": error closing read selector in " + getName(), ioe);
+          }
+        }
+      }
+
+      private synchronized void doRunLoop() {
+        while (running) {
+          try {
+            // Consume as many connections as currently queued to avoid
+            // unbridled acceptance of connections that starves the select
+            int size = pendingConnections.size();
+            for (int i=size; i>0; i--) {
+              Connection conn = pendingConnections.take();
+              conn.channel.register(readSelector, SelectionKey.OP_READ, conn);
+            }
+            readSelector.select();
+            Iterator<SelectionKey> iter = readSelector.selectedKeys().iterator();
+            while (iter.hasNext()) {
+              SelectionKey key = iter.next();
+              iter.remove();
+              if (key.isValid()) {
+                if (key.isReadable()) {
+                  doRead(key);
+                }
+              }
+              key = null;
+            }
+          } catch (InterruptedException e) {
+            if (running) {                      // unexpected -- log it
+              LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
+            }
+            return;
+          } catch (IOException ex) {
+            LOG.info(getName() + ": IOException in Reader", ex);
+          }
+        }
+      }
+
+      /**
+       * Updating the readSelector while it's being used is not thread-safe,
+       * so the connection must be queued.  The reader will drain the queue
+       * and update its readSelector before performing the next select
+       */
+      public void addConnection(Connection conn) throws IOException {
+        pendingConnections.add(conn);
+        readSelector.wakeup();
+      }
+    }
+
+    @Override
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
+      justification="selector access is not synchronized; seems fine but concerned changing " +
+        "it will have per impact")
+    public void run() {
+      LOG.info(getName() + ": starting");
+      connectionManager.startIdleScan();
+      while (running) {
+        SelectionKey key = null;
+        try {
+          selector.select(); // FindBugs IS2_INCONSISTENT_SYNC
+          Iterator<SelectionKey> iter = selector.selectedKeys().iterator();
+          while (iter.hasNext()) {
+            key = iter.next();
+            iter.remove();
+            try {
+              if (key.isValid()) {
+                if (key.isAcceptable())
+                  doAccept(key);
+              }
+            } catch (IOException ignored) {
+              if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored);
+            }
+            key = null;
+          }
+        } catch (OutOfMemoryError e) {
+          if (errorHandler != null) {
+            if (errorHandler.checkOOME(e)) {
+              LOG.info(getName() + ": exiting on OutOfMemoryError");
+              closeCurrentConnection(key, e);
+              connectionManager.closeIdle(true);
+              return;
+            }
+          } else {
+            // we can run out of memory if we have too many threads
+            // log the event and sleep for a minute and give
+            // some thread(s) a chance to finish
+            LOG.warn(getName() + ": OutOfMemoryError in server select", e);
+            closeCurrentConnection(key, e);
+            connectionManager.closeIdle(true);
+            try {
+              Thread.sleep(60000);
+            } catch (InterruptedException ex) {
+              LOG.debug("Interrupted while sleeping");
+            }
+          }
+        } catch (Exception e) {
+          closeCurrentConnection(key, e);
+        }
+      }
+      LOG.info(getName() + ": stopping");
+      synchronized (this) {
+        try {
+          acceptChannel.close();
+          selector.close();
+        } catch (IOException ignored) {
+          if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored);
+        }
+
+        selector= null;
+        acceptChannel= null;
+
+        // close all connections
+        connectionManager.stopIdleScan();
+        connectionManager.closeAll();
+      }
+    }
+
+    private void closeCurrentConnection(SelectionKey key, Throwable e) {
+      if (key != null) {
+        Connection c = (Connection)key.attachment();
+        if (c != null) {
+          closeConnection(c);
+          key.attach(null);
+        }
+      }
+    }
+
+    InetSocketAddress getAddress() {
+      return address;
+    }
+
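+    // Accept all pending connections on the server channel, register each with the
+    // ConnectionManager and hand it to a Reader picked round robin.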
+    void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfMemoryError {
+      ServerSocketChannel server = (ServerSocketChannel) key.channel();
+      SocketChannel channel;
+      while ((channel = server.accept()) != null) {
+        channel.configureBlocking(false);
+        channel.socket().setTcpNoDelay(tcpNoDelay);
+        channel.socket().setKeepAlive(tcpKeepAlive);
+        Reader reader = getReader();
+        Connection c = connectionManager.register(channel);
+        // If the connectionManager can't take it, close the connection.
+        if (c == null) {
+          if (channel.isOpen()) {
+            IOUtils.cleanup(null, channel);
+          }
+          continue;
+        }
+        key.attach(c);  // so closeCurrentConnection can get the object
+        reader.addConnection(c);
+      }
+    }
+
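+    // Read and process data from the connection attached to this key; on error or EOF the
+    // connection is closed.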
+    void doRead(SelectionKey key) throws InterruptedException {
+      int count;
+      Connection c = (Connection) key.attachment();
+      if (c == null) {
+        return;
+      }
+      c.setLastContact(System.currentTimeMillis());
+      try {
+        count = c.readAndProcess();
+      } catch (InterruptedException ieo) {
+        LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
+        throw ieo;
+      } catch (Exception e) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(getName() + ": Caught exception while reading:", e);
+        }
+        count = -1; //so that the (count < 0) block is executed
+      }
+      if (count < 0) {
+        closeConnection(c);
+        c = null;
+      } else {
+        c.setLastContact(System.currentTimeMillis());
+      }
+    }
+
+    synchronized void doStop() {
+      if (selector != null) {
+        selector.wakeup();
+        Thread.yield();
+      }
+      if (acceptChannel != null) {
+        try {
+          acceptChannel.socket().close();
+        } catch (IOException e) {
+          LOG.info(getName() + ": exception in closing listener socket. " + e);
+        }
+      }
+      readPool.shutdownNow();
+    }
+
+    // The method that will return the next reader to work with
+    // Simplistic implementation of round robin for now
+    Reader getReader() {
+      currentReader = (currentReader + 1) % readers.length;
+      return readers[currentReader];
+    }
+  }
+
+  // Sends responses of RPC back to clients.
+  protected class Responder extends Thread {
+    private final Selector writeSelector;
+    private final Set<Connection> writingCons =
+        Collections.newSetFromMap(new ConcurrentHashMap<Connection, Boolean>());
+
+    Responder() throws IOException {
+      this.setName("RpcServer.responder");
+      this.setDaemon(true);
+      this.setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER);
+      writeSelector = Selector.open(); // create a selector
+    }
+
+    @Override
+    public void run() {
+      LOG.debug(getName() + ": starting");
+      try {
+        doRunLoop();
+      } finally {
+        LOG.info(getName() + ": stopping");
+        try {
+          writeSelector.close();
+        } catch (IOException ioe) {
+          LOG.error(getName() + ": couldn't close write selector", ioe);
+        }
+      }
+    }
+
+    /**
+     * Take the list of the connections that want to write, and register them
+     * in the selector.
+     */
+    private void registerWrites() {
+      Iterator<Connection> it = writingCons.iterator();
+      while (it.hasNext()) {
+        Connection c = it.next();
+        it.remove();
+        SelectionKey sk = c.channel.keyFor(writeSelector);
+        try {
+          if (sk == null) {
+            try {
+              c.channel.register(writeSelector, SelectionKey.OP_WRITE, c);
+            } catch (ClosedChannelException e) {
+              // ignore: the client went away.
+              if (LOG.isTraceEnabled()) LOG.trace("ignored", e);
+            }
+          } else {
+            sk.interestOps(SelectionKey.OP_WRITE);
+          }
+        } catch (CancelledKeyException e) {
+          // ignore: the client went away.
+          if (LOG.isTraceEnabled()) LOG.trace("ignored", e);
+        }
+      }
+    }
+
+    /**
+     * Add a connection to the list that want to write,
+     */
+    public void registerForWrite(Connection c) {
+      if (writingCons.add(c)) {
+        writeSelector.wakeup();
+      }
+    }
+
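+    // Main Responder loop: register pending writes, select on writable channels, flush queued
+    // responses, and periodically purge connections whose oldest call has been stuck too long.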
+    private void doRunLoop() {
+      long lastPurgeTime = 0;   // last check for old calls.
+      while (running) {
+        try {
+          registerWrites();
+          int keyCt = writeSelector.select(purgeTimeout);
+          if (keyCt == 0) {
+            continue;
+          }
+
+          Set<SelectionKey> keys = writeSelector.selectedKeys();
+          Iterator<SelectionKey> iter = keys.iterator();
+          while (iter.hasNext()) {
+            SelectionKey key = iter.next();
+            iter.remove();
+            try {
+              if (key.isValid() && key.isWritable()) {
+                doAsyncWrite(key);
+              }
+            } catch (IOException e) {
+              LOG.debug(getName() + ": asyncWrite", e);
+            }
+          }
+
+          lastPurgeTime = purge(lastPurgeTime);
+
+        } catch (OutOfMemoryError e) {
+          if (errorHandler != null) {
+            if (errorHandler.checkOOME(e)) {
+              LOG.info(getName() + ": exiting on OutOfMemoryError");
+              return;
+            }
+          } else {
+            //
+            // we can run out of memory if we have too many threads
+            // log the event and sleep for a minute and give
+            // some thread(s) a chance to finish
+            //
+            LOG.warn(getName() + ": OutOfMemoryError in server select", e);
+            try {
+              Thread.sleep(60000);
+            } catch (InterruptedException ex) {
+              LOG.debug("Interrupted while sleeping");
+              return;
+            }
+          }
+        } catch (Exception e) {
+          LOG.warn(getName() + ": exception in Responder " +
+              StringUtils.stringifyException(e), e);
+        }
+      }
+      LOG.info(getName() + ": stopped");
+    }
+
+    /**
+     * If there were some calls that have not been sent out for a
+     * long time, we close the connection.
+     * @return the time of the purge.
+     */
+    private long purge(long lastPurgeTime) {
+      long now = System.currentTimeMillis();
+      if (now < lastPurgeTime + purgeTimeout) {
+        return lastPurgeTime;
+      }
+
+      ArrayList<Connection> conWithOldCalls = new ArrayList<Connection>();
+      // get the list of channels from list of keys.
+      synchronized (writeSelector.keys()) {
+        for (SelectionKey key : writeSelector.keys()) {
+          Connection connection = (Connection) key.attachment();
+          if (connection == null) {
+            throw new IllegalStateException("Coding error: SelectionKey key without attachment.");
+          }
+          Call call = connection.responseQueue.peekFirst();
+          if (call != null && now > call.timestamp + purgeTimeout) {
+            conWithOldCalls.add(call.getConnection());
+          }
+        }
+      }
+
+      // Seems safer to close the connection outside of the synchronized loop...
+      for (Connection connection : conWithOldCalls) {
+        closeConnection(connection);
+      }
+
+      return now;
+    }
+
+    private void doAsyncWrite(SelectionKey key) throws IOException {
+      Connection connection = (Connection) key.attachment();
+      if (connection == null) {
+        throw new IOException("doAsyncWrite: no connection");
+      }
+      if (key.channel() != connection.channel) {
+        throw new IOException("doAsyncWrite: bad channel");
+      }
+
+      if (processAllResponses(connection)) {
+        try {
+          // We wrote everything, so we don't need to be told when the socket is ready for
+          //  write anymore.
+         key.interestOps(0);
+        } catch (CancelledKeyException e) {
+          /* The Listener/reader might have closed the socket.
+           * We don't explicitly cancel the key, so not sure if this will
+           * ever fire.
+           * This warning could be removed.
+           */
+          LOG.warn("Exception while changing ops : " + e);
+        }
+      }
+    }
+
+    /**
+     * Process the response for this call. You need to have the lock on
+     * {@link org.apache.hadoop.hbase.ipc.SimpleRpcServer.Connection#responseWriteLock}
+     *
+     * @param call the call
+     * @return true if we proceed the call fully, false otherwise.
+     * @throws IOException
+     */
+    private boolean processResponse(final Call call) throws IOException {
+      boolean error = true;
+      try {
+        // Send as much data as we can in the non-blocking fashion
+        long numBytes = channelWrite(call.getConnection().channel,
+            call.response);
+        if (numBytes < 0) {
+          throw new HBaseIOException("Error writing on the socket " +
+            "for the call:" + call.toShortString());
+        }
+        error = false;
+      } finally {
+        if (error) {
+          LOG.debug(getName() + call.toShortString() + ": output error -- closing");
+          // We will be closing this connection itself. Mark this call as done so that all the
+          // buffer(s) it got from pool can get released
+          call.done();
+          closeConnection(call.getConnection());
+        }
+      }
+
+      if (!call.response.hasRemaining()) {
+        call.done();
+        return true;
+      } else {
+        return false; // Socket can't take more, we will have to come back.
+      }
+    }
+
+    /**
+     * Process all the responses for this connection
+     *
+     * @return true if all the calls were processed or someone else is doing it.
+     * false if there is still some work to do. In this case, we expect the caller to
+     * delay us.
+     * @throws IOException
+     */
+    private boolean processAllResponses(final Connection connection) throws IOException {
+      // We want only one writer on the channel for a connection at a time.
+      connection.responseWriteLock.lock();
+      try {
+        for (int i = 0; i < 20; i++) {
+          // protection if some handlers manage to need all of the responder's time
+          Call call = connection.responseQueue.pollFirst();
+          if (call == null) {
+            return true;
+          }
+          if (!processResponse(call)) {
+            connection.responseQueue.addFirst(call);
+            return false;
+          }
+        }
+      } finally {
+        connection.responseWriteLock.unlock();
+      }
+
+      return connection.responseQueue.isEmpty();
+    }
+
+    //
+    // Enqueue a response from the application.
+    //
+    void doRespond(Call call) throws IOException {
+      boolean added = false;
+
+      // If there is already a write in progress, we don't wait. This frees the handlers
+      //  immediately for other tasks.
+      if (call.getConnection().responseQueue.isEmpty()
+          && call.getConnection().responseWriteLock.tryLock()) {
+        try {
+          if (call.getConnection().responseQueue.isEmpty()) {
+            // If we're alone, we can try to do a direct call to the socket. It's
+            //  an optimisation to save on context switches and data transfer between cores.
+            if (processResponse(call)) {
+              return; // we're done.
+            }
+            // Too big to fit, putting ahead.
+            call.getConnection().responseQueue.addFirst(call);
+            added = true; // We will register to the selector later, outside of the lock.
+          }
+        } finally {
+          call.getConnection().responseWriteLock.unlock();
+        }
+      }
+
+      if (!added) {
+        call.getConnection().responseQueue.addLast(call);
+      }
+      call.responder.registerForWrite(call.getConnection());
+
+      // set the serve time when the response has to be sent later
+      call.timestamp = System.currentTimeMillis();
+    }
+  }
+
+  /** Reads calls from a connection and queues them for handling. */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(
+      value="VO_VOLATILE_INCREMENT",
+      justification="False positive according to http://sourceforge.net/p/findbugs/bugs/1032/")
+  public class Connection extends RpcServer.Connection {
+
+    protected SocketChannel channel;
+    private ByteBuff data;
+    private ByteBuffer dataLengthBuffer;
+    protected final ConcurrentLinkedDeque<Call> responseQueue = new ConcurrentLinkedDeque<Call>();
+    private final Lock responseWriteLock = new ReentrantLock();
+    private LongAdder rpcCount = new LongAdder(); // number of outstanding rpcs
+    private long lastContact;
+    protected Socket socket;
+
+    private ByteBuffer unwrappedData;
+    // When is this set?  FindBugs wants to know!  Says NP
+    private ByteBuffer unwrappedDataLengthBuffer = ByteBuffer.allocate(4);
+
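+    // Preallocated Calls reused for out-of-band responses (authorization failures, raw SASL
+    // tokens, the connection header response); these skip the scheduler and go straight to the
+    // Responder.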
+    private final Call authFailedCall = new Call(AUTHORIZATION_FAILED_CALLID, null, null, null,
+        null, null, this, null, 0, null, null, 0, null);
+
+    private final Call saslCall = new Call(SASL_CALLID, null, null, null, null, null, this, null,
+        0, null, null, 0, null);
+
+    private final Call setConnectionHeaderResponseCall = new Call(CONNECTION_HEADER_RESPONSE_CALLID,
+        null, null, null, null, null, this, null, 0, null, null, 0, null);
+
+    public Connection(SocketChannel channel, long lastContact) {
+      super();
+      this.channel = channel;
+      this.lastContact = lastContact;
+      this.data = null;
+      this.dataLengthBuffer = ByteBuffer.allocate(4);
+      this.socket = channel.socket();
+      this.addr = socket.getInetAddress();
+      if (addr == null) {
+        this.hostAddress = "*Unknown*";
+      } else {
+        this.hostAddress = addr.getHostAddress();
+      }
+      this.remotePort = socket.getPort();
+      if (socketSendBufferSize != 0) {
+        try {
+          socket.setSendBufferSize(socketSendBufferSize);
+        } catch (IOException e) {
+          LOG.warn("Connection: unable to set socket send buffer size to " +
+                   socketSendBufferSize);
+        }
+      }
+    }
+
+    public void setLastContact(long lastContact) {
+      this.lastContact = lastContact;
+    }
+
+    public long getLastContact() {
+      return lastContact;
+    }
+
+    /* Return true if the connection has no outstanding rpc */
+    private boolean isIdle() {
+      return rpcCount.sum() == 0;
+    }
+
+    /* Decrement the outstanding RPC count */
+    protected void decRpcCount() {
+      rpcCount.decrement();
+    }
+
+    /* Increment the outstanding RPC count */
+    protected void incRpcCount() {
+      rpcCount.increment();
+    }
+
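+    /**
+     * Process a SASL token read off the wire. Once the SASL context is established, the token is
+     * unwrapped (via CryptoAES or the SaslServer) and the contained rpc(s) are processed;
+     * until then the token is fed to the SASL handshake and any reply token is sent back.
+     */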
+    private void saslReadAndProcess(ByteBuff saslToken) throws IOException,
+        InterruptedException {
+      if (saslContextEstablished) {
+        if (LOG.isTraceEnabled())
+          LOG.trace("Have read input token of size " + saslToken.limit()
+              + " for processing by saslServer.unwrap()");
+
+        if (!useWrap) {
+          processOneRpc(saslToken);
+        } else {
+          byte[] b = saslToken.hasArray() ? saslToken.array() : saslToken.toBytes();
+          byte [] plaintextData;
+          if (useCryptoAesWrap) {
+            // unwrap with CryptoAES
+            plaintextData = cryptoAES.unwrap(b, 0, b.length);
+          } else {
+            plaintextData = saslServer.unwrap(b, 0, b.length);
+          }
+          processUnwrappedData(plaintextData);
+        }
+      } else {
+        byte[] replyToken;
+        try {
+          if (saslServer == null) {
+            switch (authMethod) {
+            case DIGEST:
+              if (secretManager == null) {
+                throw new AccessDeniedException(
+                    "Server is not configured to do DIGEST authentication.");
+              }
+              saslServer = Sasl.createSaslServer(AuthMethod.DIGEST
+                  .getMechanismName(), null, SaslUtil.SASL_DEFAULT_REALM,
+                  HBaseSaslRpcServer.getSaslProps(), new SaslDigestCallbackHandler(
+                      secretManager, this));
+              break;
+            default:
+              UserGroupInformation current = UserGroupInformation.getCurrentUser();
+              String fullName = current.getUserName();
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Kerberos principal name is " + fullName);
+              }
+              final String names[] = SaslUtil.splitKerberosName(fullName);
+              if (names.length != 3) {
+                throw new AccessDeniedException(
+                    "Kerberos principal name does NOT have the expected "
+                        + "hostname part: " + fullName);
+              }
+              current.doAs(new PrivilegedExceptionAction<Object>() {
+                @Override
+                public Object run() throws SaslException {
+                  saslServer = Sasl.createSaslServer(AuthMethod.KERBEROS
+                      .getMechanismName(), names[0], names[1],
+                      HBaseSaslRpcServer.getSaslProps(), new SaslGssCallbackHandler());
+                  return null;
+                }
+              });
+            }
+            if (saslServer == null)
+              throw new AccessDeniedException(
+                  "Unable to find SASL server implementation for "
+                      + authMethod.getMechanismName());
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Created SASL server with mechanism = " + authMethod.getMechanismName());
+            }
+          }
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Have read input token of size " + saslToken.limit()
+                + " for processing by saslServer.evaluateResponse()");
+          }
+          replyToken = saslServer
+              .evaluateResponse(saslToken.hasArray() ? saslToken.array() : saslToken.toBytes());
+        } catch (IOException e) {
+          IOException sendToClient = e;
+          Throwable cause = e;
+          while (cause != null) {
+            if (cause instanceof InvalidToken) {
+              sendToClient = (InvalidToken) cause;
+              break;
+            }
+            cause = cause.getCause();
+          }
+          doRawSaslReply(SaslStatus.ERROR, null, sendToClient.getClass().getName(),
+            sendToClient.getLocalizedMessage());
+          metrics.authenticationFailure();
+          String clientIP = this.toString();
+          // attempting user could be null
+          AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser);
+          throw e;
+        }
+        if (replyToken != null) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Will send token of size " + replyToken.length
+                + " from saslServer.");
+          }
+          doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null,
+              null);
+        }
+        if (saslServer.isComplete()) {
+          String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP);
+          useWrap = qop != null && !"auth".equalsIgnoreCase(qop);
+          ugi = getAuthorizedUgi(saslServer.getAuthorizationID());
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("SASL server context established. Authenticated client: "
+              + ugi + ". Negotiated QoP is "
+              + saslServer.getNegotiatedProperty(Sasl.QOP));
+          }
+          metrics.authenticationSuccess();
+          AUDITLOG.info(AUTH_SUCCESSFUL_FOR + ugi);
+          saslContextEstablished = true;
+        }
+      }
+    }
+
+    /**
+     * No protobuf encoding of raw sasl messages
+     */
+    private void doRawSaslReply(SaslStatus status, Writable rv,
+        String errorClass, String error) throws IOException {
+      ByteBufferOutputStream saslResponse = null;
+      DataOutputStream out = null;
+      try {
+        // In my testing, I have noticed that sasl messages are usually
+        // in the ballpark of 100-200 bytes. That's why the initial capacity is 256.
+        saslResponse = new ByteBufferOutputStream(256);
+        out = new DataOutputStream(saslResponse);
+        out.writeInt(status.state); // write status
+        if (status == SaslStatus.SUCCESS) {
+          rv.write(out);
+        } else {
+          WritableUtils.writeString(out, errorClass);
+          WritableUtils.writeString(out, error);
+        }
+        saslCall.setSaslTokenResponse(saslResponse.getByteBuffer());
+        saslCall.responder = responder;
+        saslCall.sendResponseIfReady();
+      } finally {
+        if (saslResponse != null) {
+          saslResponse.close();
+        }
+        if (out != null) {
+          out.close();
+        }
+      }
+    }
+
+    /**
+     * Send the response for connection header
+     */
+    private void doConnectionHeaderResponse(byte[] wrappedCipherMetaData) throws IOException {
+      ByteBufferOutputStream response = null;
+      DataOutputStream out = null;
+      try {
+        response = new ByteBufferOutputStream(wrappedCipherMetaData.length + 4);
+        out = new DataOutputStream(response);
+        out.writeInt(wrappedCipherMetaData.length);
+        out.write(wrappedCipherMetaData);
+
+        setConnectionHeaderResponseCall.setConnectionHeaderResponse(response.getByteBuffer());
+        setConnectionHeaderResponseCall.responder = responder;
+        setConnectionHeaderResponseCall.sendResponseIfReady();
+      } finally {
+        if (out != null) {
+          out.close();
+        }
+        if (response != null) {
+          response.close();
+        }
+      }
+    }
+
+    private void disposeSasl() {
+      if (saslServer != null) {
+        try {
+          saslServer.dispose();
+          saslServer = null;
+        } catch (SaslException ignored) {
+          // Ignored. This is being disposed of anyway.
+        }
+      }
+    }
+
+    private int readPreamble() throws IOException {
+      int count;
+      // Check for 'HBas' magic.
+      this.dataLengthBuffer.flip();
+      if (!Arrays.equals(HConstants.RPC_HEADER, dataLengthBuffer.array())) {
+        return doBadPreambleHandling("Expected HEADER=" +
+            Bytes.toStringBinary(HConstants.RPC_HEADER) +
+            " but received HEADER=" + Bytes.toStringBinary(dataLengthBuffer.array()) +
+            " from " + toString());
+      }
+      // Now read the next two bytes, the version and the auth to use.
+      ByteBuffer versionAndAuthBytes = ByteBuffer.allocate(2);
+      count = channelRead(channel, versionAndAuthBytes);
+      if (count < 0 || versionAndAuthBytes.remaining() > 0) {
+        return count;
+      }
+      int version = versionAndAuthBytes.get(0);
+      byte authbyte = versionAndAuthBytes.get(1);
+      this.authMethod = AuthMethod.valueOf(authbyte);
+      if (version != CURRENT_VERSION) {
+        String msg = getFatalConnectionString(version, authbyte);
+        return doBadPreambleHandling(msg, new WrongVersionException(msg));
+      }
+      if (authMethod == null) {
+        String msg = getFatalConnectionString(version, authbyte);
+        return doBadPreambleHandling(msg, new BadAuthException(msg));
+      }
+      if (isSecurityEnabled && authMethod == AuthMethod.SIMPLE) {
+        if (allowFallbackToSimpleAuth) {
+          metrics.authenticationFallback();
+          authenticatedWithFallback = true;
+        } else {
+          AccessDeniedException ae = new AccessDeniedException("Authentication is required");
+          setupResponse(authFailedResponse, authFailedCall, ae, ae.getMessage());
+          responder.doRespond(authFailedCall);
+          throw ae;
+        }
+      }
+      if (!isSecurityEnabled && authMethod != AuthMethod.SIMPLE) {
+        doRawSaslReply(SaslStatus.SUCCESS, new IntWritable(
+            SaslUtil.SWITCH_TO_SIMPLE_AUTH), null, null);
+        authMethod = AuthMethod.SIMPLE;
+        // client has already sent the initial Sasl message and we
+        // should ignore it. Both client and server should fall back
+        // to simple auth from now on.
+        skipInitialSaslHandshake = true;
+      }
+      if (authMethod != AuthMethod.SIMPLE) {
+        useSasl = true;
+      }
+
+      dataLengthBuffer.clear();
+      connectionPreambleRead = true;
+      return count;
+    }
+
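+    // Fill the 4-byte length/preamble buffer if it still has room; returns the channel read
+    // count, or 0 if the buffer is already full.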
+    private int read4Bytes() throws IOException {
+      if (this.dataLengthBuffer.remaining() > 0) {
+        return channelRead(channel, this.dataLengthBuffer);
+      } else {
+        return 0;
+      }
+    }
+
+    /**
+     * Read off the wire. If there is not enough data to read, update the connection state with
+     *  what we have and return.
+     * @return Returns -1 if failure (and caller will close connection), else zero or more.
+     * @throws IOException
+     * @throws InterruptedException
+     */
+    public int readAndProcess() throws IOException, InterruptedException {
+      // Try and read in an int.  If new connection, the int will hold the 'HBas' HEADER.  If it
+      // does, read in the rest of the connection preamble, the version and the auth method.
+      // Else it will be length of the data to read (or -1 if a ping).  We catch the integer
+      // length into the 4-byte this.dataLengthBuffer.
+      int count = read4Bytes();
+      if (count < 0 || dataLengthBuffer.remaining() > 0) {
+        return count;
+      }
+
+      // If we have not read the connection setup preamble, look to see if that is on the wire.
+      if (!connectionPreambleRead) {
+        count = readPreamble();
+        if (!connectionPreambleRead) {
+          return count;
+        }
+
+        count = read4Bytes();
+        if (count < 0 || dataLengthBuffer.remaining() > 0) {
+          return count;
+        }
+      }
+
+      // We have read a length and we have read the preamble.  It is either the connection header
+      // or it is a request.
+      if (data == null) {
+        dataLengthBuffer.flip();
+        int dataLength = dataLengthBuffer.getInt();
+        if (dataLength == RpcClient.PING_CALL_ID) {
+          if (!useWrap) { //covers the !useSasl too
+            dataLengthBuffer.clear();
+            return 0;  //ping message
+          }
+        }
+        if (dataLength < 0) { // A data length of zero is legal.
+          throw new DoNotRetryIOException("Unexpected data length "
+              + dataLength + "!! from " + getHostAddress());
+        }
+
+        if (dataLength > maxRequestSize) {
+          String msg = "RPC data length of " + dataLength + " received from "
+              + getHostAddress() + " is greater than max allowed "
+              + maxRequestSize + ". Set \"" + MAX_REQUEST_SIZE
+              + "\" on server to override this limit (not recommended)";
+          LOG.warn(msg);
+
+          if (connectionHeaderRead && connectionPreambleRead) {
+            incRpcCount();
+            // Construct InputStream for the non-blocking SocketChannel
+            // We need the InputStream because we want to read only the request header
+            // instead of the whole rpc.
+            ByteBuffer buf = ByteBuffer.allocate(1);
+            InputStream is = new InputStream() {
+              @Override
+              public int read() throws IOException {
+                channelRead(channel, buf);
+                buf.flip();
+                int x = buf.get();
+                buf.flip();
+                return x;
+              }
+            };
+            CodedInputStream cis = CodedInputStream.newInstance(is);
+            int headerSize = cis.readRawVarint32();
+            Message.Builder builder = RequestHeader.newBuilder();
+            ProtobufUtil.mergeFrom(builder, cis, headerSize);
+            RequestHeader header = (RequestHeader) builder.build();
+
+            // Notify the client about the offending request
+            Call reqTooBig = new Call(header.getCallId(), this.service, null, null, null,
+                null, this, responder, 0, null, this.addr, 0, null);
+            metrics.exception(REQUEST_TOO_BIG_EXCEPTION);
+            // Make sure the client recognizes the underlying exception
+            // Otherwise, throw a DoNotRetryIOException.
+            if (VersionInfoUtil.hasMinimumVersion(connectionHeader.getVersionInfo(),
+                RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION)) {
+              setupResponse(null, reqTooBig, REQUEST_TOO_BIG_EXCEPTION, msg);
+            } else {
+              setupResponse(null, reqTooBig, new DoNotRetryIOException(), msg);
+            }
+            // We are going to close the connection, make sure we process the response
+            // before that. In rare case when this fails, we still close the connection.
+            responseWriteLock.lock();
+            responder.processResponse(reqTooBig);
+            responseWriteLock.unlock();
+          }
+          // Close the connection
+          return -1;
+        }
+
+        // Initialize this.data with a ByteBuff.
+        // This call will allocate a ByteBuff to read request into and assign to this.data
+        // Also when we use some buffer(s) from pool, it will create a CallCleanup instance also and
+        // assign to this.callCleanup
+        initByteBuffToReadInto(dataLength);
+
+        // Increment the rpc count. This counter will be decreased when we write
+        //  the response.  If we want the connection to be detected as idle properly, we
+        //  need to keep the inc / dec correct.
+        incRpcCount();
+      }
+
+      count = channelDataRead(channel, data);
+
+      if (count >= 0 && data.remaining() == 0) { // count==0 if dataLength == 0
+        process();
+      }
+
+      return count;
+    }
+
+    // Creates the ByteBuff and CallCleanup and assigns them to the Connection instance.
+    private void initByteBuffToReadInto(int length) {
+      // We create plain on-heap buffers and read into those when
+      // 1. The ByteBufferPool is not there.
+      // 2. The size of the req is very small. Using a large sized (64 KB) buffer from the pool is
+      // wasteful then. Also if all the reqs are of this size, we would be creating larger sized
+      // buffers and pooling them permanently. This includes Scan/Get requests and DDL kind of reqs
+      // like RegionOpen.
+      // 3. It is an initial handshake signal or initial connection request. Anyway then
+      // condition 2 itself will match.
+      // 4. SASL use is ON.
+      if (reservoir == null || skipInitialSaslHandshake || !connectionHeaderRead || useSasl
+          || length < minSizeForReservoirUse) {
+        this.data = new SingleByteBuff(ByteBuffer.allocate(length));
+      } else {
+        Pair<ByteBuff, CallCleanup> pair = RpcServer.allocateByteBuffToReadInto(reservoir,
+            minSizeForReservoirUse, length);
+        this.data = pair.getFirst();
+        this.callCleanup = pair.getSecond();
+      }
+    }
+
+    protected int channelDataRead(ReadableByteChannel channel, ByteBuff buf) throws IOException {
+      int count = buf.read(channel);
+      if (count > 0) {
+        metrics.receivedBytes(count);
+      }
+      return count;
+    }
+
+    /**
+     * Process the data buffer and clean the connection state for the next call.
+     */
+    private void process() throws IOException, InterruptedException {
+      data.rewind();
+      try {
+        if (skipInitialSaslHandshake) {
+          skipInitialSaslHandshake = false;
+          return;
+        }
+
+        if (useSasl) {
+          saslReadAndProcess(data);
+        } else {
+          processOneRpc(data);
+        }
+
+      } finally {
+        dataLengthBuffer.clear(); // Clean for the next call
+        data = null; // For the GC
+        this.callCleanup = null;
+      }
+    }
+
+    private int doBadPreambleHandling(final String msg) throws IOException {
+      return doBadPreambleHandling(msg, new FatalConnectionException(msg));
+    }
+
+    private int doBadPreambleHandling(final String msg, final Exception e) throws IOException {
+      LOG.warn(msg);
+      Call fakeCall = new Call(-1, null, null, null, null, null, this, responder, -1, null, null, 0,
+          null);
+      setupResponse(null, fakeCall, e, msg);
+      responder.doRespond(fakeCall);
+      // Returning -1 closes out the connection.
+      return -1;
+    }
+
+    // Reads the connection header following version
+    private void processConnectionHeader(ByteBuff buf) throws IOException {
+      if (buf.hasArray()) {
+        this.connectionHeader = ConnectionHeader.parseFrom(buf.array());
+      } else {
+        CodedInputStream cis = UnsafeByteOperations
+            .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput();
+        cis.enableAliasing(true);
+        this.connectionHeader = ConnectionHeader.parseFrom(cis);
+      }
+      String serviceName = connectionHeader.getServiceName();
+      if (serviceName == null) throw new EmptyServiceNameException();
+      this.service = getService(services, serviceName);
+      if (this.service == null) throw new UnknownServiceException(serviceName);
+      setupCellBlockCodecs(this.connectionHeader);
+      RPCProtos.ConnectionHeaderResponse.Builder chrBuilder =
+          RPCProtos.ConnectionHeaderResponse.newBuilder();
+      setupCryptoCipher(this.connectionHeader, chrBuilder);
+      responseConnectionHeader(chrBuilder);
+      UserGroupInformation protocolUser = createUser(connectionHeader);
+      if (!useSasl) {
+        ugi = protocolUser;
+        if (ugi != null) {
+          ugi.setAuthenticationMethod(AuthMethod.SIMPLE.authenticationMethod);
+        }
+        // audit logging for SASL authenticated users happens in saslReadAndProcess()
+        if (authenticatedWithFallback) {
+          LOG.warn("Allowed fallback to SIMPLE auth for " + ugi
+              + " connecting from " + getHostAddress());
+        }
+        AUDITLOG.info(AUTH_SUCCESSFUL_FOR + ugi);
+      } else {
+        // user is authenticated
+        ugi.setAuthenticationMethod(authMethod.authenticationMethod);
+        //Now we check if this is a proxy user case. If the protocol user is
+        //different from the 'user', it is a proxy user scenario. However,
+        //this is not allowed if user authenticated with DIGEST.
+        if ((protocolUser != null)
+            && (!protocolUser.getUserName().equals(ugi.getUserName()))) {
+          if (authMethod == AuthMethod.DIGEST) {
+            // Not allowed to doAs if token authentication is used
+            throw new AccessDeniedException("Authenticated user (" + ugi
+                + ") doesn't match what the client claims to be ("
+                + protocolUser + ")");
+          } else {
+            // Effective user can be different from authenticated user
+            // for simple auth or kerberos auth
+            // The user is the real user. Now we create a proxy user
+            UserGroupInformation realUser = ugi;
+            ugi = UserGroupInformation.createProxyUser(protocolUser
+                .getUserName(), realUser);
+            // Now the user is a proxy user, set Authentication method Proxy.
+            ugi.setAuthenticationMethod(AuthenticationMethod.PROXY);
+          }
+        }
+      }
+      if (connectionHeader.hasVersionInfo()) {
+        // see if this connection will support RetryImmediatelyException
+        retryImmediatelySupported = VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2);
+
+        AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort
+            + " with version info: "
+            + TextFormat.shortDebugString(connectionHeader.getVersionInfo()));
+      } else {
+        AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort
+            + " with unknown version info");
+      }
+    }
+
+    private void responseConnectionHeader(RPCProtos.ConnectionHeaderResponse.Builder chrBuilder)
+        throws FatalConnectionException {
+      // Response the connection header if Crypto AES is enabled
+      if (!chrBuilder.hasCryptoCipherMeta()) return;
+      try {
+        byte[] connectionHeaderResBytes = chrBuilder.build().toByteArray();
+        // encrypt the Crypto AES cipher meta data with sasl server, and send to client
+        byte[] unwrapped = new byte[connectionHeaderResBytes.length + 4];
+        Bytes.putBytes(unwrapped, 0, Bytes.toBytes(connectionHeaderResBytes.length), 0, 4);
+        Bytes.putBytes(unwrapped, 4, connectionHeaderResBytes, 0, connectionHeaderResBytes.length);
+
+        doConnectionHeaderResponse(saslServer.wrap(unwrapped, 0, unwrapped.length));
+      } catch (IOException ex) {
+        throw new UnsupportedCryptoException(ex.getMessage(), ex);
+      }
+    }
+
+    private void processUnwrappedData(byte[] inBuf) throws IOException,
+    InterruptedException {
+      ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(inBuf));
+      // Read all RPCs contained in the inBuf, even partial ones
+      while (true) {
+        int count;
+        if (unwrappedDataLengthBuffer.remaining() > 0) {
+          count = channelRead(ch, unwrappedDataLengthBuffer);
+          if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0)
+            return;
+        }
+
+        if (unwrappedData == null) {
+          unwrappedDataLengthBuffer.flip();
+          int unwrappedDataLength = unwrappedDataLengthBuffer.getInt();
+
+          if (unwrappedDataLength == RpcClient.PING_CALL_ID) {
+            if (LOG.isDebugEnabled())
+              LOG.debug("Received ping message");
+            unwrappedDataLengthBuffer.clear();
+            continue; // ping message
+          }
+          unwrappedData = ByteBuffer.allocate(unwrappedDataLength);
+        }
+
+        count = channelRead(ch, unwrappedData);
+        if (count <= 0 || unwrappedData.remaining() > 0)
+          return;
+
+        if (unwrappedData.remaining() == 0) {
+          unwrappedDataLengthBuffer.clear();
+          unwrappedData.flip();
+          processOneRpc(new SingleByteBuff(unwrappedData));
+          unwrappedData = null;
+        }
+      }
+    }
+
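+    // Process one complete payload: the first payload on a connection is the connection header;
+    // every payload after that is a request.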
+    private void processOneRpc(ByteBuff buf) throws IOException, InterruptedException {
+      if (connectionHeaderRead) {
+        processRequest(buf);
+      } else {
+        processConnectionHeader(buf);
+        this.connectionHeaderRead = true;
+        if (!authorizeConnection()) {
+          // Throw FatalConnectionException wrapping ACE so client does right thing and closes
+          // down the connection instead of trying to read a non-existent return.
+          throw new AccessDeniedException("Connection from " + this + " for service " +
+            connectionHeader.getServiceName() + " is unauthorized for user: " + ugi);
+        }
+        this.user = userProvider.create(this.ugi);
+      }
+    }
+
+    /**
+     * @param buf Has the request header and the request param and optionally encoded data buffer
+     * all in this one array.
+     * @throws IOException
+     * @throws InterruptedException
+     */
+    protected void processRequest(ByteBuff buf) throws IOException, InterruptedException {
+      long totalRequestSize = buf.limit();
+      int offset = 0;
+      // Here we read in the header.  We avoid having pb
+      // do its default 4k allocation for CodedInputStream.  We force it to use backing array.
+      CodedInputStream cis;
+      if (buf.hasArray()) {
+        cis = UnsafeByteOperations.unsafeWrap(buf.array(), 0, buf.limit()).newCodedInput();
+      } else {
+        cis = UnsafeByteOperations
+            .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput();
+      }
+      cis.enableAliasing(true);
+      int headerSize = cis.readRawVarint32();
+      offset = cis.getTotalBytesRead();
+      Message.Builder builder = RequestHeader.newBuilder();
+      ProtobufUtil.mergeFrom(builder, cis, headerSize);
+      RequestHeader header = (RequestHeader) builder.build();
+      offset += headerSize;
+      int id = header.getCallId();
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("RequestHeader " + TextFormat.shortDebugString(header) +
+          " totalRequestSize: " + totalRequestSize + " bytes");
+      }
+      // Enforcing the call queue size, this triggers a retry in the client
+      // This is a bit late to be doing this check - we have already read in the total request.
+      if ((totalRequestSize + callQueueSizeInBytes.sum()) > maxQueueSizeInBytes) {
+        final Call callTooBig =
+          new Call(id, this.service, null, null, null, null, this,
+            responder, totalRequestSize, null, null, 0, this.callCleanup);
+        ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
+        metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
+        setupResponse(responseBuffer, callTooBig, CALL_QUEUE_TOO_BIG_EXCEPTION,
+            "Call queue is full on " + server.getServerName() +
+                ", is hbase.ipc.server.max.callqueue.size too small?");
+        responder.doRespond(callTooBig);
+        return;
+      }
+      MethodDescriptor md = null;
+      Message param = null;
+      CellScanner cellScanner = null;
+      try {
+        if (header.hasRequestParam() && header.getRequestParam()) {
+          md = this.service.getDescriptorForType().findMethodByName(header.getMethodName());
+          if (md == null) throw new UnsupportedOperationException(header.getMethodName());
+          builder = this.service.getRequestPrototype(md).newBuilderForType();
+          cis.resetSizeCounter();
+          int paramSize = cis.readRawVarint32();
+          offset += cis.getTotalBytesRead();
+          if (builder != null) {
+            ProtobufUtil.mergeFrom(builder, cis, paramSize);
+            param = builder.build();
+          }
+          offset += paramSize;
+        } else {
+          // currently header must have request param, so we directly throw exception here
+          String msg = "Invalid request header: " + TextFormat.shortDebugString(header)
+              + ", should have param set in it";
+          LOG.warn(msg);
+          throw new DoNotRetryIOException(msg);
+        }
+        if (header.hasCellBlockMeta()) {
+          buf.position(offset);
+          ByteBuff dup = buf.duplicate();
+          dup.limit(offset + header.getCellBlockMeta().getLength());
+          cellScanner = cellBlockBuilder.createCellScannerReusingBuffers(this.codec,
+              this.compressionCodec, dup);
+        }
+      } catch (Throwable t) {
+        InetSocketAddress address = getListenerAddress();
+        String msg = (address != null ? address : "(channel closed)") +
+            " is unable to read call parameter from client " + getHostAddress();
+        LOG.warn(msg, t);
+
+        metrics.exception(t);
+
+        // probably the hbase hadoop version does not match the running hadoop version
+        if (t instanceof LinkageError) {
+          t = new DoNotRetryIOException(t);
+        }
+        // If the method is not present on the server, do not retry.
+        if (t instanceof UnsupportedOperationException) {
+          t = new DoNotRetryIOException(t);
+        }
+
+        final Call readParamsFailedCall =
+          new Call(id, this.service, null, null, null, null, this,
+            responder, totalRequestSize, null, null, 0, this.callCleanup);
+        ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
+        setupResponse(responseBuffer, readParamsFailedCall, t,
+          msg + "; " + t.getMessage());
+        responder.doRespond(readParamsFailedCall);
+        return;
+      }
+
+      TraceInfo traceInfo = header.hasTraceInfo()
+          ? new TraceInfo(header.getTraceInfo().getTraceId(), header.getTraceInfo().getParentId())
+          : null;
+      int timeout = 0;
+      if (header.hasTimeout() && header.getTimeout() > 0){
+        timeout = Math.max(minClientRequestTimeout, header.getTimeout());
+      }
+      Call call = new Call(id, this.service, md, header, param, cellScanner, this, responder,
+          totalRequestSize, traceInfo, this.addr, timeout, this.callCleanup);
+
+      if (!scheduler.dispatch(new CallRunner(SimpleRpcServer.this, call))) {
+        callQueueSizeInBytes.add(-1 * call.getSize());
+
+        ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
+        metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
+        setupResponse(responseBuffer, call, CALL_QUEUE_TOO_BIG_EXCEPTION,
+            "Call queue is full on " + server.getServerName() +
+                ", too many items queued ?");
+        responder.doRespond(call);
+      }
+    }
+
+    private boolean authorizeConnection() throws IOException {
+      try {
+        // If the auth method is DIGEST, the token was obtained by the
+        // real user for the effective user, so there is no need to
+        // authorize the real user. doAs is allowed only for simple or
+        // kerberos authentication.
+        if (ugi != null && ugi.getRealUser() != null
+            && (authMethod != AuthMethod.DIGEST)) {
+          ProxyUsers.authorize(ugi, this.getHostAddress(), conf);
+        }
+        authorize(ugi, connectionHeader, getHostInetAddress());
+        metrics.authorizationSuccess();
+      } catch (AuthorizationException ae) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Connection authorization failed: " + ae.getMessage(), ae);
+        }
+        metrics.authorizationFailure();
+        setupResponse(authFailedResponse, authFailedCall,
+          new AccessDeniedException(ae), ae.getMessage());
+        responder.doRespond(authFailedCall);
+        return false;
+      }
+      return true;
+    }
+
+    protected synchronized void close() {
+      disposeSasl();
+      data = null;
+      callCleanup = null;
+      if (!channel.isOpen())
+        return;
+      try {socket.shutdownOutput();} catch(Exception ignored) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Ignored exception", ignored);
+        }
+      }
+      if (channel.isOpen()) {
+        try {channel.close();} catch(Exception ignored) {}
+      }
+      try {
+        socket.close();
+      } catch(Exception ignored) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Ignored exception", ignored);
+        }
+      }
+    }
+
+    @Override
+    public boolean isConnectionOpen() {
+      return channel.isOpen();
+    }
+  }
+
+
+  /**
+   * Constructs a server listening on the named port and address.
+   * @param server hosting instance of {@link Server}. Authentication is done if an
+   * instance is passed; pass null for no authentication check.
+   * @param name Used for keying this RPC server's metrics and for naming the Listener thread.
+   * @param services A list of services.
+   * @param bindAddress Where to listen
+   * @param conf the configuration to use
+   * @param scheduler the scheduler used to dispatch incoming calls
+   */
+  public SimpleRpcServer(final Server server, final String name,
+      final List<BlockingServiceAndInterface> services,
+      final InetSocketAddress bindAddress, Configuration conf,
+      RpcScheduler scheduler)
+      throws IOException {
+    super(server, name, services, bindAddress, conf, scheduler);
+    this.socketSendBufferSize = 0;
+    this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10);
+    this.purgeTimeout = conf.getLong("hbase.ipc.client.call.purge.timeout",
+      2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
+
+    // Start the listener here and let it bind to the port
+    listener = new Listener(name);
+    this.port = listener.getAddress().getPort();
+
+    // Create the responder here
+    responder = new Responder();
+    connectionManager = new ConnectionManager();
+    initReconfigurable(conf);
+
+    this.scheduler.init(new RpcSchedulerContext(this));
+  }
+
+  /**
+   * Subclasses can override this to provide their own
+   * Connection implementations.
+   */
+  protected Connection getConnection(SocketChannel channel, long time) {
+    return new Connection(channel, time);
+  }
+
+  /**
+   * Setup response for the RPC Call.
+   *
+   * @param response buffer to serialize the response into
+   * @param call {@link Call} to which we are setting up the response
+   * @param t the Throwable (if any) that caused the call to fail
+   * @param error error message, if the call failed
+   * @throws IOException
+   */
+  private void setupResponse(ByteArrayOutputStream response, Call call, Throwable t, String error)
+  throws IOException {
+    if (response != null) response.reset();
+    call.setResponse(null, null, t, error);
+  }
+
+  protected void closeConnection(Connection connection) {
+    connectionManager.close(connection);
+  }
+
+  /** Sets the socket buffer size used for responding to RPCs.
+   * @param size send size
+   */
+  @Override
+  public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; }
+
+  /** Starts the service.  Must be called before any calls will be handled. */
+  @Override
+  public synchronized void start() {
+    if (started) return;
+    authTokenSecretMgr = createSecretManager();
+    if (authTokenSecretMgr != null) {
+      setSecretManager(authTokenSecretMgr);
+      authTokenSecretMgr.start();
+    }
+    this.authManager = new ServiceAuthorizationManager();
+    HBasePolicyProvider.init(conf, authManager);
+    responder.start();
+    listener.start();
+    scheduler.start();
+    started = true;
+  }
+
+  /** Stops the service.  No new calls will be handled after this is called. */
+  @Override
+  public synchronized void stop() {
+    LOG.info("Stopping server on " + port);
+    running = false;
+    if (authTokenSecretMgr != null) {
+      authTokenSecretMgr.stop();
+      authTokenSecretMgr = null;
+    }
+    listener.interrupt();
+    listener.doStop();
+    responder.interrupt();
+    scheduler.stop();
+    notifyAll();
+  }
+
+  /** Wait for the server to be stopped.
+   * Does not wait for all subthreads to finish.
+   *  See {@link #stop()}.
+   * @throws InterruptedException e
+   */
+  @Override
+  public synchronized void join() throws InterruptedException {
+    while (running) {
+      wait();
+    }
+  }
+
+  /**
+   * Return the socket (ip+port) on which the RPC server is listening. May return null if
+   * the listener channel is closed.
+   * @return the socket (ip+port) on which the RPC server is listening, or null if this
+   * information cannot be determined
+   */
+  @Override
+  public synchronized InetSocketAddress getListenerAddress() {
+    if (listener == null) {
+      return null;
+    }
+    return listener.getAddress();
+  }
+
+  public Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md,
+      Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status)
+      throws IOException {
+    return call(service, md, param, cellScanner, receiveTime, status, System.currentTimeMillis(), 0);
+  }
+
+  public Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md, Message param,
+      CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status, long startTime,
+      int timeout)
+      throws IOException {
+    Call fakeCall = new Call(-1, service, md, null, param, cellScanner, null, null, -1, null, null, timeout,
+      null);
+    fakeCall.setReceiveTime(receiveTime);
+    return call(fakeCall, status);
+  }
+
+  /**
+   * When the read or write buffer size is larger than this limit, i/o will be
+   * done in chunks of this size. Most RPC requests and responses would
+   * be smaller.
+   */
+  private static int NIO_BUFFER_LIMIT = 64 * 1024; //should not be more than 64KB.
+
+  /**
+   * This is a wrapper around {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}.
+   * If the amount of data is large, it writes to channel in smaller chunks.
+   * This prevents the JDK from creating many direct buffers as the buffer
+   * size increases. It also minimizes extra copies in the NIO layer
+   * as a result of multiple write operations required to write a large
+   * buffer.
+   *
+   * @param channel writable byte channel to write to
+   * @param bufferChain Chain of buffers to write
+   * @return number of bytes written
+   * @throws java.io.IOException e
+   * @see java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)
+   */
+  protected long channelWrite(GatheringByteChannel channel, BufferChain bufferChain)
+  throws IOException {
+    long count =  bufferChain.write(channel, NIO_BUFFER_LIMIT);
+    if (count > 0) this.metrics.sentBytes(count);
+    return count;
+  }
+
+  /**
+   * This is a wrapper around {@link java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)}.
+   * If the amount of data is large, it reads from the channel in smaller chunks.
+   * This prevents the JDK from creating many direct buffers as the size of the
+   * ByteBuffer increases. There should not be any performance degradation.
+   *
+   * @param channel readable byte channel to read from
+   * @param buffer buffer to read into
+   * @return number of bytes read
+   * @throws java.io.IOException e
+   * @see java.nio.channels.ReadableByteChannel#read(java.nio.ByteBuffer)
+   */
+  protected int channelRead(ReadableByteChannel channel,
+                                   ByteBuffer buffer) throws IOException {
+
+    int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
+           channel.read(buffer) : channelIO(channel, null, buffer);
+    if (count > 0) {
+      metrics.receivedBytes(count);
+    }
+    return count;
+  }
+
+  /**
+   * Helper for {@link #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)}
+   * and {@link #channelWrite(GatheringByteChannel, BufferChain)}. Only
+   * one of readCh or writeCh should be non-null.
+   *
+   * @param readCh read channel
+   * @param writeCh write channel
+   * @param buf buffer to read or write into/out of
+   * @return bytes read or written
+   * @throws java.io.IOException e
+   * @see #channelRead(java.nio.channels.ReadableByteChannel, java.nio.ByteBuffer)
+   * @see #channelWrite(GatheringByteChannel, BufferChain)
+   */
+  protected static int channelIO(ReadableByteChannel readCh,
+                               WritableByteChannel writeCh,
+                               ByteBuffer buf) throws IOException {
+
+    int originalLimit = buf.limit();
+    int initialRemaining = buf.remaining();
+    int ret = 0;
+
+    while (buf.remaining() > 0) {
+      try {
+        int ioSize = Math.min(buf.remaining(), NIO_BUFFER_LIMIT);
+        buf.limit(buf.position() + ioSize);
+
+        ret = (readCh == null) ? writeCh.write(buf) : readCh.read(buf);
+
+        if (ret < ioSize) {
+          break;
+        }
+
+      } finally {
+        buf.limit(originalLimit);
+      }
+    }
+
+    int nBytes = initialRemaining - buf.remaining();
+    return (nBytes > 0) ? nBytes : ret;
+  }
+
+  /**
+   * A convenience method to bind to a given address and report
+   * better exceptions if the address is not a valid host.
+   * @param socket the socket to bind
+   * @param address the address to bind to
+   * @param backlog the number of connections allowed in the queue
+   * @throws BindException if the address can't be bound
+   * @throws UnknownHostException if the address isn't a valid host name
+   * @throws IOException other random errors from bind
+   */
+  public static void bind(ServerSocket socket, InetSocketAddress address,
+                          int backlog) throws IOException {
+    try {
+      socket.bind(address, backlog);
+    } catch (BindException e) {
+      BindException bindException =
+        new BindException("Problem binding to " + address + " : " +
+            e.getMessage());
+      bindException.initCause(e);
+      throw bindException;
+    } catch (SocketException e) {
+      // If they try to bind to a different host's address, give a better
+      // error message.
+      if ("Unresolved address".equals(e.getMessage())) {
+        throw new UnknownHostException("Invalid hostname for server: " +
+                                       address.getHostName());
+      }
+      throw e;
+    }
+  }
+
+  /**
+   * The number of open RPC connections
+   * @return the number of open rpc connections
+   */
+  public int getNumOpenConnections() {
+    return connectionManager.size();
+  }
+
+  private class ConnectionManager {
+    final private AtomicInteger count = new AtomicInteger();
+    final private Set<Connection> connections;
+
+    final private Timer idleScanTimer;
+    final private int idleScanThreshold;
+    final private int idleScanInterval;
+    final private int maxIdleTime;
+    final private int maxIdleToClose;
+
+    ConnectionManager() {
+      this.idleScanTimer = new Timer("RpcServer idle connection scanner for port " + port, true);
+      this.idleScanThreshold = conf.getInt("hbase.ipc.client.idlethreshold", 4000);
+      this.idleScanInterval =
+          conf.getInt("hbase.ipc.client.connection.idle-scan-interval.ms", 10000);
+      this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 10000);
+      this.maxIdleToClose = conf.getInt("hbase.ipc.client.kill.max", 10);
+      int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
+          HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
+      int maxConnectionQueueSize =
+          handlerCount * conf.getInt("hbase.ipc.server.handler.queue.size", 100);
+      // create a set with concurrency -and- a thread-safe iterator, add 2
+      // for listener and idle closer threads
+      this.connections = Collections.newSetFromMap(
+          new ConcurrentHashMap<Connection,Boolean>(
+              maxConnectionQueueSize, 0.75f, readThreads+2));
+    }
+
+    private boolean add(Connection connection) {
+      boolean added = connections.add(connection);
+      if (added) {
+        count.getAndIncrement();
+      }
+      return added;
+    }
+
+    private boolean remove(Connection connection) {
+      boolean removed = connections.remove(connection);
+      if (removed) {
+        count.getAndDecrement();
+      }
+      return removed;
+    }
+
+    int size() {
+      return count.get();
+    }
+
+    Connection[] toArray() {
+      return connections.toArray(new Connection[0]);
+    }
+
+    Connection register(SocketChannel channel) {
+      Connection connection = getConnection(channel, System.currentTimeMillis());
+      add(connection);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Server connection from " + connection +
+            "; connections=" + size() +
+            ", queued calls size (bytes)=" + callQueueSizeInBytes.sum() +
+            ", general queued calls=" + scheduler.getGeneralQueueLength() +
+            ", priority queued calls=" + scheduler.getPriorityQueueLength());
+      }
+      return connection;
+    }
+
+    boolean close(Connection connection) {
+      boolean exists = remove(connection);
+      if (exists) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(Thread.currentThread().getName() +
+              ": disconnecting client " + connection +
+              ". Number of active connections: "+ size());
+        }
+        // only close if actually removed to avoid double-closing due
+        // to possible races
+        connection.close();
+      }
+      return exists;
+    }
+
+    // synch'ed to avoid explicit invocation upon OOM from colliding with
+    // timer task firing
+    synchronized void closeIdle(boolean scanAll) {
+      long minLastContact = System.currentTimeMillis() - maxIdleTime;
+      // concurrent iterator might miss new connections added
+      // during the iteration, but that's ok because they won't
+      // be idle yet anyway and will be caught on next scan
+      int closed = 0;
+      for (Connection connection : connections) {
+        // stop if connections dropped below threshold unless scanning all
+        if (!scanAll && size() < idleScanThreshold) {
+          break;
+        }
+        // stop if not scanning all and max connections are closed
+        if (connection.isIdle() &&
+            connection.getLastContact() < minLastContact &&
+            close(connection) &&
+            !scanAll && (++closed == maxIdleToClose)) {
+          break;
+        }
+      }
+    }
+
+    void closeAll() {
+      // use a copy of the connections to be absolutely sure the concurrent
+      // iterator doesn't miss a connection
+      for (Connection connection : toArray()) {
+        close(connection);
+      }
+    }
+
+    void startIdleScan() {
+      scheduleIdleScanTask();
+    }
+
+    void stopIdleScan() {
+      idleScanTimer.cancel();
+    }
+
+    private void scheduleIdleScanTask() {
+      if (!running) {
+        return;
+      }
+      TimerTask idleScanTask = new TimerTask(){
+        @Override
+        public void run() {
+          if (!running) {
+            return;
+          }
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(Thread.currentThread().getName()+": task running");
+          }
+          try {
+            closeIdle(false);
+          } finally {
+            // explicitly reschedule so next execution occurs relative
+            // to the end of this scan, not the beginning
+            scheduleIdleScanTask();
+          }
+        }
+      };
+      idleScanTimer.schedule(idleScanTask, idleScanInterval);
+    }
+  }
+
+}
\ No newline at end of file
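
The channelWrite/channelRead/channelIO helpers above deliberately cap each I/O operation at NIO_BUFFER_LIMIT (64 KB) so the JDK does not allocate one large temporary direct buffer per call. A minimal standalone sketch of the same chunking idea, with illustrative names that are not part of the commit:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

public final class ChunkedWriteSketch {
  // Same role as SimpleRpcServer.NIO_BUFFER_LIMIT: cap each write at 64 KB.
  private static final int CHUNK_LIMIT = 64 * 1024;

  /** Writes buf in chunks of at most CHUNK_LIMIT bytes and returns the bytes written. */
  static long writeChunked(WritableByteChannel channel, ByteBuffer buf) throws IOException {
    long written = 0;
    int originalLimit = buf.limit();
    try {
      while (buf.hasRemaining()) {
        int ioSize = Math.min(buf.remaining(), CHUNK_LIMIT);
        buf.limit(buf.position() + ioSize);   // expose only one chunk to the channel
        int n = channel.write(buf);
        written += n;
        buf.limit(originalLimit);             // restore the real limit for the next chunk
        if (n < ioSize) {
          break;                              // short write: stop and let the caller retry
        }
      }
    } finally {
      buf.limit(originalLimit);
    }
    return written;
  }
}

If the channel accepts fewer bytes than the chunk size (a short write on a non-blocking channel), the loop stops and the caller retries later, mirroring what channelIO does.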

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 4c5c935..27fef8d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -91,6 +91,7 @@ import org.apache.hadoop.hbase.ipc.RpcCallContext;
 import org.apache.hadoop.hbase.ipc.RpcCallback;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
+import org.apache.hadoop.hbase.ipc.RpcServerFactory;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
@@ -1060,7 +1061,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     // Set how many times to retry talking to another server over Connection.
     ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
     try {
-      rpcServer = new RpcServer(rs, name, getServices(),
+      rpcServer = RpcServerFactory.createRpcServer(rs, name, getServices(),
           bindAddress, // use final bindAddress for this server.
           rs.conf,
           rpcSchedulerFactory.create(rs.conf, this, rs));
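
The hunk above replaces direct construction of RpcServer with RpcServerFactory.createRpcServer, so the concrete RPC server implementation is chosen behind a factory rather than hard-wired into RSRpcServices. A hypothetical sketch of that kind of indirection (the configuration key, default class name, and reflection details below are illustrative assumptions, not taken from this commit):

import java.lang.reflect.Constructor;
import org.apache.hadoop.conf.Configuration;

// Hypothetical sketch only; not the source of org.apache.hadoop.hbase.ipc.RpcServerFactory.
public final class RpcServerFactorySketch {
  // Assumed configuration key, used here purely for illustration.
  private static final String IMPL_KEY = "hbase.rpc.server.impl";

  @SuppressWarnings("unchecked")
  public static <T> T create(Configuration conf, String defaultImpl,
      Class<?>[] ctorTypes, Object... ctorArgs) throws Exception {
    // Choose the implementation class from configuration, falling back to a default.
    String implName = conf.get(IMPL_KEY, defaultImpl);
    Class<?> impl = Class.forName(implName);
    Constructor<?> ctor = impl.getConstructor(ctorTypes);  // caller supplies the shared signature
    return (T) ctor.newInstance(ctorArgs);
  }
}

Because every implementation exposes the same constructor signature, the calling code in RSRpcServices stays unchanged when a different server implementation is configured.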


[20/50] [abbrv] hbase git commit: HBASE-16010 Put draining function through Admin API (Matt Warhaftig)

Posted by sy...@apache.org.
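
The generated MasterProtos additions below follow the usual protobuf pattern: an empty request message plus a response carrying repeated ServerName entries, each with newBuilder()/build() construction and parseFrom() parsing. A minimal round-trip sketch using only those generated methods (the surrounding main() is illustrative, not part of the commit):

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;

public final class DrainingRequestRoundTripSketch {
  public static void main(String[] args) throws Exception {
    // Build the (empty) request through its generated builder.
    ListDrainingRegionServersRequest request =
        ListDrainingRegionServersRequest.newBuilder().build();
    // Serialize and parse it back, as the RPC layer would over the wire.
    byte[] wire = request.toByteArray();
    ListDrainingRegionServersRequest parsed = ListDrainingRegionServersRequest.parseFrom(wire);
    System.out.println("initialized=" + parsed.isInitialized());
  }
}
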
http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 2af3982..1794a49 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -65611,6 +65611,3387 @@ public final class MasterProtos {
 
   }
 
+  public interface ListDrainingRegionServersRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.ListDrainingRegionServersRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.ListDrainingRegionServersRequest}
+   */
+  public  static final class ListDrainingRegionServersRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.ListDrainingRegionServersRequest)
+      ListDrainingRegionServersRequestOrBuilder {
+    // Use ListDrainingRegionServersRequest.newBuilder() to construct.
+    private ListDrainingRegionServersRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private ListDrainingRegionServersRequest() {
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private ListDrainingRegionServersRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.Builder.class);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest) obj;
+
+      boolean result = true;
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.ListDrainingRegionServersRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.ListDrainingRegionServersRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.ListDrainingRegionServersRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.ListDrainingRegionServersRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListDrainingRegionServersRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<ListDrainingRegionServersRequest>() {
+      public ListDrainingRegionServersRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new ListDrainingRegionServersRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListDrainingRegionServersRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListDrainingRegionServersRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface ListDrainingRegionServersResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.ListDrainingRegionServersResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName>
+        getServerNameList();
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName(int index);
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    int getServerNameCount();
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+        getServerNameOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.ListDrainingRegionServersResponse}
+   */
+  public  static final class ListDrainingRegionServersResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.ListDrainingRegionServersResponse)
+      ListDrainingRegionServersResponseOrBuilder {
+    // Use ListDrainingRegionServersResponse.newBuilder() to construct.
+    private ListDrainingRegionServersResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private ListDrainingRegionServersResponse() {
+      serverName_ = java.util.Collections.emptyList();
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private ListDrainingRegionServersResponse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                serverName_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName>();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              serverName_.add(
+                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          serverName_ = java.util.Collections.unmodifiableList(serverName_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersResponse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.Builder.class);
+    }
+
+    public static final int SERVER_NAME_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> serverName_;
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> getServerNameList() {
+      return serverName_;
+    }
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+        getServerNameOrBuilderList() {
+      return serverName_;
+    }
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public int getServerNameCount() {
+      return serverName_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName(int index) {
+      return serverName_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
+        int index) {
+      return serverName_.get(index);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      for (int i = 0; i < getServerNameCount(); i++) {
+        if (!getServerName(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      for (int i = 0; i < serverName_.size(); i++) {
+        output.writeMessage(1, serverName_.get(i));
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      for (int i = 0; i < serverName_.size(); i++) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, serverName_.get(i));
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse) obj;
+
+      boolean result = true;
+      result = result && getServerNameList()
+          .equals(other.getServerNameList());
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (getServerNameCount() > 0) {
+        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getServerNameList().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.ListDrainingRegionServersResponse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.ListDrainingRegionServersResponse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersResponse_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getServerNameFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        if (serverNameBuilder_ == null) {
+          serverName_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          serverNameBuilder_.clear();
+        }
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListDrainingRegionServersResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse(this);
+        int from_bitField0_ = bitField0_;
+        if (serverNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            serverName_ = java.util.Collections.unmodifiableList(serverName_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.serverName_ = serverName_;
+        } else {
+          result.serverName_ = serverNameBuilder_.build();
+        }
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance()) return this;
+        if (serverNameBuilder_ == null) {
+          if (!other.serverName_.isEmpty()) {
+            if (serverName_.isEmpty()) {
+              serverName_ = other.serverName_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureServerNameIsMutable();
+              serverName_.addAll(other.serverName_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.serverName_.isEmpty()) {
+            if (serverNameBuilder_.isEmpty()) {
+              serverNameBuilder_.dispose();
+              serverNameBuilder_ = null;
+              serverName_ = other.serverName_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              serverNameBuilder_ =
+                org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
+                   getServerNameFieldBuilder() : null;
+            } else {
+              serverNameBuilder_.addAllMessages(other.serverName_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        for (int i = 0; i < getServerNameCount(); i++) {
+          if (!getServerName(i).isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> serverName_ =
+        java.util.Collections.emptyList();
+      private void ensureServerNameIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          serverName_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName>(serverName_);
+          bitField0_ |= 0x00000001;
+         }
+      }
+
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
+
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> getServerNameList() {
+        if (serverNameBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(serverName_);
+        } else {
+          return serverNameBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public int getServerNameCount() {
+        if (serverNameBuilder_ == null) {
+          return serverName_.size();
+        } else {
+          return serverNameBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName(int index) {
+        if (serverNameBuilder_ == null) {
+          return serverName_.get(index);
+        } else {
+          return serverNameBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder setServerName(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureServerNameIsMutable();
+          serverName_.set(index, value);
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder setServerName(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (serverNameBuilder_ == null) {
+          ensureServerNameIsMutable();
+          serverName_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder addServerName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureServerNameIsMutable();
+          serverName_.add(value);
+          onChanged();
+        } else {
+          serverNameBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder addServerName(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureServerNameIsMutable();
+          serverName_.add(index, value);
+          onChanged();
+        } else {
+          serverNameBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder addServerName(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (serverNameBuilder_ == null) {
+          ensureServerNameIsMutable();
+          serverName_.add(builderForValue.build());
+          onChanged();
+        } else {
+          serverNameBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder addServerName(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (serverNameBuilder_ == null) {
+          ensureServerNameIsMutable();
+          serverName_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          serverNameBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder addAllServerName(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> values) {
+        if (serverNameBuilder_ == null) {
+          ensureServerNameIsMutable();
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll(
+              values, serverName_);
+          onChanged();
+        } else {
+          serverNameBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder clearServerName() {
+        if (serverNameBuilder_ == null) {
+          serverName_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          serverNameBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder removeServerName(int index) {
+        if (serverNameBuilder_ == null) {
+          ensureServerNameIsMutable();
+          serverName_.remove(index);
+          onChanged();
+        } else {
+          serverNameBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder(
+          int index) {
+        return getServerNameFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
+          int index) {
+        if (serverNameBuilder_ == null) {
+          return serverName_.get(index);  } else {
+          return serverNameBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+           getServerNameOrBuilderList() {
+        if (serverNameBuilder_ != null) {
+          return serverNameBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(serverName_);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() {
+        return getServerNameFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder(
+          int index) {
+        return getServerNameFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder>
+           getServerNameBuilderList() {
+        return getServerNameFieldBuilder().getBuilderList();
+      }
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+          getServerNameFieldBuilder() {
+        if (serverNameBuilder_ == null) {
+          serverNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+                  serverName_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
+                  getParentForChildren(),
+                  isClean());
+          serverName_ = null;
+        }
+        return serverNameBuilder_;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.ListDrainingRegionServersResponse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.ListDrainingRegionServersResponse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListDrainingRegionServersResponse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<ListDrainingRegionServersResponse>() {
+      public ListDrainingRegionServersResponse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new ListDrainingRegionServersResponse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListDrainingRegionServersResponse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListDrainingRegionServersResponse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface DrainRegionServersRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.DrainRegionServersRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName>
+        getServerNameList();
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName(int index);
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    int getServerNameCount();
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+        getServerNameOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.DrainRegionServersRequest}
+   */
+  public  static final class DrainRegionServersRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.DrainRegionServersRequest)
+      DrainRegionServersRequestOrBuilder {
+    // Use DrainRegionServersRequest.newBuilder() to construct.
+    private DrainRegionServersRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private DrainRegionServersRequest() {
+      serverName_ = java.util.Collections.emptyList();
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private DrainRegionServersRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                serverName_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName>();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              serverName_.add(
+                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          serverName_ = java.util.Collections.unmodifiableList(serverName_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DrainRegionServersRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DrainRegionServersRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.Builder.class);
+    }
+
+    public static final int SERVER_NAME_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> serverName_;
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> getServerNameList() {
+      return serverName_;
+    }
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+        getServerNameOrBuilderList() {
+      return serverName_;
+    }
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public int getServerNameCount() {
+      return serverName_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName(int index) {
+      return serverName_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
+        int index) {
+      return serverName_.get(index);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      for (int i = 0; i < getServerNameCount(); i++) {
+        if (!getServerName(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      for (int i = 0; i < serverName_.size(); i++) {
+        output.writeMessage(1, serverName_.get(i));
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      for (int i = 0; i < serverName_.size(); i++) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, serverName_.get(i));
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest) obj;
+
+      boolean result = true;
+      result = result && getServerNameList()
+          .equals(other.getServerNameList());
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (getServerNameCount() > 0) {
+        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getServerNameList().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.DrainRegionServersRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.DrainRegionServersRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DrainRegionServersRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DrainRegionServersRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getServerNameFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        if (serverNameBuilder_ == null) {
+          serverName_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          serverNameBuilder_.clear();
+        }
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DrainRegionServersRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest(this);
+        int from_bitField0_ = bitField0_;
+        if (serverNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            serverName_ = java.util.Collections.unmodifiableList(serverName_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.serverName_ = serverName_;
+        } else {
+          result.serverName_ = serverNameBuilder_.build();
+        }
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance()) return this;
+        if (serverNameBuilder_ == null) {
+          if (!other.serverName_.isEmpty()) {
+            if (serverName_.isEmpty()) {
+              serverName_ = other.serverName_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureServerNameIsMutable();
+              serverName_.addAll(other.serverName_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.serverName_.isEmpty()) {
+            if (serverNameBuilder_.isEmpty()) {
+              serverNameBuilder_.dispose();
+              serverNameBuilder_ = null;
+              serverName_ = other.serverName_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              serverNameBuilder_ =
+                org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
+                   getServerNameFieldBuilder() : null;
+            } else {
+              serverNameBuilder_.addAllMessages(other.serverName_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        for (int i = 0; i < getServerNameCount(); i++) {
+          if (!getServerName(i).isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> serverName_ =
+        java.util.Collections.emptyList();
+      private void ensureServerNameIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          serverName_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName>(serverName_);
+          bitField0_ |= 0x00000001;
+         }
+      }
+
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
+
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> getServerNameList() {
+        if (serverNameBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(serverName_);
+        } else {
+          return serverNameBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public int getServerNameCount() {
+        if (serverNameBuilder_ == null) {
+          return serverName_.size();
+        } else {
+          return serverNameBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getServerName(int index) {
+        if (serverNameBuilder_ == null) {
+          return serverName_.get(index);
+        } else {
+          return serverNameBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder setServerName(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureServerNameIsMutable();
+          serverName_.set(index, value);
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder setServerName(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (serverNameBuilder_ == null) {
+          ensureServerNameIsMutable();
+          serverName_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder addServerName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureServerNameIsMutable();
+          serverName_.add(value);
+          onChanged();
+        } else {
+          serverNameBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder addServerName(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureServerNameIsMutable();
+          serverName_.add(index, value);
+          onChanged();
+        } else {
+          serverNameBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder addServerName(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (serverNameBuilder_ == null) {
+          ensureServerNameIsMutable();
+          serverName_.add(builderForValue.build());
+          onChanged();
+        } else {
+          serverNameBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder addServerName(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (serverNameBuilder_ == null) {
+          ensureServerNameIsMutable();
+          serverName_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          serverNameBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder addAllServerName(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> values) {
+        if (serverNameBuilder_ == null) {
+          ensureServerNameIsMutable();
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll(
+              values, serverName_);
+          onChanged();
+        } else {
+          serverNameBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder clearServerName() {
+        if (serverNameBuilder_ == null) {
+          serverName_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          serverNameBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public Builder removeServerName(int index) {
+        if (serverNameBuilder_ == null) {
+          ensureServerNameIsMutable();
+          serverName_.remove(index);
+          onChanged();
+        } else {
+          serverNameBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder(
+          int index) {
+        return getServerNameFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder(
+          int index) {
+        if (serverNameBuilder_ == null) {
+          return serverName_.get(index);  } else {
+          return serverNameBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+           getServerNameOrBuilderList() {
+        if (serverNameBuilder_ != null) {
+          return serverNameBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(serverName_);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder() {
+        return getServerNameFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder addServerNameBuilder(
+          int index) {
+        return getServerNameFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder>
+           getServerNameBuilderList() {
+        return getServerNameFieldBuilder().getBuilderList();
+      }
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+          getServerNameFieldBuilder() {
+        if (serverNameBuilder_ == null) {
+          serverNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+                  serverName_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
+                  getParentForChildren(),
+                  isClean());
+          serverName_ = null;
+        }
+        return serverNameBuilder_;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.DrainRegionServersRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.DrainRegionServersRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DrainRegionServersRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<DrainRegionServersRequest>() {
+      public DrainRegionServersRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new DrainRegionServersRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DrainRegionServersRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DrainRegionServersRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface DrainRegionServersResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.DrainRegionServersResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.DrainRegionServersResponse}
+   */
+  public  static final class DrainRegionServersResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.DrainRegionServersResponse)
+      DrainRegionServersResponseOrBuilder {
+    // Use DrainRegionServersResponse.newBuilder() to construct.
+    private DrainRegionServersResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private DrainRegionServers

<TRUNCATED>
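
For orientation, the generated DrainRegionServersRequest above exposes the usual protobuf builder API (newBuilder(), addServerName(), build()). A minimal usage sketch follows; the ServerName fields set here (host_name, port, start_code) are assumed from the standard hbase.pb.ServerName message and are illustrative only, not taken from this patch:

    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;

    public class DrainRequestSketch {
      public static void main(String[] args) {
        // Assumed ServerName layout: host_name (required), port, start_code.
        ServerName server = ServerName.newBuilder()
            .setHostName("rs1.example.com")
            .setPort(16020)
            .setStartCode(1L)
            .build();

        // repeated .hbase.pb.ServerName server_name = 1;
        DrainRegionServersRequest request = DrainRegionServersRequest.newBuilder()
            .addServerName(server)
            .build();

        System.out.println(request.getServerNameCount()); // prints 1
      }
    }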

[29/50] [abbrv] hbase git commit: HBASE-17376 ClientAsyncPrefetchScanner may fail due to too many rows (ChiaPing Tsai)

Posted by sy...@apache.org.
HBASE-17376 ClientAsyncPrefetchScanner may fail due to too many rows (ChiaPing Tsai)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e18e9a22
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e18e9a22
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e18e9a22

Branch: refs/heads/hbase-12439
Commit: e18e9a22daf32306f966641ea02a72fca96dee32
Parents: 463ffa7
Author: tedyu <yu...@gmail.com>
Authored: Mon Dec 26 15:55:22 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Mon Dec 26 15:55:22 2016 -0800

----------------------------------------------------------------------
 .../client/ClientAsyncPrefetchScanner.java      |  2 +-
 .../client/TestScannersFromClientSide.java      | 93 ++++++++++++++------
 2 files changed, 69 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/e18e9a22/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
index ec33dd2..6b70a88 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
@@ -76,7 +76,7 @@ public class ClientAsyncPrefetchScanner extends ClientScanner {
   protected void initCache() {
     // concurrent cache
     cacheCapacity = calcCacheCapacity();
-    cache = new LinkedBlockingQueue<Result>(cacheCapacity);
+    cache = new LinkedBlockingQueue<Result>();
     cacheSizeInBytes = new AtomicLong(0);
     exceptionsQueue = new ConcurrentLinkedQueue<Exception>();
     prefetchRunnable = new PrefetchRunnable();
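
The one-line change above drops the capacity bound on the prefetch cache: per the commit title, a bounded queue could make the async prefetch scanner fail once a scan returned too many rows, so the queue is now unbounded (the surrounding code keeps a separate cacheSizeInBytes counter). A minimal, self-contained sketch (plain JDK, not HBase code) of the practical difference between a bounded and an unbounded LinkedBlockingQueue:

    import java.util.concurrent.LinkedBlockingQueue;

    public class QueueBoundSketch {
      public static void main(String[] args) {
        // Bounded queue: offer() starts failing once the capacity is reached
        // (a producer using put() would block instead of failing).
        LinkedBlockingQueue<Integer> bounded = new LinkedBlockingQueue<>(2);
        System.out.println(bounded.offer(1)); // true
        System.out.println(bounded.offer(2)); // true
        System.out.println(bounded.offer(3)); // false - capacity exhausted

        // Unbounded queue: every offer succeeds; any memory accounting has to
        // be done by the caller (as the patch does with a separate counter).
        LinkedBlockingQueue<Integer> unbounded = new LinkedBlockingQueue<>();
        for (int i = 0; i < 100_000; i++) {
          unbounded.offer(i);
        }
        System.out.println(unbounded.size()); // 100000
      }
    }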

http://git-wip-us.apache.org/repos/asf/hbase/blob/e18e9a22/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index 19b06b5..8862109 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -21,6 +21,8 @@ import static org.junit.Assert.assertTrue;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.IntStream;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -41,6 +43,7 @@ import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -646,45 +649,72 @@ public class TestScannersFromClientSide {
     verifyResult(result, kvListExp, toLog, "Testing scan on re-opened region");
   }
 
-  /**
-   * Test from client side for async scan
-   *
-   * @throws Exception
-   */
   @Test
-  public void testAsyncScanner() throws Exception {
-    TableName TABLE = TableName.valueOf("testAsyncScan");
-    byte [][] ROWS = HTestConst.makeNAscii(ROW, 2);
-    byte [][] FAMILIES = HTestConst.makeNAscii(FAMILY, 3);
-    byte [][] QUALIFIERS = HTestConst.makeNAscii(QUALIFIER, 10);
+  public void testAsyncScannerWithSmallData() throws Exception {
+    testAsyncScanner(TableName.valueOf("testAsyncScannerWithSmallData"),
+      2,
+      3,
+      10);
+  }
 
-    Table ht = TEST_UTIL.createTable(TABLE, FAMILIES);
+  @Test
+  public void testAsyncScannerWithManyRows() throws Exception {
+    testAsyncScanner(TableName.valueOf("testAsyncScannerWithManyRows"),
+      30000,
+      1,
+      1);
+  }
 
-    Put put;
-    Scan scan;
-    Result result;
-    boolean toLog = true;
-    List<Cell> kvListExp, kvListScan;
+  private void testAsyncScanner(TableName table, int rowNumber, int familyNumber,
+      int qualifierNumber) throws Exception {
+    assert rowNumber > 0;
+    assert familyNumber > 0;
+    assert qualifierNumber > 0;
+    byte[] row = Bytes.toBytes("r");
+    byte[] family = Bytes.toBytes("f");
+    byte[] qualifier = Bytes.toBytes("q");
+    byte[][] rows = makeNAsciiWithZeroPrefix(row, rowNumber);
+    byte[][] families = makeNAsciiWithZeroPrefix(family, familyNumber);
+    byte[][] qualifiers = makeNAsciiWithZeroPrefix(qualifier, qualifierNumber);
 
-    kvListExp = new ArrayList<Cell>();
+    Table ht = TEST_UTIL.createTable(table, families);
 
-    for (int r=0; r < ROWS.length; r++) {
-      put = new Put(ROWS[r]);
-      for (int c=0; c < FAMILIES.length; c++) {
-        for (int q=0; q < QUALIFIERS.length; q++) {
-          KeyValue kv = new KeyValue(ROWS[r], FAMILIES[c], QUALIFIERS[q], 1, VALUE);
+    boolean toLog = true;
+    List<Cell> kvListExp = new ArrayList<>();
+
+    List<Put> puts = new ArrayList<>();
+    for (byte[] r : rows) {
+      Put put = new Put(r);
+      for (byte[] f : families) {
+        for (byte[] q : qualifiers) {
+          KeyValue kv = new KeyValue(r, f, q, 1, VALUE);
           put.add(kv);
           kvListExp.add(kv);
         }
       }
-      ht.put(put);
+      puts.add(put);
+      if (puts.size() > 1000) {
+        ht.put(puts);
+        puts.clear();
+      }
+    }
+    if (!puts.isEmpty()) {
+      ht.put(puts);
+      puts.clear();
     }
 
-    scan = new Scan();
+    Scan scan = new Scan();
     scan.setAsyncPrefetch(true);
     ResultScanner scanner = ht.getScanner(scan);
-    kvListScan = new ArrayList<Cell>();
+    List<Cell> kvListScan = new ArrayList<>();
+    Result result;
+    boolean first = true;
     while ((result = scanner.next()) != null) {
+      // Wait for the prefetch cache to fill. See HBASE-17376.
+      if (first) {
+        TimeUnit.SECONDS.sleep(1);
+        first = false;
+      }
       for (Cell kv : result.listCells()) {
         kvListScan.add(kv);
       }
@@ -692,7 +722,20 @@ public class TestScannersFromClientSide {
     result = Result.create(kvListScan);
     assertTrue("Not instance of async scanner",scanner instanceof ClientAsyncPrefetchScanner);
     verifyResult(result, kvListExp, toLog, "Testing async scan");
+    TEST_UTIL.deleteTable(table);
+  }
 
+  private static byte[][] makeNAsciiWithZeroPrefix(byte[] base, int n) {
+    int maxLength = Integer.toString(n).length();
+    byte [][] ret = new byte[n][];
+    for (int i = 0; i < n; i++) {
+      int length = Integer.toString(i).length();
+      StringBuilder buf = new StringBuilder(Integer.toString(i));
+      IntStream.range(0, maxLength - length).forEach(v -> buf.insert(0, "0"));
+      byte[] tail = Bytes.toBytes(buf.toString());
+      ret[i] = Bytes.add(base, tail);
+    }
+    return ret;
   }
 
   static void verifyResult(Result result, List<Cell> expKvList, boolean toLog,
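
One non-obvious detail in the helper above: the zero padding is what keeps the scan output aligned with kvListExp. HBase returns rows in lexicographic byte order, so without a fixed-width suffix a row like r10 would sort before r2 and the comparison against the insertion-ordered expected list would no longer line up. A standalone sketch of the same padding idea, using only the JDK (nothing here is HBase API):

import java.util.stream.IntStream;

// Illustrative only: left-pad the numeric suffix so byte-wise ordering matches numeric ordering.
public class ZeroPrefixDemo {
  static String[] makeNWithZeroPrefix(String base, int n) {
    int width = Integer.toString(n).length();
    return IntStream.range(0, n)
        .mapToObj(i -> base + String.format("%0" + width + "d", i))
        .toArray(String[]::new);
  }

  public static void main(String[] args) {
    String[] rows = makeNWithZeroPrefix("r", 30000);
    // Prints r00000 r00001 r29999; the keys sort identically as bytes and as numbers.
    System.out.println(rows[0] + " " + rows[1] + " " + rows[29999]);
  }
}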


[14/50] [abbrv] hbase git commit: HBASE-17160 Undo unnecessary inter-module dependency; spark to hbase-it and hbase-it to shell; ADDENDUM

Posted by sy...@apache.org.
HBASE-17160 Undo unnecessary inter-module dependency; spark to hbase-it and hbase-it to shell; ADDENDUM


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/45da294a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/45da294a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/45da294a

Branch: refs/heads/hbase-12439
Commit: 45da294a171faea10b34b022c9d3990bdb72d0e8
Parents: 09bb428
Author: Michael Stack <st...@apache.org>
Authored: Thu Dec 22 08:16:55 2016 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Thu Dec 22 08:17:30 2016 -0800

----------------------------------------------------------------------
 hbase-endpoint/pom.xml | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/45da294a/hbase-endpoint/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml
index 0838efa..fc200f8 100644
--- a/hbase-endpoint/pom.xml
+++ b/hbase-endpoint/pom.xml
@@ -119,6 +119,16 @@
       </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>${compat.module}</artifactId>
       <version>${project.version}</version>
     </dependency>


[18/50] [abbrv] hbase git commit: HBASE-17314 Limit total buffered size for all replication sources

Posted by sy...@apache.org.
HBASE-17314 Limit total buffered size for all replication sources


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8fb9a91d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8fb9a91d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8fb9a91d

Branch: refs/heads/hbase-12439
Commit: 8fb9a91d441fc7ea8d316ca3fb670ddc6dd6561a
Parents: b3f2bec
Author: Phil Yang <ya...@apache.org>
Authored: Tue Dec 20 16:05:18 2016 +0800
Committer: Phil Yang <ya...@apache.org>
Committed: Fri Dec 23 11:48:06 2016 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/HConstants.java     |  10 +
 .../hbase/regionserver/HRegionServer.java       |   3 +-
 .../regionserver/ReplicationSource.java         |  37 +++-
 .../regionserver/ReplicationSourceManager.java  |   8 +
 .../replication/TestReplicationEndpoint.java    |   3 +-
 .../replication/TestReplicationSource.java      |  13 +-
 .../regionserver/TestGlobalThrottler.java       | 184 +++++++++++++++++++
 7 files changed, 245 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/8fb9a91d/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 48d9778..1eec691 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -933,6 +933,16 @@ public final class HConstants {
       REPLICATION_SERIALLY_WAITING_DEFAULT = 10000;
 
   /**
+   * Max total size of buffered entries in all replication peers. It prevents the server from
+   * running out of memory when there are many peers. The default is 256MB, which is four times
+   * the default replication.source.size.capacity.
+   */
+  public static final String REPLICATION_SOURCE_TOTAL_BUFFER_KEY = "replication.total.buffer.quota";
+
+  public static final int REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT = 256 * 1024 * 1024;
+
+
+  /**
    * Directory where the source cluster file system client configuration are placed which is used by
    * sink cluster to copy HFiles from source cluster file system
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fb9a91d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 5bc0a66..853d699 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2340,7 +2340,8 @@ public class HRegionServer extends HasThread implements
    * @return Return the object that implements the replication
    * source service.
    */
-  ReplicationSourceService getReplicationSourceService() {
+  @VisibleForTesting
+  public ReplicationSourceService getReplicationSourceService() {
     return replicationSourceHandler;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fb9a91d/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index f777282..3eeb4b8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -150,6 +150,9 @@ public class ReplicationSource extends Thread
   private ConcurrentHashMap<String, ReplicationSourceWorkerThread> workerThreads =
       new ConcurrentHashMap<String, ReplicationSourceWorkerThread>();
 
+  private AtomicLong totalBufferUsed;
+  private long totalBufferQuota;
+
   /**
    * Instantiation method used by region servers
    *
@@ -201,7 +204,9 @@ public class ReplicationSource extends Thread
     defaultBandwidth = this.conf.getLong("replication.source.per.peer.node.bandwidth", 0);
     currentBandwidth = getCurrentBandwidth();
     this.throttler = new ReplicationThrottler((double) currentBandwidth / 10.0);
-
+    this.totalBufferUsed = manager.getTotalBufferUsed();
+    this.totalBufferQuota = conf.getLong(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY,
+        HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT);
     LOG.info("peerClusterZnode=" + peerClusterZnode + ", ReplicationSource : " + peerId
         + " inited, replicationQueueSizeCapacity=" + replicationQueueSizeCapacity
         + ", replicationQueueNbCapacity=" + replicationQueueNbCapacity + ", curerntBandwidth="
@@ -536,7 +541,7 @@ public class ReplicationSource extends Thread
     private boolean workerRunning = true;
     // Current number of hfiles that we need to replicate
     private long currentNbHFiles = 0;
-
+    List<WAL.Entry> entries;
     // Use guava cache to set ttl for each key
     private LoadingCache<String, Boolean> canSkipWaitingSet = CacheBuilder.newBuilder()
         .expireAfterAccess(1, TimeUnit.DAYS).build(
@@ -556,6 +561,7 @@ public class ReplicationSource extends Thread
       this.replicationQueueInfo = replicationQueueInfo;
       this.repLogReader = new ReplicationWALReaderManager(fs, conf);
       this.source = source;
+      this.entries = new ArrayList<>();
     }
 
     @Override
@@ -628,8 +634,7 @@ public class ReplicationSource extends Thread
         boolean gotIOE = false;
         currentNbOperations = 0;
         currentNbHFiles = 0;
-        List<WAL.Entry> entries = new ArrayList<WAL.Entry>(1);
-
+        entries.clear();
         Map<String, Long> lastPositionsForSerialScope = new HashMap<>();
         currentSize = 0;
         try {
@@ -721,6 +726,7 @@ public class ReplicationSource extends Thread
           continue;
         }
         shipEdits(currentWALisBeingWrittenTo, entries, lastPositionsForSerialScope);
+        releaseBufferQuota();
       }
       if (replicationQueueInfo.isQueueRecovered()) {
         // use synchronize to make sure one last thread will clean the queue
@@ -810,7 +816,7 @@ public class ReplicationSource extends Thread
             }
           }
         }
-
+        boolean totalBufferTooLarge = false;
         // don't replicate if the log entries have already been consumed by the cluster
         if (replicationEndpoint.canReplicateToSameCluster()
             || !entry.getKey().getClusterIds().contains(peerClusterId)) {
@@ -828,15 +834,16 @@ public class ReplicationSource extends Thread
             logKey.addClusterId(clusterId);
             currentNbOperations += countDistinctRowKeys(edit);
             entries.add(entry);
-            currentSize += entry.getEdit().heapSize();
-            currentSize += calculateTotalSizeOfStoreFiles(edit);
+            int delta = (int)entry.getEdit().heapSize() + calculateTotalSizeOfStoreFiles(edit);
+            currentSize += delta;
+            totalBufferTooLarge = acquireBufferQuota(delta);
           } else {
             metrics.incrLogEditsFiltered();
           }
         }
         // Stop if too many entries or too big
         // FIXME check the relationship between single wal group and overall
-        if (currentSize >= replicationQueueSizeCapacity
+        if (totalBufferTooLarge || currentSize >= replicationQueueSizeCapacity
             || entries.size() >= replicationQueueNbCapacity) {
           break;
         }
@@ -1317,5 +1324,19 @@ public class ReplicationSource extends Thread
     public void setWorkerRunning(boolean workerRunning) {
       this.workerRunning = workerRunning;
     }
+
+    /**
+     * @param size the number of bytes by which the buffer grew
+     * @return true if the total buffer quota has been reached and the current batch should be shipped
+     */
+    private boolean acquireBufferQuota(long size) {
+      return totalBufferUsed.addAndGet(size) >= totalBufferQuota;
+    }
+
+    private void releaseBufferQuota() {
+      totalBufferUsed.addAndGet(-currentSize);
+      currentSize = 0;
+      entries.clear();
+    }
   }
 }
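
Taken together, the ReplicationSource changes amount to a shared-counter pattern: every worker adds the size of each batched WAL entry to one AtomicLong owned by the manager, ships early once the sum reaches the quota, and subtracts its batch size again after shipping. A condensed, hypothetical sketch of that pattern follows; BufferQuota is an illustrative name, not a class in HBase.

import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch only: one counter shared by all replication sources,
// in the spirit of ReplicationSourceManager.getTotalBufferUsed() above.
class BufferQuota {
  private final AtomicLong totalUsed = new AtomicLong();
  private final long quota;

  BufferQuota(long quota) {
    this.quota = quota;
  }

  /** Grow the shared counter; returns true when the caller should ship its batch now. */
  boolean acquire(long delta) {
    return totalUsed.addAndGet(delta) >= quota;
  }

  /** Give the reservation back once the batch has been shipped (or dropped). */
  void release(long batchSize) {
    totalUsed.addAndGet(-batchSize);
  }
}

Each source keeps its own currentSize so it knows exactly how much to release, which is why releaseBufferQuota() in the diff resets currentSize and clears entries in the same step.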

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fb9a91d/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 2c9fdcc..ef4093e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -42,6 +42,7 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -126,6 +127,8 @@ public class ReplicationSourceManager implements ReplicationListener {
   private Connection connection;
   private long replicationWaitTime;
 
+  private AtomicLong totalBufferUsed = new AtomicLong();
+
   /**
    * Creates a replication manager and sets the watch on all the other registered region servers
    * @param replicationQueues the interface for manipulating replication queues
@@ -435,6 +438,11 @@ public class ReplicationSourceManager implements ReplicationListener {
     }
   }
 
+  @VisibleForTesting
+  public AtomicLong getTotalBufferUsed() {
+    return totalBufferUsed;
+  }
+
   /**
    * Factory method to create a replication source
    * @param conf the configuration to use

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fb9a91d/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
index 002b8c9..f9c467e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.replication;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -361,7 +362,7 @@ public class TestReplicationEndpoint extends TestReplicationBase {
     @Override
     public boolean replicate(ReplicateContext replicateContext) {
       replicateCount.incrementAndGet();
-      lastEntries = replicateContext.entries;
+      lastEntries = new ArrayList<>(replicateContext.entries);
       return true;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fb9a91d/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
index abdd68a..7461edb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertNull;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceManager;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALProvider;
 import org.apache.hadoop.hbase.wal.WALFactory;
@@ -50,6 +52,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
 
 import static org.mockito.Mockito.mock;
 
@@ -140,11 +143,15 @@ public class TestReplicationSource {
       }
     };
     replicationEndpoint.start();
-    ReplicationPeers mockPeers = mock(ReplicationPeers.class);
+    ReplicationPeers mockPeers = Mockito.mock(ReplicationPeers.class);
+    ReplicationPeer mockPeer = Mockito.mock(ReplicationPeer.class);
+    Mockito.when(mockPeer.getPeerBandwidth()).thenReturn(0L);
     Configuration testConf = HBaseConfiguration.create();
     testConf.setInt("replication.source.maxretriesmultiplier", 1);
-    source.init(testConf, null, null, null, mockPeers, null, "testPeer", null,
-      replicationEndpoint, null);
+    ReplicationSourceManager manager = Mockito.mock(ReplicationSourceManager.class);
+    Mockito.when(manager.getTotalBufferUsed()).thenReturn(new AtomicLong());
+    source.init(testConf, null, manager, null, mockPeers, null, "testPeer",
+        null, replicationEndpoint, null);
     ExecutorService executor = Executors.newSingleThreadExecutor();
     Future<?> future = executor.submit(new Runnable() {
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/8fb9a91d/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
new file mode 100644
index 0000000..7e4ae45
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
@@ -0,0 +1,184 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.HTestConst;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ ReplicationTests.class, LargeTests.class })
+public class TestGlobalThrottler {
+  private static final Log LOG = LogFactory.getLog(TestGlobalThrottler.class);
+  private static Configuration conf1;
+  private static Configuration conf2;
+
+  private static HBaseTestingUtility utility1;
+  private static HBaseTestingUtility utility2;
+
+  private static final byte[] famName = Bytes.toBytes("f");
+  private static final byte[] VALUE = Bytes.toBytes("v");
+  private static final byte[] ROW = Bytes.toBytes("r");
+  private static final byte[][] ROWS = HTestConst.makeNAscii(ROW, 100);
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    conf1 = HBaseConfiguration.create();
+    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
+    conf1.setLong("replication.source.sleepforretries", 100);
+    // Each WAL entry is about 120 bytes
+    conf1.setInt(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY, 200);
+    conf1.setLong("replication.source.per.peer.node.bandwidth", 100L);
+
+    utility1 = new HBaseTestingUtility(conf1);
+    utility1.startMiniZKCluster();
+    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
+    new ZooKeeperWatcher(conf1, "cluster1", null, true);
+
+    conf2 = new Configuration(conf1);
+    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
+
+    utility2 = new HBaseTestingUtility(conf2);
+    utility2.setZkCluster(miniZK);
+    new ZooKeeperWatcher(conf2, "cluster2", null, true);
+
+    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
+    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+    rpc.setClusterKey(utility2.getClusterKey());
+
+    utility1.startMiniCluster(1, 1);
+    utility2.startMiniCluster(1, 1);
+
+    admin1.addPeer("peer1", rpc, null);
+    admin1.addPeer("peer2", rpc, null);
+    admin1.addPeer("peer3", rpc, null);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    utility2.shutdownMiniCluster();
+    utility1.shutdownMiniCluster();
+  }
+
+
+  volatile private boolean testQuotaPass = false;
+  volatile private boolean testQuotaNonZero = false;
+  @Test
+  public void testQuota() throws IOException {
+    TableName tableName = TableName.valueOf("testQuota");
+    HTableDescriptor table = new HTableDescriptor(tableName);
+    HColumnDescriptor fam = new HColumnDescriptor(famName);
+    fam.setScope(HConstants.REPLICATION_SCOPE_SERIAL);
+    table.addFamily(fam);
+    utility1.getHBaseAdmin().createTable(table);
+    utility2.getHBaseAdmin().createTable(table);
+
+    Thread watcher = new Thread(()->{
+      Replication replication = (Replication)utility1.getMiniHBaseCluster()
+          .getRegionServer(0).getReplicationSourceService();
+      AtomicLong bufferUsed = replication.getReplicationManager().getTotalBufferUsed();
+      testQuotaPass = true;
+      while (!Thread.interrupted()) {
+        long size = bufferUsed.get();
+        if (size > 0) {
+          testQuotaNonZero = true;
+        }
+        if (size > 600) {
+          // We read logs first then check throttler, so if the buffer quota limiter doesn't
+          // take effect, it will push many logs and exceed the quota.
+          testQuotaPass = false;
+        }
+        Threads.sleep(50);
+      }
+    });
+    watcher.start();
+
+    try(Table t1 = utility1.getConnection().getTable(tableName);
+        Table t2 = utility2.getConnection().getTable(tableName)) {
+      for (int i = 0; i < 50; i++) {
+        Put put = new Put(ROWS[i]);
+        put.addColumn(famName, VALUE, VALUE);
+        t1.put(put);
+      }
+      long start = EnvironmentEdgeManager.currentTime();
+      while (EnvironmentEdgeManager.currentTime() - start < 180000) {
+        Scan scan = new Scan();
+        scan.setCaching(50);
+        int count = 0;
+        try (ResultScanner results = t2.getScanner(scan)) {
+          for (Result result : results) {
+            count++;
+          }
+        }
+        if (count < 50) {
+          LOG.info("Waiting all logs pushed to slave. Expected 50 , actual " + count);
+          Threads.sleep(200);
+          continue;
+        }
+        break;
+      }
+    }
+
+    watcher.interrupt();
+    Assert.assertTrue(testQuotaPass);
+    Assert.assertTrue(testQuotaNonZero);
+  }
+
+  private List<Integer> getRowNumbers(List<Cell> cells) {
+    List<Integer> listOfRowNumbers = new ArrayList<>();
+    for (Cell c : cells) {
+      listOfRowNumbers.add(Integer.parseInt(Bytes
+          .toString(c.getRowArray(), c.getRowOffset() + ROW.length,
+              c.getRowLength() - ROW.length)));
+    }
+    return listOfRowNumbers;
+  }
+}


[35/50] [abbrv] hbase git commit: HBASE-17081 Flush the entire CompactingMemStore content to disk - revert due to failure in TestHRegionWithInMemoryFlush

Posted by sy...@apache.org.
HBASE-17081 Flush the entire CompactingMemStore content to disk - revert due to failure in TestHRegionWithInMemoryFlush


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/79e5efd3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/79e5efd3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/79e5efd3

Branch: refs/heads/hbase-12439
Commit: 79e5efd35c9f3660b8c58364f25816581fb84d7a
Parents: da97569
Author: tedyu <yu...@gmail.com>
Authored: Wed Dec 28 10:53:07 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Wed Dec 28 10:53:07 2016 -0800

----------------------------------------------------------------------
 .../hbase/regionserver/AbstractMemStore.java    |  35 +-
 .../hbase/regionserver/CompactingMemStore.java  |  83 ++---
 .../hbase/regionserver/CompactionPipeline.java  |  34 +-
 .../regionserver/CompositeImmutableSegment.java | 352 -------------------
 .../hbase/regionserver/DefaultMemStore.java     |  23 +-
 .../hadoop/hbase/regionserver/HRegion.java      |   5 +-
 .../hbase/regionserver/ImmutableSegment.java    |  23 +-
 .../hbase/regionserver/MemStoreCompactor.java   |   4 +-
 .../hadoop/hbase/regionserver/MemstoreSize.java |  25 +-
 .../hadoop/hbase/regionserver/Segment.java      |  21 +-
 .../hbase/regionserver/SegmentFactory.java      |  10 -
 .../regionserver/TestCompactingMemStore.java    |   8 +-
 .../hbase/regionserver/TestDefaultMemStore.java |  12 +-
 .../TestWalAndCompactingMemStoreFlush.java      | 238 ++++++-------
 14 files changed, 175 insertions(+), 698 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index 8564045..225dd73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -159,12 +159,14 @@ public abstract class AbstractMemStore implements MemStore {
   public String toString() {
     StringBuffer buf = new StringBuffer();
     int i = 1;
-
-    for (Segment segment : getSegments()) {
-      buf.append("Segment (" + i + ") " + segment.toString() + "; ");
-      i++;
+    try {
+      for (Segment segment : getSegments()) {
+        buf.append("Segment (" + i + ") " + segment.toString() + "; ");
+        i++;
+      }
+    } catch (IOException e){
+      return e.toString();
     }
-
     return buf.toString();
   }
 
@@ -230,7 +232,6 @@ public abstract class AbstractMemStore implements MemStore {
    * @return Next row or null if none found.  If one found, will be a new
    * KeyValue -- can be destroyed by subsequent calls to this method.
    */
-  @VisibleForTesting
   protected Cell getNextRow(final Cell key,
       final NavigableSet<Cell> set) {
     Cell result = null;
@@ -248,26 +249,6 @@ public abstract class AbstractMemStore implements MemStore {
     return result;
   }
 
-  /**
-   * @param cell Find the row that comes after this one.  If null, we return the
-   *             first.
-   * @return Next row or null if none found.
-   */
-  @VisibleForTesting
-  Cell getNextRow(final Cell cell) {
-    Cell lowest = null;
-    List<Segment> segments = getSegments();
-    for (Segment segment : segments) {
-      if (lowest == null) {
-        //TODO: we may want to move the getNextRow ability to the segment
-        lowest = getNextRow(cell, segment.getCellSet());
-      } else {
-        lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));
-      }
-    }
-    return lowest;
-  }
-
   private Cell maybeCloneWithAllocator(Cell cell) {
     return active.maybeCloneWithAllocator(cell);
   }
@@ -326,6 +307,6 @@ public abstract class AbstractMemStore implements MemStore {
   /**
    * @return an ordered list of segments from most recent to oldest in memstore
    */
-  protected abstract List<Segment> getSegments();
+  protected abstract List<Segment> getSegments() throws IOException;
 
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 1cd30dd..f8192a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -72,7 +72,6 @@ public class CompactingMemStore extends AbstractMemStore {
   private final AtomicBoolean inMemoryFlushInProgress = new AtomicBoolean(false);
   @VisibleForTesting
   private final AtomicBoolean allowCompaction = new AtomicBoolean(true);
-  private boolean compositeSnapshot = true;
 
   public static final long DEEP_OVERHEAD = AbstractMemStore.DEEP_OVERHEAD
       + 6 * ClassSize.REFERENCE // Store, RegionServicesForStores, CompactionPipeline,
@@ -161,12 +160,7 @@ public class CompactingMemStore extends AbstractMemStore {
       stopCompaction();
       pushActiveToPipeline(this.active);
       snapshotId = EnvironmentEdgeManager.currentTime();
-      // in both cases whatever is pushed to snapshot is cleared from the pipeline
-      if (compositeSnapshot) {
-        pushPipelineToSnapshot();
-      } else {
-        pushTailToSnapshot();
-      }
+      pushTailToSnapshot();
     }
     return new MemStoreSnapshot(snapshotId, this.snapshot);
   }
@@ -179,13 +173,8 @@ public class CompactingMemStore extends AbstractMemStore {
   public MemstoreSize getFlushableSize() {
     MemstoreSize snapshotSize = getSnapshotSize();
     if (snapshotSize.getDataSize() == 0) {
-      // if snapshot is empty the tail of the pipeline (or everything in the memstore) is flushed
-      if (compositeSnapshot) {
-        snapshotSize = pipeline.getPipelineSize();
-        snapshotSize.incMemstoreSize(this.active.keySize(), this.active.heapOverhead());
-      } else {
-        snapshotSize = pipeline.getTailSize();
-      }
+      // if snapshot is empty the tail of the pipeline is flushed
+      snapshotSize = pipeline.getTailSize();
     }
     return snapshotSize.getDataSize() > 0 ? snapshotSize
         : new MemstoreSize(this.active.keySize(), this.active.heapOverhead());
@@ -224,28 +213,16 @@ public class CompactingMemStore extends AbstractMemStore {
     }
   }
 
-  // the getSegments() method is used for tests only
-  @VisibleForTesting
   @Override
   public List<Segment> getSegments() {
     List<Segment> pipelineList = pipeline.getSegments();
     List<Segment> list = new ArrayList<Segment>(pipelineList.size() + 2);
     list.add(this.active);
     list.addAll(pipelineList);
-    list.addAll(this.snapshot.getAllSegments());
-
+    list.add(this.snapshot);
     return list;
   }
 
-  // the following three methods allow to manipulate the settings of composite snapshot
-  public void setCompositeSnapshot(boolean useCompositeSnapshot) {
-    this.compositeSnapshot = useCompositeSnapshot;
-  }
-
-  public boolean isCompositeSnapshot() {
-    return this.compositeSnapshot;
-  }
-
   public boolean swapCompactedSegments(VersionedSegmentsList versionedList, ImmutableSegment result,
       boolean merge) {
     return pipeline.swap(versionedList, result, !merge);
@@ -285,20 +262,18 @@ public class CompactingMemStore extends AbstractMemStore {
    * Scanners are ordered from 0 (oldest) to newest in increasing order.
    */
   public List<KeyValueScanner> getScanners(long readPt) throws IOException {
-
-    int order = 1;                        // for active segment
-    order += pipeline.size();             // for all segments in the pipeline
-    order += snapshot.getNumOfSegments(); // for all segments in the snapshot
-    // TODO: check alternatives to using this order
-    // The list of elements in pipeline + the active element + the snapshot segments
-    // The order is the Segment ordinal
-    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(order);
-    list.add(this.active.getScanner(readPt, order));
-    order--;
-    list.addAll(pipeline.getScanners(readPt,order));
-    order -= pipeline.size();
-    list.addAll(snapshot.getScanners(readPt,order));
-    return Collections.<KeyValueScanner>singletonList(new MemStoreScanner(getComparator(), list));
+    List<Segment> pipelineList = pipeline.getSegments();
+    long order = pipelineList.size();
+    // The list of elements in pipeline + the active element + the snapshot segment
+    // TODO : This will change when the snapshot is made of more than one element
+    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(pipelineList.size() + 2);
+    list.add(this.active.getScanner(readPt, order + 1));
+    for (Segment item : pipelineList) {
+      list.add(item.getScanner(readPt, order));
+      order--;
+    }
+    list.add(this.snapshot.getScanner(readPt, order));
+    return Collections.<KeyValueScanner> singletonList(new MemStoreScanner(getComparator(), list));
   }
 
   /**
@@ -405,14 +380,6 @@ public class CompactingMemStore extends AbstractMemStore {
     }
   }
 
-  private void pushPipelineToSnapshot() {
-    List<ImmutableSegment> segments = pipeline.drain();
-    if (!segments.isEmpty()) {
-      this.snapshot =
-          SegmentFactory.instance().createCompositeImmutableSegment(getComparator(),segments);
-    }
-  }
-
   private RegionServicesForStores getRegionServices() {
     return regionServices;
   }
@@ -460,6 +427,24 @@ public class CompactingMemStore extends AbstractMemStore {
     compactor.initiateAction(compactionType);
   }
 
+  /**
+   * @param cell Find the row that comes after this one.  If null, we return the
+   *             first.
+   * @return Next row or null if none found.
+   */
+  Cell getNextRow(final Cell cell) {
+    Cell lowest = null;
+    List<Segment> segments = getSegments();
+    for (Segment segment : segments) {
+      if (lowest == null) {
+        lowest = getNextRow(cell, segment.getCellSet());
+      } else {
+        lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));
+      }
+    }
+    return lowest;
+  }
+
   // debug method
   public void debug() {
     String msg = "active size=" + this.active.keySize();

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index 2fd2a14..6676170 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -18,7 +18,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -78,19 +77,6 @@ public class CompactionPipeline {
     }
   }
 
-  public List<ImmutableSegment> drain() {
-    int drainSize = pipeline.size();
-    List<ImmutableSegment> result = new ArrayList<ImmutableSegment>(drainSize);
-    synchronized (pipeline){
-      version++;
-      for(int i=0; i<drainSize; i++) {
-        ImmutableSegment segment = this.pipeline.removeFirst();
-        result.add(i,segment);
-      }
-      return result;
-    }
-  }
-
   public VersionedSegmentsList getVersionedList() {
     synchronized (pipeline){
       LinkedList<ImmutableSegment> segmentList = new LinkedList<ImmutableSegment>(pipeline);
@@ -207,7 +193,8 @@ public class CompactionPipeline {
 
   public List<Segment> getSegments() {
     synchronized (pipeline){
-      return new LinkedList<Segment>(pipeline);
+      List<Segment> res = new LinkedList<Segment>(pipeline);
+      return res;
     }
   }
 
@@ -215,18 +202,6 @@ public class CompactionPipeline {
     return pipeline.size();
   }
 
-  public List<KeyValueScanner> getScanners(long readPoint, long order) {
-    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(this.pipeline.size());
-    for (Segment segment : this.pipeline) {
-      scanners.add(segment.getScanner(readPoint, order));
-      // The order is the Segment ordinal
-      order--;
-      assert order>=0; // order should never be negative so this is just a sanity check
-    }
-    return scanners;
-  }
-
-
   public long getMinSequenceId() {
     long minSequenceId = Long.MAX_VALUE;
     if (!isEmpty()) {
@@ -240,11 +215,6 @@ public class CompactionPipeline {
     return new MemstoreSize(pipeline.peekLast().keySize(), pipeline.peekLast().heapOverhead());
   }
 
-  public MemstoreSize getPipelineSize() {
-    if (isEmpty()) return MemstoreSize.EMPTY_SIZE;
-    return new MemstoreSize(getSegmentsKeySize(pipeline), getSegmentsHeapOverhead(pipeline));
-  }
-
   private void swapSuffix(List<ImmutableSegment> suffix, ImmutableSegment segment,
       boolean closeSegmentsInSuffix) {
     version++;

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
deleted file mode 100644
index 4fdd2d0..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
+++ /dev/null
@@ -1,352 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellScanner;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Scan;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.SortedSet;
-
-/**
- * The CompositeImmutableSegments is created as a collection of ImmutableSegments and supports
- * the interface of a single ImmutableSegments.
- * The CompositeImmutableSegments is planned to be used only as a snapshot,
- * thus only relevant interfaces are supported
- */
-@InterfaceAudience.Private
-public class CompositeImmutableSegment extends ImmutableSegment {
-
-  private final List<ImmutableSegment> segments;
-  private final CellComparator comparator;
-  // CompositeImmutableSegment is used for snapshots and snapshot should
-  // support getTimeRangeTracker() interface.
-  // Thus we hold a constant TRT build in the construction time from TRT of the given segments.
-  private final TimeRangeTracker timeRangeTracker;
-  private long keySize = 0;
-
-  // This scanner need to be remembered in order to close it when the snapshot is cleared.
-  // Initially CollectionBackedScanner didn't raise the scanner counters thus there was no
-  // need to close it. Now when MemStoreScanner is used instead we need to decrease the
-  // scanner counters.
-  private KeyValueScanner flushingScanner = null;
-
-  public CompositeImmutableSegment(CellComparator comparator, List<ImmutableSegment> segments) {
-    super(comparator);
-    this.comparator = comparator;
-    this.segments = segments;
-    this.timeRangeTracker = new TimeRangeTracker();
-    for (ImmutableSegment s : segments) {
-      this.timeRangeTracker.includeTimestamp(s.getTimeRangeTracker().getMax());
-      this.timeRangeTracker.includeTimestamp(s.getTimeRangeTracker().getMin());
-      this.keySize += s.keySize();
-    }
-  }
-
-  @VisibleForTesting
-  public List<Segment> getAllSegments() {
-    return new LinkedList<Segment>(segments);
-  }
-
-  public long getNumOfSegments() {
-    return segments.size();
-  }
-
-  /**
-   * Builds a special scanner for the MemStoreSnapshot object that is different than the
-   * general segment scanner.
-   * @return a special scanner for the MemStoreSnapshot object
-   */
-  public KeyValueScanner getKeyValueScanner() {
-    KeyValueScanner scanner;
-    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(segments.size());
-    for (ImmutableSegment s : segments) {
-      list.add(s.getScanner(Long.MAX_VALUE));
-    }
-
-    try {
-      scanner = new MemStoreScanner(getComparator(), list);
-    } catch (IOException ie) {
-      throw new IllegalStateException(ie);
-    }
-
-    flushingScanner = scanner;
-    return scanner;
-  }
-
-  @Override
-  public List<KeyValueScanner> getScanners(long readPoint, long order) {
-    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(this.segments.size());
-    for (Segment segment : this.segments) {
-      scanners.add(segment.getScanner(readPoint, order));
-      // The order is the Segment ordinal
-      order--;
-      // order should never be negative so this is just a sanity check
-      order = (order<0) ? 0 : order;
-    }
-    return scanners;
-  }
-
-  /**
-   * @return whether the segment has any cells
-   */
-  public boolean isEmpty() {
-    for (ImmutableSegment s : segments) {
-      if (!s.isEmpty()) return false;
-    }
-    return true;
-  }
-
-  /**
-   * @return number of cells in segment
-   */
-  public int getCellsCount() {
-    int result = 0;
-    for (ImmutableSegment s : segments) {
-      result += s.getCellsCount();
-    }
-    return result;
-  }
-
-  /**
-   * @return the first cell in the segment that has equal or greater key than the given cell
-   */
-  public Cell getFirstAfter(Cell cell) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  /**
-   * Closing a segment before it is being discarded
-   */
-  public void close() {
-    if (flushingScanner != null) {
-      flushingScanner.close();
-      flushingScanner = null;
-    }
-    for (ImmutableSegment s : segments) {
-      s.close();
-    }
-  }
-
-  /**
-   * If the segment has a memory allocator the cell is being cloned to this space, and returned;
-   * otherwise the given cell is returned
-   * @return either the given cell or its clone
-   */
-  public Cell maybeCloneWithAllocator(Cell cell) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  public boolean shouldSeek(Scan scan, long oldestUnexpiredTS){
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  public long getMinTimestamp(){
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  /**
-   * Creates the scanner for the given read point
-   * @return a scanner for the given read point
-   */
-  public KeyValueScanner getScanner(long readPoint) {
-    KeyValueScanner resultScanner;
-    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(segments.size());
-    for (ImmutableSegment s : segments) {
-      list.add(s.getScanner(readPoint));
-    }
-
-    try {
-      resultScanner = new MemStoreScanner(getComparator(), list);
-    } catch (IOException ie) {
-      throw new IllegalStateException(ie);
-    }
-
-    return resultScanner;
-  }
-
-  /**
-   * Creates the scanner for the given read point, and a specific order in a list
-   * @return a scanner for the given read point
-   */
-  public KeyValueScanner getScanner(long readPoint, long order) {
-    KeyValueScanner resultScanner;
-    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(segments.size());
-    for (ImmutableSegment s : segments) {
-      list.add(s.getScanner(readPoint,order));
-    }
-
-    try {
-      resultScanner = new MemStoreScanner(getComparator(), list);
-    } catch (IOException ie) {
-      throw new IllegalStateException(ie);
-    }
-
-    return resultScanner;
-  }
-
-  public boolean isTagsPresent() {
-    for (ImmutableSegment s : segments) {
-      if (s.isTagsPresent()) return true;
-    }
-    return false;
-  }
-
-  public void incScannerCount() {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  public void decScannerCount() {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  /**
-   * Setting the CellSet of the segment - used only for flat immutable segment for setting
-   * immutable CellSet after its creation in immutable segment constructor
-   * @return this object
-   */
-
-  protected CompositeImmutableSegment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  /**
-   * @return Sum of all cell's size.
-   */
-  public long keySize() {
-    return this.keySize;
-  }
-
-  /**
-   * @return The heap overhead of this segment.
-   */
-  public long heapOverhead() {
-    long result = 0;
-    for (ImmutableSegment s : segments) {
-      result += s.heapOverhead();
-    }
-    return result;
-  }
-
-  /**
-   * Updates the heap size counter of the segment by the given delta
-   */
-  protected void incSize(long delta, long heapOverhead) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  protected void incHeapOverheadSize(long delta) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  public long getMinSequenceId() {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  public TimeRangeTracker getTimeRangeTracker() {
-    return this.timeRangeTracker;
-  }
-
-  //*** Methods for SegmentsScanner
-  public Cell last() {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  public Iterator<Cell> iterator() {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  public SortedSet<Cell> headSet(Cell firstKeyOnRow) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  public int compare(Cell left, Cell right) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  public int compareRows(Cell left, Cell right) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  /**
-   * @return a set of all cells in the segment
-   */
-  protected CellSet getCellSet() {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  /**
-   * Returns the Cell comparator used by this segment
-   * @return the Cell comparator used by this segment
-   */
-  protected CellComparator getComparator() {
-    return comparator;
-  }
-
-  protected void internalAdd(Cell cell, boolean mslabUsed, MemstoreSize memstoreSize) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,
-      MemstoreSize memstoreSize) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  protected long heapOverheadChange(Cell cell, boolean succ) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  /**
-   * Returns a subset of the segment cell set, which starts with the given cell
-   * @param firstCell a cell in the segment
-   * @return a subset of the segment cell set, which starts with the given cell
-   */
-  protected SortedSet<Cell> tailSet(Cell firstCell) {
-    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
-  }
-
-  // Debug methods
-  /**
-   * Dumps all cells of the segment into the given log
-   */
-  void dump(Log log) {
-    for (ImmutableSegment s : segments) {
-      s.dump(log);
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuilder sb =
-        new StringBuilder("This is CompositeImmutableSegment and those are its segments:: ");
-    for (ImmutableSegment s : segments) {
-      sb.append(s.toString());
-    }
-    return sb.toString();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index 76442e1..d4e6e12 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -128,20 +127,30 @@ public class DefaultMemStore extends AbstractMemStore {
   public List<KeyValueScanner> getScanners(long readPt) throws IOException {
     List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(2);
     list.add(this.active.getScanner(readPt, 1));
-    list.addAll(this.snapshot.getScanners(readPt, 0));
-    return Collections.<KeyValueScanner> singletonList(new MemStoreScanner(getComparator(), list));
+    list.add(this.snapshot.getScanner(readPt, 0));
+    return Collections.<KeyValueScanner> singletonList(
+      new MemStoreScanner(getComparator(), list));
   }
 
-  // the getSegments() method is used for tests only
-  @VisibleForTesting
   @Override
-  protected List<Segment> getSegments() {
+  protected List<Segment> getSegments() throws IOException {
     List<Segment> list = new ArrayList<Segment>(2);
     list.add(this.active);
-    list.addAll(this.snapshot.getAllSegments());
+    list.add(this.snapshot);
     return list;
   }
 
+  /**
+   * @param cell Find the row that comes after this one.  If null, we return the
+   * first.
+   * @return Next row or null if none found.
+   */
+  Cell getNextRow(final Cell cell) {
+    return getLowest(
+        getNextRow(cell, this.active.getCellSet()),
+        getNextRow(cell, this.snapshot.getCellSet()));
+  }
+
   @Override public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent) {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b664a4a..e11a31c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL;
 import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
@@ -6484,8 +6483,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         final Configuration conf, final HTableDescriptor hTableDescriptor,
         final WAL wal, final boolean initialize)
   throws IOException {
-    LOG.info("creating HRegion " + info.getTable().getNameAsString() + " HTD == " + hTableDescriptor
-        + " RootDir = " + rootDir +
+    LOG.info("creating HRegion " + info.getTable().getNameAsString()
+        + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
         " Table name == " + info.getTable().getNameAsString());
     FileSystem fs = FileSystem.get(conf);
     Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
index 547d332..4cdb29d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
@@ -30,10 +30,6 @@ import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.CollectionBackedScanner;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.LinkedList;
-import java.util.List;
 
 /**
  * ImmutableSegment is an abstract class that extends the API supported by a {@link Segment},
@@ -73,14 +69,6 @@ public class ImmutableSegment extends Segment {
 
   /////////////////////  CONSTRUCTORS  /////////////////////
   /**------------------------------------------------------------------------
-   * Empty C-tor to be used only for CompositeImmutableSegment
-   */
-  protected ImmutableSegment(CellComparator comparator) {
-    super(comparator);
-    this.timeRange = null;
-  }
-
-  /**------------------------------------------------------------------------
    * Copy C-tor to be used when new ImmutableSegment is being built from a Mutable one.
    * This C-tor should be used when active MutableSegment is pushed into the compaction
    * pipeline and becomes an ImmutableSegment.
@@ -154,15 +142,6 @@ public class ImmutableSegment extends Segment {
     return this.timeRange.getMin();
   }
 
-  public long getNumOfSegments() {
-    return 1;
-  }
-
-  public List<Segment> getAllSegments() {
-    List<Segment> res = new ArrayList<Segment>(Arrays.asList(this));
-    return res;
-  }
-
   /**------------------------------------------------------------------------
    * Change the CellSet of this ImmutableSegment from one based on ConcurrentSkipListMap to one
    * based on CellArrayMap.
@@ -253,7 +232,7 @@ public class ImmutableSegment extends Segment {
     Cell curCell;
     int idx = 0;
     // create this segment scanner with maximal possible read point, to go over all Cells
-    KeyValueScanner segmentScanner = this.getScanner(Long.MAX_VALUE);
+    SegmentScanner segmentScanner = this.getScanner(Long.MAX_VALUE);
 
     try {
       while ((curCell = segmentScanner.next()) != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index 29fd78a..84f88f0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -56,7 +56,7 @@ public class MemStoreCompactor {
 
   // The upper bound for the number of segments we store in the pipeline prior to merging.
   // This constant is subject to further experimentation.
-  private static final int THRESHOLD_PIPELINE_SEGMENTS = 30; // stands here for infinity
+  private static final int THRESHOLD_PIPELINE_SEGMENTS = 1;
 
   private static final Log LOG = LogFactory.getLog(MemStoreCompactor.class);
 
@@ -276,8 +276,6 @@ public class MemStoreCompactor {
     case NONE: action = Action.NOOP;
       break;
     case BASIC: action = Action.MERGE;
-      // if multiple segments appear in the pipeline flush them to the disk later together
-      compactingMemStore.setCompositeSnapshot(true);
       break;
     case EAGER: action = Action.COMPACT;
       break;
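
To summarize the behavior after this hunk, a self-contained sketch of the policy-to-action mapping; the enums below are stand-ins, not the HBase classes themselves.

    enum MemoryCompactionPolicy { NONE, BASIC, EAGER } // stand-in for HColumnDescriptor.MemoryCompaction
    enum Action { NOOP, MERGE, COMPACT }               // stand-in for MemStoreCompactor.Action

    static Action chooseAction(MemoryCompactionPolicy policy) {
      switch (policy) {
        case BASIC: return Action.MERGE;   // merge pipeline segments; cell data is not rewritten
        case EAGER: return Action.COMPACT; // rewrite cells, dropping redundant versions
        default:    return Action.NOOP;    // NONE: leave the pipeline untouched
      }
    }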

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
index fa7c342..77cea51 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
@@ -25,32 +25,19 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 @InterfaceAudience.Private
 public class MemstoreSize {
 
+  static final MemstoreSize EMPTY_SIZE = new MemstoreSize();
+
   private long dataSize;
   private long heapOverhead;
-  final private boolean isEmpty;
-
-  static final MemstoreSize EMPTY_SIZE = new MemstoreSize(true);
 
   public MemstoreSize() {
     dataSize = 0;
     heapOverhead = 0;
-    isEmpty = false;
-  }
-
-  public MemstoreSize(boolean isEmpty) {
-    dataSize = 0;
-    heapOverhead = 0;
-    this.isEmpty = isEmpty;
-  }
-
-  public boolean isEmpty() {
-    return isEmpty;
   }
 
   public MemstoreSize(long dataSize, long heapOverhead) {
     this.dataSize = dataSize;
     this.heapOverhead = heapOverhead;
-    this.isEmpty = false;
   }
 
   public void incMemstoreSize(long dataSize, long heapOverhead) {
@@ -74,13 +61,11 @@ public class MemstoreSize {
   }
 
   public long getDataSize() {
-
-    return isEmpty ? 0 : dataSize;
+    return dataSize;
   }
 
   public long getHeapOverhead() {
-
-    return isEmpty ? 0 : heapOverhead;
+    return heapOverhead;
   }
 
   @Override
@@ -89,7 +74,7 @@ public class MemstoreSize {
       return false;
     }
     MemstoreSize other = (MemstoreSize) obj;
-    return getDataSize() == other.dataSize && getHeapOverhead() == other.heapOverhead;
+    return this.dataSize == other.dataSize && this.heapOverhead == other.heapOverhead;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
index 8581517..afdfe6f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
@@ -18,9 +18,7 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.util.ArrayList;
 import java.util.Iterator;
-import java.util.List;
 import java.util.SortedSet;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
@@ -66,15 +64,6 @@ public abstract class Segment {
   protected final TimeRangeTracker timeRangeTracker;
   protected volatile boolean tagsPresent;
 
-  // Empty constructor to be used when Segment is used as interface,
-  // and there is no need in true Segments state
-  protected Segment(CellComparator comparator) {
-    this.comparator = comparator;
-    this.dataSize = new AtomicLong(0);
-    this.heapOverhead = new AtomicLong(0);
-    this.timeRangeTracker = new TimeRangeTracker();
-  }
-
   // This constructor is used to create empty Segments.
   protected Segment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB) {
     this.cellSet.set(cellSet);
@@ -102,7 +91,7 @@ public abstract class Segment {
    * Creates the scanner for the given read point
    * @return a scanner for the given read point
    */
-  public KeyValueScanner getScanner(long readPoint) {
+  public SegmentScanner getScanner(long readPoint) {
     return new SegmentScanner(this, readPoint);
   }
 
@@ -110,16 +99,10 @@ public abstract class Segment {
    * Creates the scanner for the given read point, and a specific order in a list
    * @return a scanner for the given read point
    */
-  public KeyValueScanner getScanner(long readPoint, long order) {
+  public SegmentScanner getScanner(long readPoint, long order) {
     return new SegmentScanner(this, readPoint, order);
   }
 
-  public List<KeyValueScanner> getScanners(long readPoint, long order) {
-    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(1);
-    scanners.add(getScanner(readPoint, order));
-    return scanners;
-  }
-
   /**
    * @return whether the segment has any cells
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
index 7e53026..01e07ef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
@@ -47,13 +47,6 @@ public final class SegmentFactory {
     return new ImmutableSegment(comparator, iterator, MemStoreLAB.newInstance(conf));
   }
 
-  // create composite immutable segment from a list of segments
-  public CompositeImmutableSegment createCompositeImmutableSegment(
-      final CellComparator comparator, List<ImmutableSegment> segments) {
-    return new CompositeImmutableSegment(comparator, segments);
-
-  }
-
   // create new flat immutable segment from compacting old immutable segments
   public ImmutableSegment createImmutableSegmentByCompaction(final Configuration conf,
       final CellComparator comparator, MemStoreSegmentsIterator iterator, int numOfCells,
@@ -109,9 +102,6 @@ public final class SegmentFactory {
 
   private MemStoreLAB getMergedMemStoreLAB(Configuration conf, List<ImmutableSegment> segments) {
     List<MemStoreLAB> mslabs = new ArrayList<MemStoreLAB>();
-    if (!conf.getBoolean(MemStoreLAB.USEMSLAB_KEY, MemStoreLAB.USEMSLAB_DEFAULT)) {
-      return null;
-    }
     for (ImmutableSegment segment : segments) {
       mslabs.add(segment.getMemStoreLAB());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index 0c1880c..b0b63a9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -137,7 +137,6 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     this.memstore = new CompactingMemStore(HBaseConfiguration.create(),
         CellComparator.COMPARATOR, store, regionServicesForStores,
         HColumnDescriptor.MemoryCompaction.EAGER);
-
     this.memstore.add(kv1.clone(), null);
     // As compaction is starting in the background the repetition
     // of the k1 might be removed BUT the scanners created earlier
@@ -178,9 +177,6 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     // Add more versions to make it a little more interesting.
     Thread.sleep(1);
     addRows(this.memstore);
-    ((CompactingMemStore)this.memstore).setCompositeSnapshot(true);
-
-
     Cell closestToEmpty = ((CompactingMemStore)this.memstore).getNextRow(KeyValue.LOWESTKEY);
     assertTrue(CellComparator.COMPARATOR.compareRows(closestToEmpty,
         new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
@@ -281,9 +277,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
 
     this.memstore.upsert(l, 2, null);// readpoint is 2
     MemstoreSize newSize = this.memstore.size();
-    assertTrue("\n<<< The old size is " + oldSize.getDataSize() + " and the new size is "
-        + newSize.getDataSize() + "\n",
-        newSize.getDataSize() > oldSize.getDataSize());
+    assert (newSize.getDataSize() > oldSize.getDataSize());
     //The kv1 should be removed.
     assert (memstore.getActive().getCellsCount() == 2);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 93d28d5..27ed295 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -65,6 +65,8 @@ import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
 import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.lang.management.MemoryMXBean;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -178,10 +180,6 @@ public class TestDefaultMemStore {
     // Now assert can count same number even if a snapshot mid-scan.
     s = new StoreScanner(scan, scanInfo, scanType, null, memstorescanners);
     count = 0;
-
-//    assertTrue("\n<<< The memstore scanners without snapshot are: \n" + memstorescanners
-//        + "\n",false);
-
     try {
       while (s.next(result)) {
         LOG.info(result);
@@ -209,10 +207,8 @@ public class TestDefaultMemStore {
     s = new StoreScanner(scan, scanInfo, scanType, null, memstorescanners);
     count = 0;
     int snapshotIndex = 5;
-
     try {
       while (s.next(result)) {
-
         LOG.info(result);
         // Assert the stuff is coming out in right order.
         assertTrue(CellUtil.matchingRow(result.get(0), Bytes.toBytes(count)));
@@ -220,7 +216,6 @@ public class TestDefaultMemStore {
         assertEquals("count=" + count + ", result=" + result, rowCount, result.size());
         count++;
         if (count == snapshotIndex) {
-
           MemStoreSnapshot snapshot = this.memstore.snapshot();
           this.memstore.clearSnapshot(snapshot.getId());
           // Added more rows into kvset.  But the scanner wont see these rows.
@@ -232,8 +227,7 @@ public class TestDefaultMemStore {
     } finally {
       s.close();
     }
-    assertEquals("\n<<< The row count is " + rowCount + " and the iteration count is " + count,
-        rowCount, count);
+    assertEquals(rowCount, count);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/79e5efd3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index 332a125..133c53b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -22,7 +22,13 @@ import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -32,7 +38,6 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL;
-import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -50,48 +55,40 @@ public class TestWalAndCompactingMemStoreFlush {
 
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static final Path DIR = TEST_UTIL.getDataTestDir("TestHRegion");
-  public static final TableName TABLENAME =
-      TableName.valueOf("TestWalAndCompactingMemStoreFlush", "t1");
+  public static final TableName TABLENAME = TableName.valueOf("TestWalAndCompactingMemStoreFlush",
+      "t1");
 
-  public static final byte[][] FAMILIES =
-      { Bytes.toBytes("f1"), Bytes.toBytes("f2"), Bytes.toBytes("f3"), Bytes.toBytes("f4"), Bytes.toBytes("f5") };
+  public static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2"),
+      Bytes.toBytes("f3"), Bytes.toBytes("f4"), Bytes.toBytes("f5") };
 
   public static final byte[] FAMILY1 = FAMILIES[0];
   public static final byte[] FAMILY2 = FAMILIES[1];
   public static final byte[] FAMILY3 = FAMILIES[2];
 
   private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException {
-    MemstoreSize memstrsize1 = MemstoreSize.EMPTY_SIZE;
-    assertEquals(memstrsize1.getDataSize(), 0);
-    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
-    int i = 0;
+    int i=0;
     HTableDescriptor htd = new HTableDescriptor(TABLENAME);
     for (byte[] family : FAMILIES) {
       HColumnDescriptor hcd = new HColumnDescriptor(family);
       // even column families are going to have compacted memstore
-
       if(i%2 == 0) {
         hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.valueOf(
             conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY)));
       } else {
         hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
       }
-
       htd.addFamily(hcd);
       i++;
     }
-    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
+
     HRegionInfo info = new HRegionInfo(TABLENAME, null, null, false);
     Path path = new Path(DIR, callingMethod);
-    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
-    HRegion result = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
-    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
-    return result;
+    return HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
   }
 
   // A helper function to create puts.
   private Put createPut(int familyNum, int putNum) {
-    byte[] qf = Bytes.toBytes("q" + familyNum);
+    byte[] qf  = Bytes.toBytes("q" + familyNum);
     byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum);
     byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum);
     Put p = new Put(row);
@@ -101,7 +98,7 @@ public class TestWalAndCompactingMemStoreFlush {
 
   // A helper function to create double puts, so something can be compacted later.
   private Put createDoublePut(int familyNum, int putNum) {
-    byte[] qf = Bytes.toBytes("q" + familyNum);
+    byte[] qf  = Bytes.toBytes("q" + familyNum);
     byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum);
     byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum);
     Put p = new Put(row);
@@ -125,21 +122,16 @@ public class TestWalAndCompactingMemStoreFlush {
     byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum);
     assertNotNull(("Missing Put#" + putNum + " for CF# " + familyNum), r.getFamilyMap(family));
     assertNotNull(("Missing Put#" + putNum + " for CF# " + familyNum),
-        r.getFamilyMap(family).get(qf));
+      r.getFamilyMap(family).get(qf));
     assertTrue(("Incorrect value for Put#" + putNum + " for CF# " + familyNum),
-        Arrays.equals(r.getFamilyMap(family).get(qf), val));
+      Arrays.equals(r.getFamilyMap(family).get(qf), val));
   }
 
-  @Before public void setUp() throws Exception {
-    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
-  }
-
-  // test selective flush with data-compaction
   @Test(timeout = 180000)
   public void testSelectiveFlushWithEager() throws IOException {
+
     // Set up the configuration
     Configuration conf = HBaseConfiguration.create();
-
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300 * 1024);
     conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
         FlushNonSloppyStoresFirstPolicy.class.getName());
@@ -183,14 +175,17 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore();
 
     // Get the overall smallest LSN in the region's memstores.
-    long smallestSeqInRegionCurrentMemstorePhaseI =
-        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseI = getWAL(region)
+        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
     String s = "\n\n----------------------------------\n"
-        + "Upon initial insert and before any flush, size of CF1 is:" + cf1MemstoreSizePhaseI + ", is CF1 compacted memstore?:"
-        + region.getStore(FAMILY1).isSloppyMemstore() + ". Size of CF2 is:" + cf2MemstoreSizePhaseI + ", is CF2 compacted memstore?:"
-        + region.getStore(FAMILY2).isSloppyMemstore() + ". Size of CF3 is:" + cf3MemstoreSizePhaseI
-        + ", is CF3 compacted memstore?:" + region.getStore(FAMILY3).isSloppyMemstore() + "\n";
+        + "Upon initial insert and before any flush, size of CF1 is:"
+        + cf1MemstoreSizePhaseI + ", is CF1 compacted memstore?:"
+        + region.getStore(FAMILY1).isSloppyMemstore() + ". Size of CF2 is:"
+        + cf2MemstoreSizePhaseI + ", is CF2 compacted memstore?:"
+        + region.getStore(FAMILY2).isSloppyMemstore() + ". Size of CF3 is:"
+        + cf3MemstoreSizePhaseI + ", is CF3 compacted memstore?:"
+        + region.getStore(FAMILY3).isSloppyMemstore() + "\n";
 
     // The overall smallest LSN in the region's memstores should be the same as
     // the LSN of the smallest edit in CF1
@@ -205,12 +200,12 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // The total memstore size should be the same as the sum of the sizes of
     // memstores of CF1, CF2 and CF3.
-    String msg = "totalMemstoreSize=" + totalMemstoreSize +
-        " cf1MemstoreSizePhaseI=" + cf1MemstoreSizePhaseI +
-        " cf2MemstoreSizePhaseI=" + cf2MemstoreSizePhaseI +
-        " cf3MemstoreSizePhaseI=" + cf3MemstoreSizePhaseI;
-    assertEquals(msg, totalMemstoreSize,
-        cf1MemstoreSizePhaseI.getDataSize() + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
+    String msg = "totalMemstoreSize="+totalMemstoreSize +
+        " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI +
+        " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI +
+        " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ;
+    assertEquals(msg, totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize()
+        + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
 
     // Flush!!!!!!!!!!!!!!!!!!!!!!
     // We have big compacting memstore CF1 and two small memstores:
@@ -230,8 +225,8 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getSizeOfMemStore();
 
-    long smallestSeqInRegionCurrentMemstorePhaseII =
-        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseII = getWAL(region)
+        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     // Find the smallest LSNs for edits wrt to each CF.
     long smallestSeqCF1PhaseII = region.getOldestSeqIdOfStore(FAMILY1);
     long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2);
@@ -265,20 +260,16 @@ public class TestWalAndCompactingMemStoreFlush {
 
     s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseII
         + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseII + ", " +
-        "the smallest sequence in CF2:" + smallestSeqCF2PhaseII + ", the smallest sequence in CF3:"
-        + smallestSeqCF3PhaseII + "\n";
+        "the smallest sequence in CF2:"
+        + smallestSeqCF2PhaseII +", the smallest sequence in CF3:" + smallestSeqCF3PhaseII + "\n";
 
     // How much does the CF1 memstore occupy? Will be used later.
     MemstoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getSizeOfMemStore();
     long smallestSeqCF1PhaseIII = region.getOldestSeqIdOfStore(FAMILY1);
 
     s = s + "----After more puts into CF1 its size is:" + cf1MemstoreSizePhaseIII
-        + ", and its sequence is:" + smallestSeqCF1PhaseIII + " ----\n"
-        + "The sizes of snapshots are cf1: " + region.getStore(FAMILY1).getFlushedCellsSize()
-        + ", cf2: " + region.getStore(FAMILY2).getFlushedCellsSize() + ", cf3: " + region
-        .getStore(FAMILY3).getFlushedCellsSize() + ", cf4: " + region.getStore(FAMILIES[4])
-        .getFlushedCellsSize() + "; the entire region size is: " + region.getMemstoreSize() + "\n";
-    ;
+        + ", and its sequence is:" + smallestSeqCF1PhaseIII + " ----\n" ;
+
 
     // Flush!!!!!!!!!!!!!!!!!!!!!!
     // Flush again, CF1 is flushed to disk
@@ -291,22 +282,21 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getSizeOfMemStore();
 
-    long smallestSeqInRegionCurrentMemstorePhaseIV =
-        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseIV = getWAL(region)
+        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     long smallestSeqCF1PhaseIV = region.getOldestSeqIdOfStore(FAMILY1);
     long smallestSeqCF2PhaseIV = region.getOldestSeqIdOfStore(FAMILY2);
     long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3);
 
     s = s + "----After SECOND FLUSH, CF1 size is:" + cf1MemstoreSizePhaseIV + ", CF2 size is:"
-        + cf2MemstoreSizePhaseIV + " and CF3 size is:" + cf3MemstoreSizePhaseIV + "\n" + "The sizes of snapshots are cf1: " + region.getStore(FAMILY1).getFlushedCellsSize()
-        + ", cf2: " + region.getStore(FAMILY2).getFlushedCellsSize() + ", cf3: " + region
-        .getStore(FAMILY3).getFlushedCellsSize() + ", cf4: " + region.getStore(FAMILIES[4])
-        .getFlushedCellsSize() + "\n";
+        + cf2MemstoreSizePhaseIV + " and CF3 size is:" + cf3MemstoreSizePhaseIV
+        + "\n";
 
     s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIV
         + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIV + ", " +
-        "the smallest sequence in CF2:" + smallestSeqCF2PhaseIV + ", the smallest sequence in CF3:"
-        + smallestSeqCF3PhaseIV + "\n" + "the entire region size is: " + region.getMemstoreSize() + "\n";
+        "the smallest sequence in CF2:"
+        + smallestSeqCF2PhaseIV +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIV
+        + "\n";
 
     // CF1's pipeline component (inserted before first flush) should be flushed to disk
     // CF2 should be flushed to disk
@@ -331,21 +321,13 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getSizeOfMemStore();
     MemstoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getSizeOfMemStore();
-    long smallestSeqInRegionCurrentMemstorePhaseV =
-        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region)
+        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
-    assertEquals(MemstoreSize.EMPTY_SIZE, cf1MemstoreSizePhaseV);
+    assertEquals(MemstoreSize.EMPTY_SIZE , cf1MemstoreSizePhaseV);
     assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseV);
     assertEquals(MemstoreSize.EMPTY_SIZE, cf3MemstoreSizePhaseV);
 
-    s = s + "----AFTER THIRD FLUSH, the entire region size is:" + region.getMemstoreSize()
-        + " (empty memstore size is " + MemstoreSize.EMPTY_SIZE
-        + "), while the sizes of each memstore are as following \ncf1: " + cf1MemstoreSizePhaseV
-        + ", cf2: " + cf2MemstoreSizePhaseV + ", cf3: " + cf3MemstoreSizePhaseV + ", cf4: " + region
-        .getStore(FAMILIES[4]).getSizeOfMemStore() + "\n" + "The sizes of snapshots are cf1: " + region.getStore(FAMILY1).getFlushedCellsSize()
-        + ", cf2: " + region.getStore(FAMILY2).getFlushedCellsSize() + ", cf3: " + region.getStore(FAMILY3).getFlushedCellsSize()
-        + ", cf4: " + region.getStore(FAMILIES[4]).getFlushedCellsSize() + "\n";
-
     // What happens when we hit the memstore limit, but we are not able to find
     // any Column Family above the threshold?
     // In that case, we should flush all the CFs.
@@ -363,22 +345,24 @@ public class TestWalAndCompactingMemStoreFlush {
 
     region.flush(false);
 
-    s = s + "----AFTER FORTH FLUSH, The smallest sequence in region WAL is: "
+    s = s + "----AFTER THIRD AND FOURTH FLUSH, The smallest sequence in region WAL is: "
         + smallestSeqInRegionCurrentMemstorePhaseV
         + ". After additional inserts and last flush, the entire region size is:" + region
-        .getMemstoreSize() + "\n----------------------------------\n";
+        .getMemstoreSize()
+        + "\n----------------------------------\n";
 
     // Since we won't find any CF above the threshold, and hence no specific
     // store to flush, we should flush all the memstores
     // Also compacted memstores are flushed to disk.
-    assertEquals(s, 0, region.getMemstoreSize());
+    assertEquals(0, region.getMemstoreSize());
     System.out.println(s);
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
   /*------------------------------------------------------------------------------*/
   /* Check the same as above but for index-compaction type of compacting memstore */
-  @Test(timeout = 180000) public void testSelectiveFlushWithIndexCompaction() throws IOException {
+  @Test(timeout = 180000)
+  public void testSelectiveFlushWithIndexCompaction() throws IOException {
 
     /*------------------------------------------------------------------------------*/
     /* SETUP */
@@ -395,7 +379,7 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // Initialize the region
     Region region = initHRegion("testSelectiveFlushWithIndexCompaction", conf);
-    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
+
     /*------------------------------------------------------------------------------*/
     /* PHASE I - insertions */
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
@@ -426,8 +410,8 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore();
     // Get the overall smallest LSN in the region's memstores.
-    long smallestSeqInRegionCurrentMemstorePhaseI =
-        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseI = getWAL(region)
+        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
     /*------------------------------------------------------------------------------*/
     /* PHASE I - validation */
@@ -443,8 +427,8 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // The total memstore size should be the same as the sum of the sizes of
     // memstores of CF1, CF2 and CF3.
-    assertEquals(totalMemstoreSizePhaseI,
-        cf1MemstoreSizePhaseI.getDataSize() + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
+    assertEquals(totalMemstoreSizePhaseI, cf1MemstoreSizePhaseI.getDataSize()
+        + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
 
     /*------------------------------------------------------------------------------*/
     /* PHASE I - Flush */
@@ -475,8 +459,8 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getSizeOfMemStore();
     MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getSizeOfMemStore();
-    long smallestSeqInRegionCurrentMemstorePhaseII =
-        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseII = getWAL(region)
+        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     // Find the smallest LSNs for edits wrt to each CF.
     long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3);
     long totalMemstoreSizePhaseII = region.getMemstoreSize();
@@ -484,13 +468,13 @@ public class TestWalAndCompactingMemStoreFlush {
     /*------------------------------------------------------------------------------*/
     /* PHASE II - validation */
     // CF1 was flushed to memory, should be flattened and take less space
-    assertEquals(cf1MemstoreSizePhaseII.getDataSize(), cf1MemstoreSizePhaseI.getDataSize());
+    assertEquals(cf1MemstoreSizePhaseII.getDataSize() , cf1MemstoreSizePhaseI.getDataSize());
     assertTrue(cf1MemstoreSizePhaseII.getHeapOverhead() < cf1MemstoreSizePhaseI.getHeapOverhead());
     // CF2 should become empty
     assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseII);
     // verify that CF3 was flushed to memory and was not compacted (this is an approximation check)
     // if compacted CF# should be at least twice less because its every key was duplicated
-    assertEquals(cf3MemstoreSizePhaseII.getDataSize(), cf3MemstoreSizePhaseI.getDataSize());
+    assertEquals(cf3MemstoreSizePhaseII.getDataSize() , cf3MemstoreSizePhaseI.getDataSize());
     assertTrue(
         cf3MemstoreSizePhaseI.getHeapOverhead() / 2 < cf3MemstoreSizePhaseII.getHeapOverhead());
 
@@ -500,8 +484,8 @@ public class TestWalAndCompactingMemStoreFlush {
     // The total memstore size should be the same as the sum of the sizes of
     // memstores of CF1, CF2 and CF3. Counting the empty active segments in CF1/2/3 and pipeline
     // items in CF1/2
-    assertEquals(totalMemstoreSizePhaseII,
-        cf1MemstoreSizePhaseII.getDataSize() + cf2MemstoreSizePhaseII.getDataSize() + cf3MemstoreSizePhaseII.getDataSize());
+    assertEquals(totalMemstoreSizePhaseII, cf1MemstoreSizePhaseII.getDataSize()
+        + cf2MemstoreSizePhaseII.getDataSize() + cf3MemstoreSizePhaseII.getDataSize());
 
     /*------------------------------------------------------------------------------*/
     /*------------------------------------------------------------------------------*/
@@ -529,8 +513,8 @@ public class TestWalAndCompactingMemStoreFlush {
     // The total memstore size should be the same as the sum of the sizes of
     // memstores of CF1, CF2 and CF3. Counting the empty active segments in CF1/2/3 and pipeline
     // items in CF1/2
-    assertEquals(totalMemstoreSizePhaseIII,
-        cf1MemstoreSizePhaseIII.getDataSize() + cf2MemstoreSizePhaseII.getDataSize() + cf3MemstoreSizePhaseII.getDataSize());
+    assertEquals(totalMemstoreSizePhaseIII, cf1MemstoreSizePhaseIII.getDataSize()
+        + cf2MemstoreSizePhaseII.getDataSize() + cf3MemstoreSizePhaseII.getDataSize());
 
     /*------------------------------------------------------------------------------*/
     /* PHASE III - Flush */
@@ -546,8 +530,8 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getSizeOfMemStore();
     MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getSizeOfMemStore();
-    long smallestSeqInRegionCurrentMemstorePhaseIV =
-        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseIV = getWAL(region)
+        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3);
 
     /*------------------------------------------------------------------------------*/
@@ -577,8 +561,8 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getSizeOfMemStore();
     MemstoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getSizeOfMemStore();
-    long smallestSeqInRegionCurrentMemstorePhaseV =
-        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region)
+        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     long totalMemstoreSizePhaseV = region.getMemstoreSize();
 
     /*------------------------------------------------------------------------------*/
@@ -633,30 +617,22 @@ public class TestWalAndCompactingMemStoreFlush {
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
-  // test WAL behavior together with selective flush while data-compaction
-  @Test(timeout = 180000) public void testDCwithWAL() throws IOException {
-
-    MemstoreSize checkSize = MemstoreSize.EMPTY_SIZE;
-    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
+  @Test(timeout = 180000)
+  public void testSelectiveFlushAndWALinDataCompaction() throws IOException {
     // Set up the configuration
     Configuration conf = HBaseConfiguration.create();
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300 * 1024);
-    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
-        FlushNonSloppyStoresFirstPolicy.class.getName());
-    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024);
+    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushNonSloppyStoresFirstPolicy.class
+        .getName());
+    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 *
+        1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
 
-    MemstoreSize memstrsize1 = MemstoreSize.EMPTY_SIZE;
-    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
     // Initialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
-
-    MemstoreSize cf2MemstoreSizePhase0 = region.getStore(FAMILY2).getSizeOfMemStore();
-    MemstoreSize cf1MemstoreSizePhase0 = region.getStore(FAMILY1).getSizeOfMemStore();
-
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
     for (int i = 1; i <= 1200; i++) {
       region.put(createPut(1, i));
@@ -676,7 +652,6 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // Find the sizes of the memstores of each CF.
     MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getSizeOfMemStore();
-    //boolean oldCF2 = region.getStore(FAMILY2).isSloppyMemstore();
     MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore();
 
@@ -687,20 +662,16 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // The total memstore size should be the same as the sum of the sizes of
     // memstores of CF1, CF2 and CF3.
-    String msg = "\n<<< totalMemstoreSize=" + totalMemstoreSize +
-        " DefaultMemStore.DEEP_OVERHEAD=" + DefaultMemStore.DEEP_OVERHEAD +
-        " cf1MemstoreSizePhaseI=" + cf1MemstoreSizePhaseI +
-        " cf2MemstoreSizePhaseI=" + cf2MemstoreSizePhaseI +
-        " cf3MemstoreSizePhaseI=" + cf3MemstoreSizePhaseI;
-    assertEquals(msg, totalMemstoreSize,
-        cf1MemstoreSizePhaseI.getDataSize() + cf2MemstoreSizePhaseI.getDataSize()
-            + cf3MemstoreSizePhaseI.getDataSize());
+    String msg = "totalMemstoreSize="+totalMemstoreSize +
+        " DefaultMemStore.DEEP_OVERHEAD="+DefaultMemStore.DEEP_OVERHEAD +
+        " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI +
+        " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI +
+        " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ;
+    assertEquals(msg, totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize()
+        + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
 
     // Flush!
     CompactingMemStore cms1 = (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore;
-    MemStore cms2 = ((HStore) region.getStore(FAMILY2)).memstore;
-    MemstoreSize memstrsize2 = cms2.getSnapshotSize();
-    MemstoreSize flshsize2 = cms2.getFlushableSize();
     CompactingMemStore cms3 = (CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore;
     cms1.flushInMemory();
     cms3.flushInMemory();
@@ -713,22 +684,15 @@ public class TestWalAndCompactingMemStoreFlush {
     long smallestSeqCF1PhaseII = region.getOldestSeqIdOfStore(FAMILY1);
     long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2);
     long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3);
-    MemstoreSize newSize = new MemstoreSize();
 
     // CF2 should have been cleared
-    assertEquals(
-        msg + "\n<<< CF2 is compacting " + ((HStore) region.getStore(FAMILY2)).memstore.isSloppy()
-            + ", snapshot and flushable size BEFORE flush " + memstrsize2 + "; " + flshsize2
-            + ", snapshot and flushable size AFTER flush " + cms2.getSnapshotSize() + "; " + cms2
-            .getFlushableSize() + "\n<<< cf2 size " + cms2.size() + "; the checked size "
-            + cf2MemstoreSizePhaseII + "; memstore empty size " + MemstoreSize.EMPTY_SIZE
-            + "; check size " + checkSize + "\n<<< first first first CF2 size "
-            + cf2MemstoreSizePhase0 + "; first first first CF1 size " + cf1MemstoreSizePhase0
-            + "; new new new size " + newSize + "\n", MemstoreSize.EMPTY_SIZE,
-        cf2MemstoreSizePhaseII);
-
-    String s = "\n\n----------------------------------\n" + "Upon initial insert and flush, LSN of CF1 is:"
-        + smallestSeqCF1PhaseII + ". LSN of CF2 is:" + smallestSeqCF2PhaseII + ". LSN of CF3 is:" + smallestSeqCF3PhaseII + ", smallestSeqInRegionCurrentMemstore:"
+    assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseII);
+
+    String s = "\n\n----------------------------------\n"
+        + "Upon initial insert and flush, LSN of CF1 is:"
+        + smallestSeqCF1PhaseII + ". LSN of CF2 is:"
+        + smallestSeqCF2PhaseII + ". LSN of CF3 is:"
+        + smallestSeqCF3PhaseII + ", smallestSeqInRegionCurrentMemstore:"
         + smallestSeqInRegionCurrentMemstorePhaseII + "\n";
 
     // Add same entries to compact them later
@@ -754,8 +718,8 @@ public class TestWalAndCompactingMemStoreFlush {
 
     s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIII
         + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIII + ", " +
-        "the smallest sequence in CF2:" + smallestSeqCF2PhaseIII + ", the smallest sequence in CF3:"
-        + smallestSeqCF3PhaseIII + "\n";
+        "the smallest sequence in CF2:"
+        + smallestSeqCF2PhaseIII +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIII + "\n";
 
     // Flush!
     cms1 = (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore;
@@ -772,22 +736,20 @@ public class TestWalAndCompactingMemStoreFlush {
 
     s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIV
         + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIV + ", " +
-        "the smallest sequence in CF2:" + smallestSeqCF2PhaseIV + ", the smallest sequence in CF3:"
-        + smallestSeqCF3PhaseIV + "\n";
+        "the smallest sequence in CF2:"
+        + smallestSeqCF2PhaseIV +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIV + "\n";
 
     // now check that the LSN of the entire WAL, of CF1 and of CF3 has progressed due to compaction
-    assertTrue(s,
-        smallestSeqInRegionCurrentMemstorePhaseIV > smallestSeqInRegionCurrentMemstorePhaseIII);
+    assertTrue(s, smallestSeqInRegionCurrentMemstorePhaseIV >
+        smallestSeqInRegionCurrentMemstorePhaseIII);
     assertTrue(smallestSeqCF1PhaseIV > smallestSeqCF1PhaseIII);
     assertTrue(smallestSeqCF3PhaseIV > smallestSeqCF3PhaseIII);
 
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
-  // test WAL behavior together with selective flush while index-compaction
   @Test(timeout = 180000)
-  public void tstICwithWAL() throws IOException {
-
+  public void testSelectiveFlushAndWALinIndexCompaction() throws IOException {
     // Set up the configuration
     Configuration conf = HBaseConfiguration.create();
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 600 * 1024);
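
The tests above share one configuration pattern; pulled out as a sketch, the knobs they exercise are the following (keys, classes and values are taken from the diff itself; only the standalone framing is added):

    Configuration conf = HBaseConfiguration.create();
    conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300 * 1024);
    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
        FlushNonSloppyStoresFirstPolicy.class.getName());
    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024);
    conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
    // EAGER selects data compaction here; BASIC would select index compaction.
    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));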


[19/50] [abbrv] hbase git commit: HBASE-16010 Put draining function through Admin API (Matt Warhaftig)

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-protocol-shaded/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index f4e7da6..0a000ee 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -568,6 +568,27 @@ message SecurityCapabilitiesResponse {
   repeated Capability capabilities = 1;
 }
 
+message ListDrainingRegionServersRequest {
+}
+
+message ListDrainingRegionServersResponse {
+ repeated ServerName server_name = 1;
+}
+
+message DrainRegionServersRequest {
+ repeated ServerName server_name = 1;
+}
+
+message DrainRegionServersResponse {
+}
+
+message RemoveDrainFromRegionServersRequest {
+ repeated ServerName server_name = 1;
+}
+
+message RemoveDrainFromRegionServersResponse {
+}
+
 service MasterService {
   /** Used by the client to get the number of regions that have received the updated schema */
   rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
@@ -863,4 +884,16 @@ service MasterService {
   /** Disable a replication peer */
   rpc DisableReplicationPeer(DisableReplicationPeerRequest)
     returns(DisableReplicationPeerResponse);
+
+  /** Returns a list of ServerNames marked as draining. */
+  rpc listDrainingRegionServers(ListDrainingRegionServersRequest)
+    returns(ListDrainingRegionServersResponse);
+
+  /** Mark a list of ServerNames as draining. */
+  rpc drainRegionServers(DrainRegionServersRequest)
+    returns(DrainRegionServersResponse);
+
+  /** Unmark a list of ServerNames marked as draining. */
+  rpc removeDrainFromRegionServers(RemoveDrainFromRegionServersRequest)
+    returns(RemoveDrainFromRegionServersResponse);
 }
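
For reference, a hedged sketch of how one of these request messages gets built on the client side with the shaded generated classes. MasterProtos as the outer class and the literal ServerName values are assumptions; ProtobufUtil.toServerName is the same helper MasterRpcServices uses below.

    ServerName server = ServerName.valueOf("host1.example.com", 16020, 1483500000000L);
    MasterProtos.DrainRegionServersRequest request =
        MasterProtos.DrainRegionServersRequest.newBuilder()
            .addServerName(ProtobufUtil.toServerName(server))
            .build();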

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 6b135d9..613c5c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3196,4 +3196,54 @@ public class HMaster extends HRegionServer implements MasterServices {
       cpHost.postDisableReplicationPeer(peerId);
     }
   }
+
+  @Override
+  public void drainRegionServer(final ServerName server) {
+    String parentZnode = getZooKeeper().znodePaths.drainingZNode;
+    try {
+      String node = ZKUtil.joinZNode(parentZnode, server.getServerName());
+      ZKUtil.createAndFailSilent(getZooKeeper(), node);
+    } catch (KeeperException ke) {
+      LOG.warn(this.zooKeeper.prefix("Unable to add drain for '" + server.getServerName() + "'."),
+        ke);
+    }
+  }
+
+  @Override
+  public List<ServerName> listDrainingRegionServers() {
+    String parentZnode = getZooKeeper().znodePaths.drainingZNode;
+    List<ServerName> serverNames = new ArrayList<ServerName>();
+    List<String> serverStrs = null;
+    try {
+      serverStrs = ZKUtil.listChildrenNoWatch(getZooKeeper(), parentZnode);
+    } catch (KeeperException ke) {
+      LOG.warn(this.zooKeeper.prefix("Unable to list draining servers."), ke);
+    }
+    // No nodes means either an empty draining list or ZK connectivity issues.
+    if (serverStrs == null) {
+      return serverNames;
+    }
+
+    // Skip invalid ServerNames in result
+    for (String serverStr : serverStrs) {
+      try {
+        serverNames.add(ServerName.parseServerName(serverStr));
+      } catch (IllegalArgumentException iae) {
+        LOG.warn("Unable to cast '" + serverStr + "' to ServerName.", iae);
+      }
+    }
+    return serverNames;
+  }
+
+  @Override
+  public void removeDrainFromRegionServer(ServerName server) {
+    String parentZnode = getZooKeeper().znodePaths.drainingZNode;
+    String node = ZKUtil.joinZNode(parentZnode, server.getServerName());
+    try {
+      ZKUtil.deleteNodeFailSilent(getZooKeeper(), node);
+    } catch (KeeperException ke) {
+      LOG.warn(
+        this.zooKeeper.prefix("Unable to remove drain for '" + server.getServerName() + "'."), ke);
+    }
+  }
 }
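
The implementation above simply mirrors drain state into ZooKeeper; below is a short sketch of the znode path it manipulates. Only znodePaths.drainingZNode appears in the patch, so the literal /hbase/draining parent is an assumption based on the default layout.

    // ZKUtil.joinZNode is the helper used in the patch above.
    String parentZnode = "/hbase/draining"; // assumed default for znodePaths.drainingZNode
    String node = ZKUtil.joinZNode(parentZnode, "host1.example.com,16020,1483500000000");
    // node == "/hbase/draining/host1.example.com,16020,1483500000000"
    // drainRegionServer creates this child, removeDrainFromRegionServer deletes it,
    // and listDrainingRegionServers lists the children of the parent.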

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 8ee72c6..76da838 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1693,4 +1693,55 @@ public class MasterRpcServices extends RSRpcServices
       throw new ServiceException(e);
     }
   }
+
+  @Override
+  public ListDrainingRegionServersResponse listDrainingRegionServers(RpcController controller,
+      ListDrainingRegionServersRequest request) throws ServiceException {
+    ListDrainingRegionServersResponse.Builder response =
+        ListDrainingRegionServersResponse.newBuilder();
+    try {
+      master.checkInitialized();
+      List<ServerName> servers = master.listDrainingRegionServers();
+      for (ServerName server : servers) {
+        response.addServerName(ProtobufUtil.toServerName(server));
+      }
+    } catch (IOException io) {
+      throw new ServiceException(io);
+    }
+
+    return response.build();
+  }
+
+  @Override
+  public DrainRegionServersResponse drainRegionServers(RpcController controller,
+      DrainRegionServersRequest request) throws ServiceException {
+    DrainRegionServersResponse.Builder response = DrainRegionServersResponse.newBuilder();
+    try {
+      master.checkInitialized();
+      for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
+        master.drainRegionServer(ProtobufUtil.toServerName(pbServer));
+      }
+    } catch (IOException io) {
+      throw new ServiceException(io);
+    }
+
+    return response.build();
+  }
+
+  @Override
+  public RemoveDrainFromRegionServersResponse removeDrainFromRegionServers(RpcController controller,
+      RemoveDrainFromRegionServersRequest request) throws ServiceException {
+    RemoveDrainFromRegionServersResponse.Builder response =
+        RemoveDrainFromRegionServersResponse.newBuilder();
+    try {
+      master.checkInitialized();
+      for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
+        master.removeDrainFromRegionServer(ProtobufUtil.toServerName(pbServer));
+      }
+    } catch (IOException io) {
+      throw new ServiceException(io);
+    }
+
+    return response.build();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index a7395bb..869e7ac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
@@ -443,4 +444,23 @@ public interface MasterServices extends Server {
    * @param peerId a short name that identifies the peer
    */
   void disableReplicationPeer(String peerId) throws ReplicationException, IOException;
+
+  /**
+   * Mark a region server as draining to prevent additional regions from getting assigned to it.
+   * @param server Region server to drain.
+   */
+  void drainRegionServer(final ServerName server);
+
+  /**
+   * List region servers marked as draining; these servers do not get additional regions assigned to them.
+   * @return List of draining servers.
+   */
+  List<ServerName> listDrainingRegionServers();
+
+  /**
+   * Remove the drain from a region server to allow additional region assignments.
+   * @param server Region server to remove drain from.
+   */
+  void removeDrainFromRegionServer(final ServerName server);
+
 }
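
As an illustration of the contract these three methods describe (and nothing more), a self-contained toy tracker; the real HMaster implementation above keeps this state in ZooKeeper instead.

    import java.util.ArrayList;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    class DrainListSketch {
      private final Set<String> draining = new LinkedHashSet<String>();

      void drainRegionServer(String serverName) {            // mark: no new region assignments
        draining.add(serverName);
      }

      List<String> listDrainingRegionServers() {             // snapshot of the current drain list
        return new ArrayList<String>(draining);
      }

      void removeDrainFromRegionServer(String serverName) {  // unmark: assignments allowed again
        draining.remove(serverName);
      }
    }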

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index b1cf1d2..62fde74 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -24,8 +24,14 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
@@ -758,4 +764,75 @@ public class TestAdmin2 {
     ProcedureInfo[] procList = admin.listProcedures();
     assertTrue(procList.length >= 0);
   }
+
+  /*
+   * Test that invalid draining server names (invalid start code) don't get added to drain list.
+   */
+  @Test(timeout = 10000, expected = IllegalArgumentException.class)
+  public void testCheckDrainServerName() throws Exception {
+    List<ServerName> servers = new ArrayList<ServerName>();
+    servers.add(ServerName.parseServerName("127.0.0.1:123"));
+    admin.drainRegionServers(servers);
+  }
+
+  /*
+   * This test drains all regions so cannot be run in parallel with other tests.
+   */
+  @Test(timeout = 30000)
+  public void testDrainRegionServers() throws Exception {
+    List<ServerName> drainingServers = admin.listDrainingRegionServers();
+    assertTrue(drainingServers.isEmpty());
+
+    // Drain all region servers.
+    Collection<ServerName> clusterServers = admin.getClusterStatus().getServers();
+    drainingServers = new ArrayList<ServerName>();
+    for (ServerName server : clusterServers) {
+      drainingServers.add(server);
+    }
+    admin.drainRegionServers(drainingServers);
+
+    // Check that drain lists all region servers.
+    drainingServers = admin.listDrainingRegionServers();
+    assertEquals(clusterServers.size(), drainingServers.size());
+    for (ServerName server : clusterServers) {
+      assertTrue(drainingServers.contains(server));
+    }
+
+    // Try for 20 seconds to create a table (new region). This will not complete because all RSs are draining.
+    TableName hTable = TableName.valueOf("testDrainRegionServer");
+    final HTableDescriptor htd = new HTableDescriptor(hTable);
+    htd.addFamily(new HColumnDescriptor("cf"));
+
+    final Runnable createTable = new Thread() {
+      @Override
+      public void run() {
+        try {
+          admin.createTable(htd);
+        } catch (IOException ioe) {
+          assertTrue(false); // Should not get IOException.
+        }
+      }
+    };
+
+    final ExecutorService executor = Executors.newSingleThreadExecutor();
+    final java.util.concurrent.Future<?> future = executor.submit(createTable);
+    executor.shutdown();
+    try {
+      future.get(20, TimeUnit.SECONDS);
+    } catch (TimeoutException ie) {
+      assertTrue(true); // Expecting timeout to happen.
+    }
+
+    // Kill executor if still processing.
+    if (!executor.isTerminated()) {
+      executor.shutdownNow();
+      assertTrue(true);
+    }
+
+    // Remove drain list.
+    admin.removeDrainFromRegionServers(drainingServers);
+    drainingServers = admin.listDrainingRegionServers();
+    assertTrue(drainingServers.isEmpty());
+
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 4e85d29..2a5be12 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -399,4 +399,19 @@ public class MockNoopMasterServices implements MasterServices, Server {
   @Override
   public void disableReplicationPeer(String peerId) throws ReplicationException, IOException {
   }
+
+  @Override
+  public void drainRegionServer(ServerName server) {
+    return;
+  }
+
+  @Override
+  public List<ServerName> listDrainingRegionServers() {
+    return null;
+  }
+
+  @Override
+  public void removeDrainFromRegionServer(ServerName server) {
+    return;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/992e5717/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
index 7326327..485c1f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertTrue;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 
 import javax.security.auth.login.AppConfigurationEntry;
@@ -319,5 +320,25 @@ public class TestZooKeeperACL {
     }
   }
 
+  @Test(timeout = 10000)
+  public void testAdminDrainAllowedOnSecureZK() throws Exception {
+    if (!secureZKAvailable) {
+      return;
+    }
+    List<ServerName> drainingServers = new ArrayList<ServerName>();
+    drainingServers.add(ServerName.parseServerName("ZZZ,123,123"));
+
+    // If we are unable to connect to the secure ZK cluster, this operation will fail.
+    TEST_UTIL.getAdmin().drainRegionServers(drainingServers);
+
+    drainingServers = TEST_UTIL.getAdmin().listDrainingRegionServers();
+    assertEquals(1, drainingServers.size());
+    assertEquals(ServerName.parseServerName("ZZZ,123,123"), drainingServers.get(0));
+
+    TEST_UTIL.getAdmin().removeDrainFromRegionServers(drainingServers);
+    drainingServers = TEST_UTIL.getAdmin().listDrainingRegionServers();
+    assertEquals(0, drainingServers.size());
+  }
+
 }
 


[05/50] [abbrv] hbase git commit: HBASE-17352 Fix hbase-assembly build with bash 4 (Junegunn Choi)

Posted by sy...@apache.org.
HBASE-17352 Fix hbase-assembly build with bash 4 (Junegunn Choi)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/acd0218d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/acd0218d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/acd0218d

Branch: refs/heads/hbase-12439
Commit: acd0218d91bac9410f7b9bc68f66aa065fd47d55
Parents: cac0904
Author: tedyu <yu...@gmail.com>
Authored: Wed Dec 21 08:41:34 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Wed Dec 21 08:41:34 2016 -0800

----------------------------------------------------------------------
 hbase-assembly/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/acd0218d/hbase-assembly/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 185e681..b9d8dcc 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -138,7 +138,7 @@
                 <argument>bash</argument>
                 <argument>-c</argument>
                 <argument>cat maven-shared-archive-resources/META-INF/NOTICE \
-                  `find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt` \
+                  `find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt`
                 </argument>
               </arguments>
               <outputFile>${project.build.directory}/NOTICE.aggregate</outputFile>


[08/50] [abbrv] hbase git commit: HBASE-17262 Refactor RpcServer so as to make it extendable and/or pluggable

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
index 5a9178a..a1a73c1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
@@ -46,7 +46,9 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
 import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoRequestProto;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoResponseProto;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EmptyRequestProto;
@@ -55,7 +57,6 @@ import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.PauseReq
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.Interface;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.util.StringUtils;
@@ -77,27 +78,6 @@ public abstract class AbstractTestIPC {
   static KeyValue BIG_CELL = new KeyValue(CELL_BYTES, CELL_BYTES, CELL_BYTES, BIG_CELL_BYTES);
   static final Configuration CONF = HBaseConfiguration.create();
 
-  /**
-   * Instance of server. We actually don't do anything speical in here so could just use
-   * HBaseRpcServer directly.
-   */
-  static class TestRpcServer extends RpcServer {
-
-    TestRpcServer() throws IOException {
-      this(new FifoRpcScheduler(CONF, 1), CONF);
-    }
-
-    TestRpcServer(Configuration conf) throws IOException {
-      this(new FifoRpcScheduler(conf, 1), conf);
-    }
-
-    TestRpcServer(RpcScheduler scheduler, Configuration conf) throws IOException {
-      super(null, "testRpcServer",
-          Lists.newArrayList(new BlockingServiceAndInterface(SERVICE, null)),
-          new InetSocketAddress("localhost", 0), conf, scheduler);
-    }
-  }
-
   protected abstract AbstractRpcClient<?> createRpcClientNoCodec(Configuration conf);
 
   /**
@@ -106,7 +86,10 @@ public abstract class AbstractTestIPC {
   @Test
   public void testNoCodec() throws IOException, ServiceException {
     Configuration conf = HBaseConfiguration.create();
-    TestRpcServer rpcServer = new TestRpcServer();
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer", Lists.newArrayList(new BlockingServiceAndInterface(
+            SERVICE, null)), new InetSocketAddress("localhost", 0), CONF,
+        new FifoRpcScheduler(CONF, 1));
     try (AbstractRpcClient<?> client = createRpcClientNoCodec(conf)) {
       rpcServer.start();
       BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
@@ -136,7 +119,10 @@ public abstract class AbstractTestIPC {
     for (int i = 0; i < count; i++) {
       cells.add(CELL);
     }
-    TestRpcServer rpcServer = new TestRpcServer();
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer", Lists.newArrayList(new BlockingServiceAndInterface(
+            SERVICE, null)), new InetSocketAddress("localhost", 0), CONF,
+        new FifoRpcScheduler(CONF, 1));
     try (AbstractRpcClient<?> client = createRpcClient(conf)) {
       rpcServer.start();
       BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
@@ -163,7 +149,10 @@ public abstract class AbstractTestIPC {
   @Test
   public void testRTEDuringConnectionSetup() throws Exception {
     Configuration conf = HBaseConfiguration.create();
-    TestRpcServer rpcServer = new TestRpcServer();
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer", Lists.newArrayList(new BlockingServiceAndInterface(
+            SERVICE, null)), new InetSocketAddress("localhost", 0), CONF,
+        new FifoRpcScheduler(CONF, 1));
     try (AbstractRpcClient<?> client = createRpcClientRTEDuringConnectionSetup(conf)) {
       rpcServer.start();
       BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
@@ -183,7 +172,10 @@ public abstract class AbstractTestIPC {
   @Test
   public void testRpcScheduler() throws IOException, ServiceException, InterruptedException {
     RpcScheduler scheduler = spy(new FifoRpcScheduler(CONF, 1));
-    RpcServer rpcServer = new TestRpcServer(scheduler, CONF);
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer",
+        Lists.newArrayList(new BlockingServiceAndInterface(SERVICE, null)),
+        new InetSocketAddress("localhost", 0), CONF, scheduler);
     verify(scheduler).init((RpcScheduler.Context) anyObject());
     try (AbstractRpcClient<?> client = createRpcClient(CONF)) {
       rpcServer.start();
@@ -205,7 +197,10 @@ public abstract class AbstractTestIPC {
   public void testRpcMaxRequestSize() throws IOException, ServiceException {
     Configuration conf = new Configuration(CONF);
     conf.setInt(RpcServer.MAX_REQUEST_SIZE, 1000);
-    RpcServer rpcServer = new TestRpcServer(conf);
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer", Lists.newArrayList(new BlockingServiceAndInterface(
+            SERVICE, null)), new InetSocketAddress("localhost", 0), conf,
+        new FifoRpcScheduler(conf, 1));
     try (AbstractRpcClient<?> client = createRpcClient(conf)) {
       rpcServer.start();
       BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
@@ -236,7 +231,10 @@ public abstract class AbstractTestIPC {
   @Test
   public void testRpcServerForNotNullRemoteAddressInCallObject()
       throws IOException, ServiceException {
-    TestRpcServer rpcServer = new TestRpcServer();
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer", Lists.newArrayList(new BlockingServiceAndInterface(
+            SERVICE, null)), new InetSocketAddress("localhost", 0), CONF,
+        new FifoRpcScheduler(CONF, 1));
     InetSocketAddress localAddr = new InetSocketAddress("localhost", 0);
     try (AbstractRpcClient<?> client = createRpcClient(CONF)) {
       rpcServer.start();
@@ -250,7 +248,10 @@ public abstract class AbstractTestIPC {
 
   @Test
   public void testRemoteError() throws IOException, ServiceException {
-    TestRpcServer rpcServer = new TestRpcServer();
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer", Lists.newArrayList(new BlockingServiceAndInterface(
+            SERVICE, null)), new InetSocketAddress("localhost", 0), CONF,
+        new FifoRpcScheduler(CONF, 1));
     try (AbstractRpcClient<?> client = createRpcClient(CONF)) {
       rpcServer.start();
       BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
@@ -267,7 +268,10 @@ public abstract class AbstractTestIPC {
 
   @Test
   public void testTimeout() throws IOException {
-    TestRpcServer rpcServer = new TestRpcServer();
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer", Lists.newArrayList(new BlockingServiceAndInterface(
+            SERVICE, null)), new InetSocketAddress("localhost", 0), CONF,
+        new FifoRpcScheduler(CONF, 1));
     try (AbstractRpcClient<?> client = createRpcClient(CONF)) {
       rpcServer.start();
       BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());
@@ -295,7 +299,7 @@ public abstract class AbstractTestIPC {
     }
   }
 
-  static class TestFailingRpcServer extends TestRpcServer {
+  static class TestFailingRpcServer extends SimpleRpcServer {
 
     TestFailingRpcServer() throws IOException {
       this(new FifoRpcScheduler(CONF, 1), CONF);
@@ -306,7 +310,9 @@ public abstract class AbstractTestIPC {
     }
 
     TestFailingRpcServer(RpcScheduler scheduler, Configuration conf) throws IOException {
-      super(scheduler, conf);
+      super(null, "testRpcServer", Lists
+          .newArrayList(new BlockingServiceAndInterface(SERVICE, null)),
+          new InetSocketAddress("localhost", 0), conf, scheduler);
     }
 
     class FailingConnection extends Connection {
@@ -349,7 +355,10 @@ public abstract class AbstractTestIPC {
   @Test
   public void testAsyncEcho() throws IOException {
     Configuration conf = HBaseConfiguration.create();
-    TestRpcServer rpcServer = new TestRpcServer();
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer", Lists.newArrayList(new BlockingServiceAndInterface(
+            SERVICE, null)), new InetSocketAddress("localhost", 0), CONF,
+        new FifoRpcScheduler(CONF, 1));
     try (AbstractRpcClient<?> client = createRpcClient(conf)) {
       rpcServer.start();
       Interface stub = newStub(client, rpcServer.getListenerAddress());
@@ -377,7 +386,10 @@ public abstract class AbstractTestIPC {
   @Test
   public void testAsyncRemoteError() throws IOException {
     AbstractRpcClient<?> client = createRpcClient(CONF);
-    TestRpcServer rpcServer = new TestRpcServer();
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer", Lists.newArrayList(new BlockingServiceAndInterface(
+            SERVICE, null)), new InetSocketAddress("localhost", 0), CONF,
+        new FifoRpcScheduler(CONF, 1));
     try {
       rpcServer.start();
       Interface stub = newStub(client, rpcServer.getListenerAddress());
@@ -398,7 +410,10 @@ public abstract class AbstractTestIPC {
 
   @Test
   public void testAsyncTimeout() throws IOException {
-    TestRpcServer rpcServer = new TestRpcServer();
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+        "testRpcServer", Lists.newArrayList(new BlockingServiceAndInterface(
+            SERVICE, null)), new InetSocketAddress("localhost", 0), CONF,
+        new FifoRpcScheduler(CONF, 1));
     try (AbstractRpcClient<?> client = createRpcClient(CONF)) {
       rpcServer.start();
       Interface stub = newStub(client, rpcServer.getListenerAddress());
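
Each TestRpcServer replacement above follows the same shape: rather than subclassing RpcServer, the tests now ask RpcServerFactory for an instance. A condensed sketch of that construction pattern -- the RpcServerFactoryExample class is illustrative, and the BlockingService parameter stands in for the SERVICE constant the tests use -- might look like:

  import java.io.IOException;
  import java.net.InetSocketAddress;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
  import org.apache.hadoop.hbase.ipc.RpcServer;
  import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
  import org.apache.hadoop.hbase.ipc.RpcServerFactory;
  import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;

  import com.google.common.collect.Lists;

  public class RpcServerFactoryExample {
    /**
     * Starts an RpcServer the way the refactored tests do. The caller supplies the
     * BlockingService to expose and is responsible for calling stop() when done.
     */
    static RpcServer startTestServer(BlockingService service) throws IOException {
      Configuration conf = HBaseConfiguration.create();
      RpcServer rpcServer = RpcServerFactory.createRpcServer(
          null,                                  // no enclosing Server in a test context
          "testRpcServer",                       // name used for logging and thread names
          Lists.newArrayList(new BlockingServiceAndInterface(service, null)),
          new InetSocketAddress("localhost", 0), // bind to an ephemeral port
          conf,
          new FifoRpcScheduler(conf, 1));        // single-handler FIFO scheduler
      rpcServer.start();
      return rpcServer;
    }
  }

Keeping construction behind the factory is what makes the server implementation pluggable: callers depend only on the abstract RpcServer/RpcServerInterface surface, while the factory decides which concrete server (for example SimpleRpcServer) to instantiate.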

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java
index 9a02d5b..b039003 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestProtoBufRpc.java
@@ -65,7 +65,7 @@ public class TestProtoBufRpc {
     log.setLevel(Level.TRACE);
     // Create server side implementation
     // Get RPC server for server side implementation
-    this.server = new RpcServer(null, "testrpc",
+    this.server = RpcServerFactory.createRpcServer(null, "testrpc",
         Lists.newArrayList(new RpcServer.BlockingServiceAndInterface(SERVICE, null)),
         new InetSocketAddress(ADDRESS, PORT), conf, new FifoRpcScheduler(conf, 10));
     InetSocketAddress address = server.getListenerAddress();

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
index 8eed01c..449899f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcHandlerException.java
@@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoRequestProto;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface;
 import org.apache.hadoop.hbase.testclassification.RPCTests;
@@ -36,6 +37,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import com.google.common.collect.Lists;
+
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 
 @Category({ RPCTests.class, SmallTests.class })
@@ -44,19 +46,6 @@ public class TestRpcHandlerException {
   private final static Configuration CONF = HBaseConfiguration.create();
 
   /**
-   * Instance of server. We actually don't do anything speical in here so could just use
-   * HBaseRpcServer directly.
-   */
-  private static class TestRpcServer extends RpcServer {
-
-    TestRpcServer(RpcScheduler scheduler) throws IOException {
-      super(null, "testRpcServer",
-          Lists.newArrayList(new BlockingServiceAndInterface((BlockingService) SERVICE, null)),
-          new InetSocketAddress("localhost", 0), CONF, scheduler);
-    }
-  }
-
-  /**
    * Tests that the rpc scheduler is called when requests arrive. When Rpc handler thread dies, the
    * client will hang and the test will fail. The test is meant to be a unit test to test the
    * behavior.
@@ -85,7 +74,9 @@ public class TestRpcHandlerException {
     PriorityFunction qosFunction = mock(PriorityFunction.class);
     Abortable abortable = new AbortServer();
     RpcScheduler scheduler = new SimpleRpcScheduler(CONF, 2, 0, 0, qosFunction, abortable, 0);
-    RpcServer rpcServer = new TestRpcServer(scheduler);
+    RpcServer rpcServer = RpcServerFactory.createRpcServer(null, "testRpcServer",
+        Lists.newArrayList(new BlockingServiceAndInterface((BlockingService) SERVICE, null)),
+        new InetSocketAddress("localhost", 0), CONF, scheduler);
     try (BlockingRpcClient client = new BlockingRpcClient(CONF)) {
       rpcServer.start();
       BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress());

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java
index b7d6f87..c848250 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/TestSecureIPC.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.RpcClientFactory;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
+import org.apache.hadoop.hbase.ipc.RpcServerFactory;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
@@ -250,7 +251,7 @@ public class TestSecureIPC {
 
     InetSocketAddress isa = new InetSocketAddress(HOST, 0);
 
-    RpcServerInterface rpcServer = new RpcServer(null, "AbstractTestSecureIPC",
+    RpcServerInterface rpcServer = RpcServerFactory.createRpcServer(null, "AbstractTestSecureIPC",
         Lists.newArrayList(new RpcServer.BlockingServiceAndInterface((BlockingService) SERVICE, null)), isa,
         serverConf, new FifoRpcScheduler(serverConf, 1));
     rpcServer.start();

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index b7517bf0..92eaecc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -26,7 +26,6 @@ import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.ConcurrentMap;
@@ -40,7 +39,6 @@ import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
@@ -51,14 +49,12 @@ import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
-import org.apache.hadoop.hbase.ipc.RpcClient;
-import org.apache.hadoop.hbase.ipc.RpcClientFactory;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
+import org.apache.hadoop.hbase.ipc.RpcServerFactory;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
-import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos.WhoAmIResponse;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.security.SecurityInfo;
@@ -78,7 +74,6 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.net.DNS;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.security.token.SecretManager;
@@ -188,8 +183,8 @@ public class TestTokenAuthentication {
       };
       sai.add(new BlockingServiceAndInterface(proxy,
         AuthenticationProtos.AuthenticationService.BlockingInterface.class));
-      this.rpcServer =
-        new RpcServer(this, "tokenServer", sai, initialIsa, conf, new FifoRpcScheduler(conf, 1));
+      this.rpcServer = RpcServerFactory.createRpcServer(this, "tokenServer", sai,
+          initialIsa, conf, new FifoRpcScheduler(conf, 1));
       InetSocketAddress address = rpcServer.getListenerAddress();
       if (address == null) {
         throw new IOException("Listener channel is closed");


[10/50] [abbrv] hbase git commit: HBASE-17262 Refactor RpcServer so as to make it extendable and/or pluggable

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 96f506f..d6a137b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -20,57 +20,20 @@ package org.apache.hadoop.hbase.ipc;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.InputStream;
-import java.net.BindException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
-import java.net.ServerSocket;
-import java.net.Socket;
-import java.net.SocketException;
-import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
-import java.nio.channels.CancelledKeyException;
-import java.nio.channels.Channels;
-import java.nio.channels.ClosedChannelException;
-import java.nio.channels.GatheringByteChannel;
-import java.nio.channels.ReadableByteChannel;
-import java.nio.channels.SelectionKey;
-import java.nio.channels.Selector;
-import java.nio.channels.ServerSocketChannel;
-import java.nio.channels.SocketChannel;
-import java.nio.channels.WritableByteChannel;
 import java.security.GeneralSecurityException;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
-import java.util.Set;
-import java.util.Timer;
-import java.util.TimerTask;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentLinkedDeque;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.LongAdder;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
 
 import javax.security.sasl.Sasl;
-import javax.security.sasl.SaslException;
 import javax.security.sasl.SaslServer;
 
 import org.apache.commons.crypto.cipher.CryptoCipherFactory;
@@ -82,19 +45,16 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CallQueueTooBigException;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.client.VersionInfoUtil;
 import org.apache.hadoop.hbase.codec.Codec;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
 import org.apache.hadoop.hbase.exceptions.RequestTooBigException;
 import org.apache.hadoop.hbase.io.ByteBufferListOutputStream;
-import org.apache.hadoop.hbase.io.ByteBufferOutputStream;
 import org.apache.hadoop.hbase.io.ByteBufferPool;
 import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES;
 import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
@@ -103,6 +63,21 @@ import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.MultiByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.hadoop.hbase.security.AccessDeniedException;
+import org.apache.hadoop.hbase.security.AuthMethod;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
+import org.apache.hadoop.hbase.security.SaslUtil;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteInput;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.VersionInfo;
@@ -113,81 +88,37 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResp
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation;
-import org.apache.hadoop.hbase.security.AccessDeniedException;
-import org.apache.hadoop.hbase.security.AuthMethod;
-import org.apache.hadoop.hbase.security.HBasePolicyProvider;
-import org.apache.hadoop.hbase.security.HBaseSaslRpcServer;
-import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslDigestCallbackHandler;
-import org.apache.hadoop.hbase.security.HBaseSaslRpcServer.SaslGssCallbackHandler;
-import org.apache.hadoop.hbase.security.SaslStatus;
-import org.apache.hadoop.hbase.security.SaslUtil;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.UserProvider;
-import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
 import org.apache.hadoop.security.token.SecretManager;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.htrace.TraceInfo;
 import org.codehaus.jackson.map.ObjectMapper;
 
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteInput;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * An RPC server that hosts protobuf described Services.
  *
- * An RpcServer instance has a Listener that hosts the socket.  Listener has fixed number
- * of Readers in an ExecutorPool, 10 by default.  The Listener does an accept and then
- * round robin a Reader is chosen to do the read.  The reader is registered on Selector.  Read does
- * total read off the channel and the parse from which it makes a Call.  The call is wrapped in a
- * CallRunner and passed to the scheduler to be run.  Reader goes back to see if more to be done
- * and loops till done.
- *
- * <p>Scheduler can be variously implemented but default simple scheduler has handlers to which it
- * has given the queues into which calls (i.e. CallRunner instances) are inserted.  Handlers run
- * taking from the queue.  They run the CallRunner#run method on each item gotten from queue
- * and keep taking while the server is up.
- *
- * CallRunner#run executes the call.  When done, asks the included Call to put itself on new
- * queue for Responder to pull from and return result to client.
- *
- * @see BlockingRpcClient
  */
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
-public class RpcServer implements RpcServerInterface, ConfigurationObserver {
+public abstract class RpcServer implements RpcServerInterface,
+    ConfigurationObserver {
   // LOG is being used in CallRunner and the log level is being changed in tests
   public static final Log LOG = LogFactory.getLog(RpcServer.class);
-  private static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION
+  protected static final CallQueueTooBigException CALL_QUEUE_TOO_BIG_EXCEPTION
       = new CallQueueTooBigException();
 
   private final boolean authorize;
-  private boolean isSecurityEnabled;
+  protected boolean isSecurityEnabled;
 
   public static final byte CURRENT_VERSION = 0;
 
@@ -200,14 +131,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   /**
    * How many calls/handler are allowed in the queue.
    */
-  static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10;
+  protected static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10;
 
-  private final CellBlockBuilder cellBlockBuilder;
+  protected final CellBlockBuilder cellBlockBuilder;
 
-  private static final String AUTH_FAILED_FOR = "Auth failed for ";
-  private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";
-  private static final Log AUDITLOG = LogFactory.getLog("SecurityLogger." +
-    Server.class.getName());
+  protected static final String AUTH_FAILED_FOR = "Auth failed for ";
+  protected static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";
+  protected static final Log AUDITLOG = LogFactory.getLog("SecurityLogger."
+      + Server.class.getName());
   protected SecretManager<TokenIdentifier> secretManager;
   protected ServiceAuthorizationManager authManager;
 
@@ -218,13 +149,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       new ThreadLocal<RpcCall>();
 
   /** Keeps MonitoredRPCHandler per handler thread. */
-  static final ThreadLocal<MonitoredRPCHandler> MONITORED_RPC
+  protected static final ThreadLocal<MonitoredRPCHandler> MONITORED_RPC
       = new ThreadLocal<MonitoredRPCHandler>();
 
   protected final InetSocketAddress bindAddress;
-  protected int port;                             // port we listen on
-  protected InetSocketAddress address;            // inet address we listen on
-  private int readThreads;                        // number of read threads
+
   protected MetricsHBaseServer metrics;
 
   protected final Configuration conf;
@@ -236,12 +165,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    * call queue size gets incremented after we parse a call and before we add it to the queue of
    * calls for the scheduler to use. It get decremented after we have 'run' the Call. The current
    * size is kept in {@link #callQueueSizeInBytes}.
-   * @see {@link #callQueueSizeInBytes}
-   * @see {@link #DEFAULT_MAX_CALLQUEUE_SIZE}
-   * @see {@link #callQueueSizeInBytes}
+   * @see #callQueueSizeInBytes
+   * @see #DEFAULT_MAX_CALLQUEUE_SIZE
    */
-  private final long maxQueueSizeInBytes;
-  private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024;
+  protected final long maxQueueSizeInBytes;
+  protected static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024;
 
   /**
    * This is a running count of the size in bytes of all outstanding calls whether currently
@@ -249,10 +177,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    */
   protected final LongAdder callQueueSizeInBytes = new LongAdder();
 
-  protected int socketSendBufferSize;
-  protected final boolean tcpNoDelay;   // if T then disable Nagle's Algorithm
+  protected final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
   protected final boolean tcpKeepAlive; // if T then use keepalives
-  protected final long purgeTimeout;    // in milliseconds
 
   /**
    * This flag is used to indicate to sub threads when they should go down.  When we call
@@ -267,55 +193,51 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    */
   volatile boolean started = false;
 
-  // maintains the set of client connections and handles idle timeouts
-  private ConnectionManager connectionManager;
-  private Listener listener = null;
-  protected Responder responder = null;
   protected AuthenticationTokenSecretManager authTokenSecretMgr = null;
 
   protected HBaseRPCErrorHandler errorHandler = null;
 
-  static final String MAX_REQUEST_SIZE = "hbase.ipc.max.request.size";
-  private static final RequestTooBigException REQUEST_TOO_BIG_EXCEPTION =
+  protected static final String MAX_REQUEST_SIZE = "hbase.ipc.max.request.size";
+  protected static final RequestTooBigException REQUEST_TOO_BIG_EXCEPTION =
       new RequestTooBigException();
 
-  private static final String WARN_RESPONSE_TIME = "hbase.ipc.warn.response.time";
-  private static final String WARN_RESPONSE_SIZE = "hbase.ipc.warn.response.size";
+  protected static final String WARN_RESPONSE_TIME = "hbase.ipc.warn.response.time";
+  protected static final String WARN_RESPONSE_SIZE = "hbase.ipc.warn.response.size";
 
   /**
    * Minimum allowable timeout (in milliseconds) in rpc request's header. This
    * configuration exists to prevent the rpc service regarding this request as timeout immediately.
    */
-  private static final String MIN_CLIENT_REQUEST_TIMEOUT = "hbase.ipc.min.client.request.timeout";
-  private static final int DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT = 20;
+  protected static final String MIN_CLIENT_REQUEST_TIMEOUT = "hbase.ipc.min.client.request.timeout";
+  protected static final int DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT = 20;
 
   /** Default value for above params */
-  private static final int DEFAULT_MAX_REQUEST_SIZE = DEFAULT_MAX_CALLQUEUE_SIZE / 4; // 256M
-  private static final int DEFAULT_WARN_RESPONSE_TIME = 10000; // milliseconds
-  private static final int DEFAULT_WARN_RESPONSE_SIZE = 100 * 1024 * 1024;
+  protected static final int DEFAULT_MAX_REQUEST_SIZE = DEFAULT_MAX_CALLQUEUE_SIZE / 4; // 256M
+  protected static final int DEFAULT_WARN_RESPONSE_TIME = 10000; // milliseconds
+  protected static final int DEFAULT_WARN_RESPONSE_SIZE = 100 * 1024 * 1024;
 
-  private static final ObjectMapper MAPPER = new ObjectMapper();
+  protected static final ObjectMapper MAPPER = new ObjectMapper();
 
-  private final int maxRequestSize;
-  private final int warnResponseTime;
-  private final int warnResponseSize;
+  protected final int maxRequestSize;
+  protected final int warnResponseTime;
+  protected final int warnResponseSize;
 
-  private final int minClientRequestTimeout;
+  protected final int minClientRequestTimeout;
 
-  private final Server server;
-  private final List<BlockingServiceAndInterface> services;
+  protected final Server server;
+  protected final List<BlockingServiceAndInterface> services;
 
-  private final RpcScheduler scheduler;
+  protected final RpcScheduler scheduler;
 
-  private UserProvider userProvider;
+  protected UserProvider userProvider;
 
-  private final ByteBufferPool reservoir;
+  protected final ByteBufferPool reservoir;
   // The requests and response will use buffers from ByteBufferPool, when the size of the
   // request/response is at least this size.
   // We make this to be 1/6th of the pool buffer size.
-  private final int minSizeForReservoirUse;
+  protected final int minSizeForReservoirUse;
 
-  private volatile boolean allowFallbackToSimpleAuth;
+  protected volatile boolean allowFallbackToSimpleAuth;
 
   /**
    * Used to get details for scan with a scanner_id<br/>
@@ -327,8 +249,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    * Datastructure that holds all necessary to a method invocation and then afterward, carries
    * the result.
    */
+  @InterfaceStability.Evolving
   @InterfaceAudience.Private
-  public class Call implements RpcCall {
+  public abstract class Call implements RpcCall {
     protected int id;                             // the client's call id
     protected BlockingService service;
     protected MethodDescriptor md;
@@ -347,17 +270,16 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
      * Chain of buffers to send as response.
      */
     protected BufferChain response;
-    protected Responder responder;
 
     protected long size;                          // size of current call
     protected boolean isError;
     protected TraceInfo tinfo;
-    private ByteBufferListOutputStream cellBlockStream = null;
-    private CallCleanup reqCleanup = null;
+    protected ByteBufferListOutputStream cellBlockStream = null;
+    protected CallCleanup reqCleanup = null;
 
-    private User user;
-    private InetAddress remoteAddress;
-    private RpcCallback rpcCallback;
+    protected User user;
+    protected InetAddress remoteAddress;
+    protected RpcCallback rpcCallback;
 
     private long responseCellSize = 0;
     private long responseBlockSize = 0;
@@ -365,10 +287,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
     @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
         justification="Can't figure why this complaint is happening... see below")
-    Call(int id, final BlockingService service, final MethodDescriptor md, RequestHeader header,
-         Message param, CellScanner cellScanner, Connection connection, Responder responder,
-         long size, TraceInfo tinfo, final InetAddress remoteAddress, int timeout,
-         CallCleanup reqCleanup) {
+    Call(int id, final BlockingService service, final MethodDescriptor md,
+        RequestHeader header, Message param, CellScanner cellScanner,
+        Connection connection, long size, TraceInfo tinfo,
+        final InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) {
       this.id = id;
       this.service = service;
       this.md = md;
@@ -378,7 +300,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       this.connection = connection;
       this.timestamp = System.currentTimeMillis();
       this.response = null;
-      this.responder = responder;
       this.isError = false;
       this.size = size;
       this.tinfo = tinfo;
@@ -392,20 +313,20 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     }
 
     /**
-     * Call is done. Execution happened and we returned results to client. It is now safe to
-     * cleanup.
+     * Call is done. Execution happened and we returned results to client. It is
+     * now safe to clean up.
      */
-    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
-        justification="Presume the lock on processing request held by caller is protection enough")
+    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "IS2_INCONSISTENT_SYNC",
+        justification = "Presume the lock on processing request held by caller is protection enough")
     void done() {
       if (this.cellBlockStream != null) {
-        this.cellBlockStream.releaseResources();// This will return back the BBs which we
-                                                // got from pool.
+        // This will return back the BBs which we got from pool.
+        this.cellBlockStream.releaseResources();
         this.cellBlockStream = null;
       }
-      cleanup();// If the call was run successfuly, we might have already returned the
-                             // BB back to pool. No worries..Then inputCellBlock will be null
-      this.connection.decRpcCount();  // Say that we're done with this call.
+      // If the call was run successfully, we might have already returned the BB
+      // back to the pool. No worries; then inputCellBlock will be null.
+      cleanup();
     }
 
     @Override
@@ -428,10 +349,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return this.header;
     }
 
-    public boolean hasPriority() {
-      return this.header.hasPriority();
-    }
-
     @Override
     public int getPriority() {
       return this.header.getPriority();
@@ -669,15 +586,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     }
 
     @Override
-    public long disconnectSince() {
-      if (!connection.channel.isOpen()) {
-        return System.currentTimeMillis() - timestamp;
-      } else {
-        return -1L;
-      }
-    }
-
-    @Override
     public long getResponseCellSize() {
       return responseCellSize;
     }
@@ -708,13 +616,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     }
 
     @Override
-    public synchronized void sendResponseIfReady() throws IOException {
-      // set param null to reduce memory pressure
-      this.param = null;
-      this.responder.doRespond(this);
-    }
-
-    @Override
     public User getRequestUser() {
       return user;
     }
@@ -773,7 +674,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     @Override
     public void setReceiveTime(long t) {
       this.timestamp = t;
-
     }
 
     @Override
@@ -784,7 +684,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     @Override
     public void setStartTime(long t) {
       this.startTime = t;
-
     }
 
     @Override
@@ -805,653 +704,73 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   }
 
   @FunctionalInterface
-  static interface CallCleanup {
+  protected static interface CallCleanup {
     void run();
   }
 
-  /** Listens on the socket. Creates jobs for the handler threads*/
-  private class Listener extends Thread {
-
-    private ServerSocketChannel acceptChannel = null; //the accept channel
-    private Selector selector = null; //the selector that we use for the server
-    private Reader[] readers = null;
-    private int currentReader = 0;
-    private final int readerPendingConnectionQueueLength;
-
-    private ExecutorService readPool;
-
-    public Listener(final String name) throws IOException {
-      super(name);
-      // The backlog of requests that we will have the serversocket carry.
-      int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
-      readerPendingConnectionQueueLength =
-          conf.getInt("hbase.ipc.server.read.connection-queue.size", 100);
-      // Create a new server socket and set to non blocking mode
-      acceptChannel = ServerSocketChannel.open();
-      acceptChannel.configureBlocking(false);
-
-      // Bind the server socket to the binding addrees (can be different from the default interface)
-      bind(acceptChannel.socket(), bindAddress, backlogLength);
-      port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
-      address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
-      // create a selector;
-      selector = Selector.open();
-
-      readers = new Reader[readThreads];
-      // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it
-      // has an advantage in that it is easy to shutdown the pool.
-      readPool = Executors.newFixedThreadPool(readThreads,
-        new ThreadFactoryBuilder().setNameFormat(
-          "RpcServer.reader=%d,bindAddress=" + bindAddress.getHostName() +
-          ",port=" + port).setDaemon(true)
-        .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build());
-      for (int i = 0; i < readThreads; ++i) {
-        Reader reader = new Reader();
-        readers[i] = reader;
-        readPool.execute(reader);
-      }
-      LOG.info(getName() + ": started " + readThreads + " reader(s) listening on port=" + port);
-
-      // Register accepts on the server socket with the selector.
-      acceptChannel.register(selector, SelectionKey.OP_ACCEPT);
-      this.setName("RpcServer.listener,port=" + port);
-      this.setDaemon(true);
-    }
-
-
-    private class Reader implements Runnable {
-      final private LinkedBlockingQueue<Connection> pendingConnections;
-      private final Selector readSelector;
-
-      Reader() throws IOException {
-        this.pendingConnections =
-          new LinkedBlockingQueue<Connection>(readerPendingConnectionQueueLength);
-        this.readSelector = Selector.open();
-      }
-
-      @Override
-      public void run() {
-        try {
-          doRunLoop();
-        } finally {
-          try {
-            readSelector.close();
-          } catch (IOException ioe) {
-            LOG.error(getName() + ": error closing read selector in " + getName(), ioe);
-          }
-        }
-      }
-
-      private synchronized void doRunLoop() {
-        while (running) {
-          try {
-            // Consume as many connections as currently queued to avoid
-            // unbridled acceptance of connections that starves the select
-            int size = pendingConnections.size();
-            for (int i=size; i>0; i--) {
-              Connection conn = pendingConnections.take();
-              conn.channel.register(readSelector, SelectionKey.OP_READ, conn);
-            }
-            readSelector.select();
-            Iterator<SelectionKey> iter = readSelector.selectedKeys().iterator();
-            while (iter.hasNext()) {
-              SelectionKey key = iter.next();
-              iter.remove();
-              if (key.isValid()) {
-                if (key.isReadable()) {
-                  doRead(key);
-                }
-              }
-              key = null;
-            }
-          } catch (InterruptedException e) {
-            if (running) {                      // unexpected -- log it
-              LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
-            }
-            return;
-          } catch (IOException ex) {
-            LOG.info(getName() + ": IOException in Reader", ex);
-          }
-        }
-      }
-
-      /**
-       * Updating the readSelector while it's being used is not thread-safe,
-       * so the connection must be queued.  The reader will drain the queue
-       * and update its readSelector before performing the next select
-       */
-      public void addConnection(Connection conn) throws IOException {
-        pendingConnections.add(conn);
-        readSelector.wakeup();
-      }
-    }
-
-    @Override
-    @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="IS2_INCONSISTENT_SYNC",
-      justification="selector access is not synchronized; seems fine but concerned changing " +
-        "it will have per impact")
-    public void run() {
-      LOG.info(getName() + ": starting");
-      connectionManager.startIdleScan();
-      while (running) {
-        SelectionKey key = null;
-        try {
-          selector.select(); // FindBugs IS2_INCONSISTENT_SYNC
-          Iterator<SelectionKey> iter = selector.selectedKeys().iterator();
-          while (iter.hasNext()) {
-            key = iter.next();
-            iter.remove();
-            try {
-              if (key.isValid()) {
-                if (key.isAcceptable())
-                  doAccept(key);
-              }
-            } catch (IOException ignored) {
-              if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored);
-            }
-            key = null;
-          }
-        } catch (OutOfMemoryError e) {
-          if (errorHandler != null) {
-            if (errorHandler.checkOOME(e)) {
-              LOG.info(getName() + ": exiting on OutOfMemoryError");
-              closeCurrentConnection(key, e);
-              connectionManager.closeIdle(true);
-              return;
-            }
-          } else {
-            // we can run out of memory if we have too many threads
-            // log the event and sleep for a minute and give
-            // some thread(s) a chance to finish
-            LOG.warn(getName() + ": OutOfMemoryError in server select", e);
-            closeCurrentConnection(key, e);
-            connectionManager.closeIdle(true);
-            try {
-              Thread.sleep(60000);
-            } catch (InterruptedException ex) {
-              LOG.debug("Interrupted while sleeping");
-            }
-          }
-        } catch (Exception e) {
-          closeCurrentConnection(key, e);
-        }
-      }
-      LOG.info(getName() + ": stopping");
-      synchronized (this) {
-        try {
-          acceptChannel.close();
-          selector.close();
-        } catch (IOException ignored) {
-          if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored);
-        }
-
-        selector= null;
-        acceptChannel= null;
-
-        // close all connections
-        connectionManager.stopIdleScan();
-        connectionManager.closeAll();
-      }
-    }
-
-    private void closeCurrentConnection(SelectionKey key, Throwable e) {
-      if (key != null) {
-        Connection c = (Connection)key.attachment();
-        if (c != null) {
-          closeConnection(c);
-          key.attach(null);
-        }
-      }
-    }
-
-    InetSocketAddress getAddress() {
-      return address;
-    }
-
-    void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfMemoryError {
-      ServerSocketChannel server = (ServerSocketChannel) key.channel();
-      SocketChannel channel;
-      while ((channel = server.accept()) != null) {
-        channel.configureBlocking(false);
-        channel.socket().setTcpNoDelay(tcpNoDelay);
-        channel.socket().setKeepAlive(tcpKeepAlive);
-        Reader reader = getReader();
-        Connection c = connectionManager.register(channel);
-        // If the connectionManager can't take it, close the connection.
-        if (c == null) {
-          if (channel.isOpen()) {
-            IOUtils.cleanup(null, channel);
-          }
-          continue;
-        }
-        key.attach(c);  // so closeCurrentConnection can get the object
-        reader.addConnection(c);
-      }
-    }
-
-    void doRead(SelectionKey key) throws InterruptedException {
-      int count;
-      Connection c = (Connection) key.attachment();
-      if (c == null) {
-        return;
-      }
-      c.setLastContact(System.currentTimeMillis());
-      try {
-        count = c.readAndProcess();
-      } catch (InterruptedException ieo) {
-        LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
-        throw ieo;
-      } catch (Exception e) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(getName() + ": Caught exception while reading:", e);
-        }
-        count = -1; //so that the (count < 0) block is executed
-      }
-      if (count < 0) {
-        closeConnection(c);
-        c = null;
-      } else {
-        c.setLastContact(System.currentTimeMillis());
-      }
-    }
-
-    synchronized void doStop() {
-      if (selector != null) {
-        selector.wakeup();
-        Thread.yield();
-      }
-      if (acceptChannel != null) {
-        try {
-          acceptChannel.socket().close();
-        } catch (IOException e) {
-          LOG.info(getName() + ": exception in closing listener socket. " + e);
-        }
-      }
-      readPool.shutdownNow();
-    }
-
-    // The method that will return the next reader to work with
-    // Simplistic implementation of round robin for now
-    Reader getReader() {
-      currentReader = (currentReader + 1) % readers.length;
-      return readers[currentReader];
-    }
-  }
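The Reader.addConnection() comment above describes a standard NIO hand-off: a Selector may only be re-registered safely from the thread that calls select(), so newly accepted channels are parked in a queue and the selector is woken so the reader thread registers them itself. A minimal, hypothetical sketch of that pattern (illustrative class and field names, not the HBase classes themselves):

    import java.io.IOException;
    import java.nio.channels.SelectionKey;
    import java.nio.channels.Selector;
    import java.nio.channels.SocketChannel;
    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;

    // Hypothetical illustration of the queue-and-wakeup hand-off described above.
    class PendingReader implements Runnable {
      private final Selector selector;
      private final Queue<SocketChannel> pending = new ConcurrentLinkedQueue<>();
      private volatile boolean running = true;

      PendingReader() throws IOException {
        this.selector = Selector.open();
      }

      // Called from the accept thread: it never touches the selector directly.
      void addChannel(SocketChannel channel) {
        pending.add(channel);
        selector.wakeup();  // interrupt select() so the new channel is registered promptly
      }

      @Override
      public void run() {
        while (running) {
          try {
            // Drain the queue on the selector's own thread before selecting again.
            SocketChannel ch;
            while ((ch = pending.poll()) != null) {
              ch.configureBlocking(false);
              ch.register(selector, SelectionKey.OP_READ);
            }
            selector.select();
            for (SelectionKey key : selector.selectedKeys()) {
              // read from key.channel() here
            }
            selector.selectedKeys().clear();
          } catch (IOException e) {
            // log and continue, as the real Reader does
          }
        }
      }
    }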
-
-  // Sends responses of RPC back to clients.
-  protected class Responder extends Thread {
-    private final Selector writeSelector;
-    private final Set<Connection> writingCons =
-        Collections.newSetFromMap(new ConcurrentHashMap<Connection, Boolean>());
-
-    Responder() throws IOException {
-      this.setName("RpcServer.responder");
-      this.setDaemon(true);
-      this.setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER);
-      writeSelector = Selector.open(); // create a selector
-    }
-
-    @Override
-    public void run() {
-      LOG.debug(getName() + ": starting");
-      try {
-        doRunLoop();
-      } finally {
-        LOG.info(getName() + ": stopping");
-        try {
-          writeSelector.close();
-        } catch (IOException ioe) {
-          LOG.error(getName() + ": couldn't close write selector", ioe);
-        }
-      }
-    }
-
-    /**
-     * Take the list of the connections that want to write, and register them
-     * in the selector.
-     */
-    private void registerWrites() {
-      Iterator<Connection> it = writingCons.iterator();
-      while (it.hasNext()) {
-        Connection c = it.next();
-        it.remove();
-        SelectionKey sk = c.channel.keyFor(writeSelector);
-        try {
-          if (sk == null) {
-            try {
-              c.channel.register(writeSelector, SelectionKey.OP_WRITE, c);
-            } catch (ClosedChannelException e) {
-              // ignore: the client went away.
-              if (LOG.isTraceEnabled()) LOG.trace("ignored", e);
-            }
-          } else {
-            sk.interestOps(SelectionKey.OP_WRITE);
-          }
-        } catch (CancelledKeyException e) {
-          // ignore: the client went away.
-          if (LOG.isTraceEnabled()) LOG.trace("ignored", e);
-        }
-      }
-    }
-
-    /**
-     * Add a connection to the list of connections that want to write.
-     */
-    public void registerForWrite(Connection c) {
-      if (writingCons.add(c)) {
-        writeSelector.wakeup();
-      }
-    }
-
-    private void doRunLoop() {
-      long lastPurgeTime = 0;   // last check for old calls.
-      while (running) {
-        try {
-          registerWrites();
-          int keyCt = writeSelector.select(purgeTimeout);
-          if (keyCt == 0) {
-            continue;
-          }
-
-          Set<SelectionKey> keys = writeSelector.selectedKeys();
-          Iterator<SelectionKey> iter = keys.iterator();
-          while (iter.hasNext()) {
-            SelectionKey key = iter.next();
-            iter.remove();
-            try {
-              if (key.isValid() && key.isWritable()) {
-                doAsyncWrite(key);
-              }
-            } catch (IOException e) {
-              LOG.debug(getName() + ": asyncWrite", e);
-            }
-          }
-
-          lastPurgeTime = purge(lastPurgeTime);
-
-        } catch (OutOfMemoryError e) {
-          if (errorHandler != null) {
-            if (errorHandler.checkOOME(e)) {
-              LOG.info(getName() + ": exiting on OutOfMemoryError");
-              return;
-            }
-          } else {
-            //
-            // we can run out of memory if we have too many threads
-            // log the event and sleep for a minute and give
-            // some thread(s) a chance to finish
-            //
-            LOG.warn(getName() + ": OutOfMemoryError in server select", e);
-            try {
-              Thread.sleep(60000);
-            } catch (InterruptedException ex) {
-              LOG.debug("Interrupted while sleeping");
-              return;
-            }
-          }
-        } catch (Exception e) {
-          LOG.warn(getName() + ": exception in Responder " +
-              StringUtils.stringifyException(e), e);
-        }
-      }
-      LOG.info(getName() + ": stopped");
-    }
-
-    /**
-     * If there were some calls that have not been sent out for a
-     * long time, we close the connection.
-     * @return the time of the purge.
-     */
-    private long purge(long lastPurgeTime) {
-      long now = System.currentTimeMillis();
-      if (now < lastPurgeTime + purgeTimeout) {
-        return lastPurgeTime;
-      }
-
-      ArrayList<Connection> conWithOldCalls = new ArrayList<Connection>();
-      // get the list of channels from list of keys.
-      synchronized (writeSelector.keys()) {
-        for (SelectionKey key : writeSelector.keys()) {
-          Connection connection = (Connection) key.attachment();
-          if (connection == null) {
-            throw new IllegalStateException("Coding error: SelectionKey key without attachment.");
-          }
-          Call call = connection.responseQueue.peekFirst();
-          if (call != null && now > call.timestamp + purgeTimeout) {
-            conWithOldCalls.add(call.connection);
-          }
-        }
-      }
-
-      // Seems safer to close the connection outside of the synchronized loop...
-      for (Connection connection : conWithOldCalls) {
-        closeConnection(connection);
-      }
-
-      return now;
-    }
-
-    private void doAsyncWrite(SelectionKey key) throws IOException {
-      Connection connection = (Connection) key.attachment();
-      if (connection == null) {
-        throw new IOException("doAsyncWrite: no connection");
-      }
-      if (key.channel() != connection.channel) {
-        throw new IOException("doAsyncWrite: bad channel");
-      }
-
-      if (processAllResponses(connection)) {
-        try {
-          // We wrote everything, so we don't need to be told when the socket is ready for
-          //  write anymore.
-         key.interestOps(0);
-        } catch (CancelledKeyException e) {
-          /* The Listener/reader might have closed the socket.
-           * We don't explicitly cancel the key, so not sure if this will
-           * ever fire.
-           * This warning could be removed.
-           */
-          LOG.warn("Exception while changing ops : " + e);
-        }
-      }
-    }
-
-    /**
-     * Process the response for this call. You need to have the lock on
-     * {@link org.apache.hadoop.hbase.ipc.RpcServer.Connection#responseWriteLock}
-     *
-     * @param call the call
-     * @return true if we processed the call fully, false otherwise.
-     * @throws IOException
-     */
-    private boolean processResponse(final Call call) throws IOException {
-      boolean error = true;
-      try {
-        // Send as much data as we can in the non-blocking fashion
-        long numBytes = channelWrite(call.connection.channel, call.response);
-        if (numBytes < 0) {
-          throw new HBaseIOException("Error writing on the socket " +
-            "for the call:" + call.toShortString());
-        }
-        error = false;
-      } finally {
-        if (error) {
-          LOG.debug(getName() + call.toShortString() + ": output error -- closing");
-          // We will be closing this connection itself. Mark this call as done so that all the
-          // buffer(s) it got from pool can get released
-          call.done();
-          closeConnection(call.connection);
-        }
-      }
-
-      if (!call.response.hasRemaining()) {
-        call.done();
-        return true;
-      } else {
-        return false; // Socket can't take more, we will have to come back.
-      }
-    }
-
-    /**
-     * Process all the responses for this connection
-     *
-     * @return true if all the calls were processed or someone else is doing it;
-     * false if there is still some work to do. In this case, we expect the caller to
-     * delay us.
-     * @throws IOException
-     */
-    private boolean processAllResponses(final Connection connection) throws IOException {
-      // We want only one writer on the channel for a connection at a time.
-      connection.responseWriteLock.lock();
-      try {
-        for (int i = 0; i < 20; i++) {
-          // protection if some handlers manage to need all the responder
-          Call call = connection.responseQueue.pollFirst();
-          if (call == null) {
-            return true;
-          }
-          if (!processResponse(call)) {
-            connection.responseQueue.addFirst(call);
-            return false;
-          }
-        }
-      } finally {
-        connection.responseWriteLock.unlock();
-      }
-
-      return connection.responseQueue.isEmpty();
-    }
-
-    //
-    // Enqueue a response from the application.
-    //
-    void doRespond(Call call) throws IOException {
-      boolean added = false;
-
-      // If there is already a write in progress, we don't wait. This allows to free the handlers
-      //  immediately for other tasks.
-      if (call.connection.responseQueue.isEmpty() && call.connection.responseWriteLock.tryLock()) {
-        try {
-          if (call.connection.responseQueue.isEmpty()) {
-            // If we're alone, we can try to do a direct call to the socket. It's
-            //  an optimisation to save on context switches and data transfer between cores.
-            if (processResponse(call)) {
-              return; // we're done.
-            }
-            // Too big to fit, putting ahead.
-            call.connection.responseQueue.addFirst(call);
-            added = true; // We will register to the selector later, outside of the lock.
-          }
-        } finally {
-          call.connection.responseWriteLock.unlock();
-        }
-      }
-
-      if (!added) {
-        call.connection.responseQueue.addLast(call);
-      }
-      call.responder.registerForWrite(call.connection);
-
-      // set the serve time when the response has to be sent later
-      call.timestamp = System.currentTimeMillis();
-    }
-  }
-
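The Responder.doRespond() path above is worth spelling out: the handler thread first tries to write the response inline when the queue is empty and the write lock is free, and only queues the call and registers the channel for OP_WRITE when the socket cannot take everything. A rough, hypothetical sketch of that decision flow (ResponderLike, Conn and registerForWrite are illustrative stand-ins, not HBase APIs):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.SocketChannel;
    import java.util.Deque;
    import java.util.concurrent.ConcurrentLinkedDeque;
    import java.util.concurrent.locks.ReentrantLock;

    // Hypothetical illustration of the "write inline if possible, else queue" pattern.
    class ResponderLike {
      static class Conn {
        final SocketChannel channel;
        final Deque<ByteBuffer> responseQueue = new ConcurrentLinkedDeque<>();
        final ReentrantLock responseWriteLock = new ReentrantLock();
        Conn(SocketChannel channel) { this.channel = channel; }
      }

      void doRespond(Conn conn, ByteBuffer response) throws IOException {
        boolean queuedAtFront = false;
        // Fast path: if nobody else is writing, try to push the bytes out right now.
        if (conn.responseQueue.isEmpty() && conn.responseWriteLock.tryLock()) {
          try {
            if (conn.responseQueue.isEmpty()) {
              conn.channel.write(response);           // a partial non-blocking write is fine
              if (!response.hasRemaining()) {
                return;                               // fully written, handler is done
              }
              conn.responseQueue.addFirst(response);  // socket is full, keep ordering
              queuedAtFront = true;
            }
          } finally {
            conn.responseWriteLock.unlock();
          }
        }
        if (!queuedAtFront) {
          conn.responseQueue.addLast(response);
        }
        registerForWrite(conn);  // a selector thread finishes the write asynchronously
      }

      void registerForWrite(Conn conn) {
        // In the real server this wakes a write selector; omitted here.
      }
    }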
   /** Reads calls from a connection and queues them for handling. */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(
       value="VO_VOLATILE_INCREMENT",
       justification="False positive according to http://sourceforge.net/p/findbugs/bugs/1032/")
-  public class Connection {
+  public abstract class Connection {
     // If initial preamble with version and magic has been read or not.
-    private boolean connectionPreambleRead = false;
+    protected boolean connectionPreambleRead = false;
     // If the connection header has been read or not.
-    private boolean connectionHeaderRead = false;
-    protected SocketChannel channel;
-    private ByteBuff data;
-    private CallCleanup callCleanup;
-    private ByteBuffer dataLengthBuffer;
-    protected final ConcurrentLinkedDeque<Call> responseQueue = new ConcurrentLinkedDeque<Call>();
-    private final Lock responseWriteLock = new ReentrantLock();
-    private LongAdder rpcCount = new LongAdder(); // number of outstanding rpcs
-    private long lastContact;
-    private InetAddress addr;
-    protected Socket socket;
+    protected boolean connectionHeaderRead = false;
+
+    protected CallCleanup callCleanup;
+
     // Cache the remote host & port info so that even if the socket is
     // disconnected, we can say where it used to connect to.
     protected String hostAddress;
     protected int remotePort;
-    ConnectionHeader connectionHeader;
+    protected InetAddress addr;
+    protected ConnectionHeader connectionHeader;
 
     /**
      * Codec the client asked use.
      */
-    private Codec codec;
+    protected Codec codec;
     /**
      * Compression codec the client asked us use.
      */
-    private CompressionCodec compressionCodec;
-    BlockingService service;
-
-    private AuthMethod authMethod;
-    private boolean saslContextEstablished;
-    private boolean skipInitialSaslHandshake;
-    private ByteBuffer unwrappedData;
-    // When is this set?  FindBugs wants to know!  Says NP
-    private ByteBuffer unwrappedDataLengthBuffer = ByteBuffer.allocate(4);
-    boolean useSasl;
-    SaslServer saslServer;
-    private CryptoAES cryptoAES;
-    private boolean useWrap = false;
-    private boolean useCryptoAesWrap = false;
+    protected CompressionCodec compressionCodec;
+    protected BlockingService service;
+
+    protected AuthMethod authMethod;
+    protected boolean saslContextEstablished;
+    protected boolean skipInitialSaslHandshake;
+
+    protected boolean useSasl;
+    protected SaslServer saslServer;
+    protected CryptoAES cryptoAES;
+    protected boolean useWrap = false;
+    protected boolean useCryptoAesWrap = false;
     // Fake 'call' for failed authorization response
-    private static final int AUTHORIZATION_FAILED_CALLID = -1;
-    private final Call authFailedCall = new Call(AUTHORIZATION_FAILED_CALLID, null, null, null,
-        null, null, this, null, 0, null, null, 0, null);
-    private ByteArrayOutputStream authFailedResponse =
+    protected static final int AUTHORIZATION_FAILED_CALLID = -1;
+
+    protected ByteArrayOutputStream authFailedResponse =
         new ByteArrayOutputStream();
     // Fake 'call' for SASL context setup
-    private static final int SASL_CALLID = -33;
-    private final Call saslCall = new Call(SASL_CALLID, null, null, null, null, null, this, null,
-        0, null, null, 0, null);
+    protected static final int SASL_CALLID = -33;
+
     // Fake 'call' for connection header response
-    private static final int CONNECTION_HEADER_RESPONSE_CALLID = -34;
-    private final Call setConnectionHeaderResponseCall = new Call(CONNECTION_HEADER_RESPONSE_CALLID,
-        null, null, null, null, null, this, null, 0, null, null, 0, null);
+    protected static final int CONNECTION_HEADER_RESPONSE_CALLID = -34;
 
     // was authentication allowed with a fallback to simple auth
-    private boolean authenticatedWithFallback;
+    protected boolean authenticatedWithFallback;
 
-    private boolean retryImmediatelySupported = false;
+    protected boolean retryImmediatelySupported = false;
 
     public UserGroupInformation attemptingUser = null; // user name before auth
     protected User user = null;
     protected UserGroupInformation ugi = null;
 
-    public Connection(SocketChannel channel, long lastContact) {
-      this.channel = channel;
-      this.lastContact = lastContact;
-      this.data = null;
+    public Connection() {
       this.callCleanup = null;
-      this.dataLengthBuffer = ByteBuffer.allocate(4);
-      this.socket = channel.socket();
-      this.addr = socket.getInetAddress();
-      if (addr == null) {
-        this.hostAddress = "*Unknown*";
-      } else {
-        this.hostAddress = addr.getHostAddress();
-      }
-      this.remotePort = socket.getPort();
-      if (socketSendBufferSize != 0) {
-        try {
-          socket.setSendBufferSize(socketSendBufferSize);
-        } catch (IOException e) {
-          LOG.warn("Connection: unable to set socket send buffer size to " +
-                   socketSendBufferSize);
-        }
-      }
     }
 
-      @Override
+    @Override
     public String toString() {
       return getHostAddress() + ":" + remotePort;
     }
@@ -1468,10 +787,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return remotePort;
     }
 
-    public void setLastContact(long lastContact) {
-      this.lastContact = lastContact;
-    }
-
     public VersionInfo getVersionInfo() {
       if (connectionHeader.hasVersionInfo()) {
         return connectionHeader.getVersionInfo();
@@ -1479,26 +794,13 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return null;
     }
 
-    public long getLastContact() {
-      return lastContact;
-    }
-
-    /* Return true if the connection has no outstanding rpc */
-    private boolean isIdle() {
-      return rpcCount.sum() == 0;
-    }
-
-    /* Decrement the outstanding RPC count */
-    protected void decRpcCount() {
-      rpcCount.decrement();
-    }
-
-    /* Increment the outstanding RPC count */
-    protected void incRpcCount() {
-      rpcCount.increment();
+    protected String getFatalConnectionString(final int version, final byte authByte) {
+      return "serverVersion=" + CURRENT_VERSION +
+      ", clientVersion=" + version + ", authMethod=" + authByte +
+      ", authSupported=" + (authMethod != null) + " from " + toString();
     }
 
-    private UserGroupInformation getAuthorizedUgi(String authorizedId)
+    protected UserGroupInformation getAuthorizedUgi(String authorizedId)
         throws IOException {
       UserGroupInformation authorizedUgi;
       if (authMethod == AuthMethod.DIGEST) {
@@ -1517,527 +819,20 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return authorizedUgi;
     }
 
-    private void saslReadAndProcess(ByteBuff saslToken) throws IOException,
-        InterruptedException {
-      if (saslContextEstablished) {
-        if (LOG.isTraceEnabled())
-          LOG.trace("Have read input token of size " + saslToken.limit()
-              + " for processing by saslServer.unwrap()");
-
-        if (!useWrap) {
-          processOneRpc(saslToken);
-        } else {
-          byte[] b = saslToken.hasArray() ? saslToken.array() : saslToken.toBytes();
-          byte [] plaintextData;
-          if (useCryptoAesWrap) {
-            // unwrap with CryptoAES
-            plaintextData = cryptoAES.unwrap(b, 0, b.length);
-          } else {
-            plaintextData = saslServer.unwrap(b, 0, b.length);
-          }
-          processUnwrappedData(plaintextData);
-        }
-      } else {
-        byte[] replyToken;
-        try {
-          if (saslServer == null) {
-            switch (authMethod) {
-            case DIGEST:
-              if (secretManager == null) {
-                throw new AccessDeniedException(
-                    "Server is not configured to do DIGEST authentication.");
-              }
-              saslServer = Sasl.createSaslServer(AuthMethod.DIGEST
-                  .getMechanismName(), null, SaslUtil.SASL_DEFAULT_REALM,
-                  HBaseSaslRpcServer.getSaslProps(), new SaslDigestCallbackHandler(
-                      secretManager, this));
-              break;
-            default:
-              UserGroupInformation current = UserGroupInformation.getCurrentUser();
-              String fullName = current.getUserName();
-              if (LOG.isDebugEnabled()) {
-                LOG.debug("Kerberos principal name is " + fullName);
-              }
-              final String names[] = SaslUtil.splitKerberosName(fullName);
-              if (names.length != 3) {
-                throw new AccessDeniedException(
-                    "Kerberos principal name does NOT have the expected "
-                        + "hostname part: " + fullName);
-              }
-              current.doAs(new PrivilegedExceptionAction<Object>() {
-                @Override
-                public Object run() throws SaslException {
-                  saslServer = Sasl.createSaslServer(AuthMethod.KERBEROS
-                      .getMechanismName(), names[0], names[1],
-                      HBaseSaslRpcServer.getSaslProps(), new SaslGssCallbackHandler());
-                  return null;
-                }
-              });
-            }
-            if (saslServer == null)
-              throw new AccessDeniedException(
-                  "Unable to find SASL server implementation for "
-                      + authMethod.getMechanismName());
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Created SASL server with mechanism = " + authMethod.getMechanismName());
-            }
-          }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Have read input token of size " + saslToken.limit()
-                + " for processing by saslServer.evaluateResponse()");
-          }
-          replyToken = saslServer
-              .evaluateResponse(saslToken.hasArray() ? saslToken.array() : saslToken.toBytes());
-        } catch (IOException e) {
-          IOException sendToClient = e;
-          Throwable cause = e;
-          while (cause != null) {
-            if (cause instanceof InvalidToken) {
-              sendToClient = (InvalidToken) cause;
-              break;
-            }
-            cause = cause.getCause();
-          }
-          doRawSaslReply(SaslStatus.ERROR, null, sendToClient.getClass().getName(),
-            sendToClient.getLocalizedMessage());
-          metrics.authenticationFailure();
-          String clientIP = this.toString();
-          // attempting user could be null
-          AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser);
-          throw e;
-        }
-        if (replyToken != null) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Will send token of size " + replyToken.length
-                + " from saslServer.");
-          }
-          doRawSaslReply(SaslStatus.SUCCESS, new BytesWritable(replyToken), null,
-              null);
-        }
-        if (saslServer.isComplete()) {
-          String qop = (String) saslServer.getNegotiatedProperty(Sasl.QOP);
-          useWrap = qop != null && !"auth".equalsIgnoreCase(qop);
-          ugi = getAuthorizedUgi(saslServer.getAuthorizationID());
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("SASL server context established. Authenticated client: "
-              + ugi + ". Negotiated QoP is "
-              + saslServer.getNegotiatedProperty(Sasl.QOP));
-          }
-          metrics.authenticationSuccess();
-          AUDITLOG.info(AUTH_SUCCESSFUL_FOR + ugi);
-          saslContextEstablished = true;
-        }
-      }
-    }
-
     /**
-     * No protobuf encoding of raw sasl messages
+     * Set up cell block codecs
+     * @throws FatalConnectionException
      */
-    private void doRawSaslReply(SaslStatus status, Writable rv,
-        String errorClass, String error) throws IOException {
-      ByteBufferOutputStream saslResponse = null;
-      DataOutputStream out = null;
+    protected void setupCellBlockCodecs(final ConnectionHeader header)
+        throws FatalConnectionException {
+      // TODO: Plug in other supported decoders.
+      if (!header.hasCellBlockCodecClass()) return;
+      String className = header.getCellBlockCodecClass();
+      if (className == null || className.length() == 0) return;
       try {
-        // In my testing, have noticed that sasl messages are usually
-        // in the ballpark of 100-200. That's why the initial capacity is 256.
-        saslResponse = new ByteBufferOutputStream(256);
-        out = new DataOutputStream(saslResponse);
-        out.writeInt(status.state); // write status
-        if (status == SaslStatus.SUCCESS) {
-          rv.write(out);
-        } else {
-          WritableUtils.writeString(out, errorClass);
-          WritableUtils.writeString(out, error);
-        }
-        saslCall.setSaslTokenResponse(saslResponse.getByteBuffer());
-        saslCall.responder = responder;
-        saslCall.sendResponseIfReady();
-      } finally {
-        if (saslResponse != null) {
-          saslResponse.close();
-        }
-        if (out != null) {
-          out.close();
-        }
-      }
-    }
-
-    /**
-     * Send the response for connection header
-     */
-    private void doConnectionHeaderResponse(byte[] wrappedCipherMetaData) throws IOException {
-      ByteBufferOutputStream response = null;
-      DataOutputStream out = null;
-      try {
-        response = new ByteBufferOutputStream(wrappedCipherMetaData.length + 4);
-        out = new DataOutputStream(response);
-        out.writeInt(wrappedCipherMetaData.length);
-        out.write(wrappedCipherMetaData);
-
-        setConnectionHeaderResponseCall.setConnectionHeaderResponse(response.getByteBuffer());
-        setConnectionHeaderResponseCall.responder = responder;
-        setConnectionHeaderResponseCall.sendResponseIfReady();
-      } finally {
-        if (out != null) {
-          out.close();
-        }
-        if (response != null) {
-          response.close();
-        }
-      }
-    }
-
-    private void disposeSasl() {
-      if (saslServer != null) {
-        try {
-          saslServer.dispose();
-          saslServer = null;
-        } catch (SaslException ignored) {
-          // Ignored. This is being disposed of anyway.
-        }
-      }
-    }
-
-    private int readPreamble() throws IOException {
-      int count;
-      // Check for 'HBas' magic.
-      this.dataLengthBuffer.flip();
-      if (!Arrays.equals(HConstants.RPC_HEADER, dataLengthBuffer.array())) {
-        return doBadPreambleHandling("Expected HEADER=" +
-            Bytes.toStringBinary(HConstants.RPC_HEADER) +
-            " but received HEADER=" + Bytes.toStringBinary(dataLengthBuffer.array()) +
-            " from " + toString());
-      }
-      // Now read the next two bytes, the version and the auth to use.
-      ByteBuffer versionAndAuthBytes = ByteBuffer.allocate(2);
-      count = channelRead(channel, versionAndAuthBytes);
-      if (count < 0 || versionAndAuthBytes.remaining() > 0) {
-        return count;
-      }
-      int version = versionAndAuthBytes.get(0);
-      byte authbyte = versionAndAuthBytes.get(1);
-      this.authMethod = AuthMethod.valueOf(authbyte);
-      if (version != CURRENT_VERSION) {
-        String msg = getFatalConnectionString(version, authbyte);
-        return doBadPreambleHandling(msg, new WrongVersionException(msg));
-      }
-      if (authMethod == null) {
-        String msg = getFatalConnectionString(version, authbyte);
-        return doBadPreambleHandling(msg, new BadAuthException(msg));
-      }
-      if (isSecurityEnabled && authMethod == AuthMethod.SIMPLE) {
-        if (allowFallbackToSimpleAuth) {
-          metrics.authenticationFallback();
-          authenticatedWithFallback = true;
-        } else {
-          AccessDeniedException ae = new AccessDeniedException("Authentication is required");
-          setupResponse(authFailedResponse, authFailedCall, ae, ae.getMessage());
-          responder.doRespond(authFailedCall);
-          throw ae;
-        }
-      }
-      if (!isSecurityEnabled && authMethod != AuthMethod.SIMPLE) {
-        doRawSaslReply(SaslStatus.SUCCESS, new IntWritable(
-            SaslUtil.SWITCH_TO_SIMPLE_AUTH), null, null);
-        authMethod = AuthMethod.SIMPLE;
-        // client has already sent the initial Sasl message and we
-        // should ignore it. Both client and server should fall back
-        // to simple auth from now on.
-        skipInitialSaslHandshake = true;
-      }
-      if (authMethod != AuthMethod.SIMPLE) {
-        useSasl = true;
-      }
-
-      dataLengthBuffer.clear();
-      connectionPreambleRead = true;
-      return count;
-    }
-
-    private int read4Bytes() throws IOException {
-      if (this.dataLengthBuffer.remaining() > 0) {
-        return channelRead(channel, this.dataLengthBuffer);
-      } else {
-        return 0;
-      }
-    }
-
-
-    /**
-     * Read off the wire. If there is not enough data to read, update the connection state with
-     *  what we have and returns.
-     * @return Returns -1 if failure (and caller will close connection), else zero or more.
-     * @throws IOException
-     * @throws InterruptedException
-     */
-    public int readAndProcess() throws IOException, InterruptedException {
-      // Try and read in an int.  If new connection, the int will hold the 'HBas' HEADER.  If it
-      // does, read in the rest of the connection preamble, the version and the auth method.
-      // Else it will be length of the data to read (or -1 if a ping).  We catch the integer
-      // length into the 4-byte this.dataLengthBuffer.
-      int count = read4Bytes();
-      if (count < 0 || dataLengthBuffer.remaining() > 0) {
-        return count;
-      }
-
-      // If we have not read the connection setup preamble, look to see if that is on the wire.
-      if (!connectionPreambleRead) {
-        count = readPreamble();
-        if (!connectionPreambleRead) {
-          return count;
-        }
-
-        count = read4Bytes();
-        if (count < 0 || dataLengthBuffer.remaining() > 0) {
-          return count;
-        }
-      }
-
-      // We have read a length and we have read the preamble.  It is either the connection header
-      // or it is a request.
-      if (data == null) {
-        dataLengthBuffer.flip();
-        int dataLength = dataLengthBuffer.getInt();
-        if (dataLength == RpcClient.PING_CALL_ID) {
-          if (!useWrap) { //covers the !useSasl too
-            dataLengthBuffer.clear();
-            return 0;  //ping message
-          }
-        }
-        if (dataLength < 0) { // A data length of zero is legal.
-          throw new DoNotRetryIOException("Unexpected data length "
-              + dataLength + "!! from " + getHostAddress());
-        }
-
-        if (dataLength > maxRequestSize) {
-          String msg = "RPC data length of " + dataLength + " received from "
-              + getHostAddress() + " is greater than max allowed "
-              + maxRequestSize + ". Set \"" + MAX_REQUEST_SIZE
-              + "\" on server to override this limit (not recommended)";
-          LOG.warn(msg);
-
-          if (connectionHeaderRead && connectionPreambleRead) {
-            incRpcCount();
-            // Construct InputStream for the non-blocking SocketChannel
-            // We need the InputStream because we want to read only the request header
-            // instead of the whole rpc.
-            ByteBuffer buf = ByteBuffer.allocate(1);
-            InputStream is = new InputStream() {
-              @Override
-              public int read() throws IOException {
-                channelRead(channel, buf);
-                buf.flip();
-                int x = buf.get();
-                buf.flip();
-                return x;
-              }
-            };
-            CodedInputStream cis = CodedInputStream.newInstance(is);
-            int headerSize = cis.readRawVarint32();
-            Message.Builder builder = RequestHeader.newBuilder();
-            ProtobufUtil.mergeFrom(builder, cis, headerSize);
-            RequestHeader header = (RequestHeader) builder.build();
-
-            // Notify the client about the offending request
-            Call reqTooBig = new Call(header.getCallId(), this.service, null, null, null,
-                null, this, responder, 0, null, this.addr, 0, null);
-            metrics.exception(REQUEST_TOO_BIG_EXCEPTION);
-            // Make sure the client recognizes the underlying exception
-            // Otherwise, throw a DoNotRetryIOException.
-            if (VersionInfoUtil.hasMinimumVersion(connectionHeader.getVersionInfo(),
-                RequestTooBigException.MAJOR_VERSION, RequestTooBigException.MINOR_VERSION)) {
-              setupResponse(null, reqTooBig, REQUEST_TOO_BIG_EXCEPTION, msg);
-            } else {
-              setupResponse(null, reqTooBig, new DoNotRetryIOException(), msg);
-            }
-            // We are going to close the connection, make sure we process the response
-            // before that. In rare case when this fails, we still close the connection.
-            responseWriteLock.lock();
-            responder.processResponse(reqTooBig);
-            responseWriteLock.unlock();
-          }
-          // Close the connection
-          return -1;
-        }
-
-        // Initialize this.data with a ByteBuff.
-        // This call will allocate a ByteBuff to read request into and assign to this.data
-        // Also when we use some buffer(s) from pool, it will create a CallCleanup instance also and
-        // assign to this.callCleanup
-        initByteBuffToReadInto(dataLength);
-
-        // Increment the rpc count. This counter will be decreased when we write
-        //  the response.  If we want the connection to be detected as idle properly, we
-        //  need to keep the inc / dec correct.
-        incRpcCount();
-      }
-
-      count = channelDataRead(channel, data);
-
-      if (count >= 0 && data.remaining() == 0) { // count==0 if dataLength == 0
-        process();
-      }
-
-      return count;
-    }
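readAndProcess() above implements length-prefixed framing over a non-blocking channel: read a 4-byte length (or the 'HBas' preamble on a new connection), then accumulate exactly that many payload bytes, tolerating partial reads on every call. A simplified, hypothetical sketch of that framing loop, leaving out the preamble, SASL and size-limit branches:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.SocketChannel;

    // Hypothetical illustration of length-prefixed, partial-read-tolerant framing.
    class FrameReader {
      private final ByteBuffer lengthBuf = ByteBuffer.allocate(4);
      private ByteBuffer payload;

      /** @return -1 on EOF (caller closes), 0 if the frame is incomplete, 1 when a frame is ready. */
      int readFrame(SocketChannel channel) throws IOException {
        if (lengthBuf.hasRemaining()) {
          if (channel.read(lengthBuf) < 0) return -1;
          if (lengthBuf.hasRemaining()) return 0;   // length not fully read yet
        }
        if (payload == null) {
          lengthBuf.flip();
          payload = ByteBuffer.allocate(lengthBuf.getInt());
        }
        if (channel.read(payload) < 0) return -1;
        if (payload.hasRemaining()) return 0;        // wait for the rest of the frame
        payload.flip();
        process(payload);                            // complete request: hand off for processing
        payload = null;
        lengthBuf.clear();
        return 1;
      }

      void process(ByteBuffer frame) {
        // decode header + param here, as processOneRpc()/processRequest() do in the real server
      }
    }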
-
-    // It creates the ByteBuff and CallCleanup and assign to Connection instance.
-    private void initByteBuffToReadInto(int length) {
-      // We create random on heap buffers are read into those when
-      // 1. ByteBufferPool is not there.
-      // 2. When the size of the req is very small. Using a large sized (64 KB) buffer from pool is
-      // waste then. Also if all the reqs are of this size, we will be creating larger sized
-      // buffers and pool them permanently. This include Scan/Get request and DDL kind of reqs like
-      // RegionOpen.
-      // 3. If it is an initial handshake signal or initial connection request. Any way then
-      // condition 2 itself will match
-      // 4. When SASL use is ON.
-      if (reservoir == null || skipInitialSaslHandshake || !connectionHeaderRead || useSasl
-          || length < minSizeForReservoirUse) {
-        this.data = new SingleByteBuff(ByteBuffer.allocate(length));
-      } else {
-        Pair<ByteBuff, CallCleanup> pair = RpcServer.allocateByteBuffToReadInto(reservoir,
-            minSizeForReservoirUse, length);
-        this.data = pair.getFirst();
-        this.callCleanup = pair.getSecond();
-      }
-    }
-
-    protected int channelDataRead(ReadableByteChannel channel, ByteBuff buf) throws IOException {
-      int count = buf.read(channel);
-      if (count > 0) {
-        metrics.receivedBytes(count);
-      }
-      return count;
-    }
-
-    /**
-     * Process the data buffer and clean the connection state for the next call.
-     */
-    private void process() throws IOException, InterruptedException {
-      data.rewind();
-      try {
-        if (skipInitialSaslHandshake) {
-          skipInitialSaslHandshake = false;
-          return;
-        }
-
-        if (useSasl) {
-          saslReadAndProcess(data);
-        } else {
-          processOneRpc(data);
-        }
-
-      } finally {
-        dataLengthBuffer.clear(); // Clean for the next call
-        data = null; // For the GC
-        this.callCleanup = null;
-      }
-    }
-
-    private String getFatalConnectionString(final int version, final byte authByte) {
-      return "serverVersion=" + CURRENT_VERSION +
-      ", clientVersion=" + version + ", authMethod=" + authByte +
-      ", authSupported=" + (authMethod != null) + " from " + toString();
-    }
-
-    private int doBadPreambleHandling(final String msg) throws IOException {
-      return doBadPreambleHandling(msg, new FatalConnectionException(msg));
-    }
-
-    private int doBadPreambleHandling(final String msg, final Exception e) throws IOException {
-      LOG.warn(msg);
-      Call fakeCall = new Call(-1, null, null, null, null, null, this, responder, -1, null, null, 0,
-          null);
-      setupResponse(null, fakeCall, e, msg);
-      responder.doRespond(fakeCall);
-      // Returning -1 closes out the connection.
-      return -1;
-    }
-
-    // Reads the connection header following version
-    private void processConnectionHeader(ByteBuff buf) throws IOException {
-      if (buf.hasArray()) {
-        this.connectionHeader = ConnectionHeader.parseFrom(buf.array());
-      } else {
-        CodedInputStream cis = UnsafeByteOperations
-            .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput();
-        cis.enableAliasing(true);
-        this.connectionHeader = ConnectionHeader.parseFrom(cis);
-      }
-      String serviceName = connectionHeader.getServiceName();
-      if (serviceName == null) throw new EmptyServiceNameException();
-      this.service = getService(services, serviceName);
-      if (this.service == null) throw new UnknownServiceException(serviceName);
-      setupCellBlockCodecs(this.connectionHeader);
-      RPCProtos.ConnectionHeaderResponse.Builder chrBuilder =
-          RPCProtos.ConnectionHeaderResponse.newBuilder();
-      setupCryptoCipher(this.connectionHeader, chrBuilder);
-      responseConnectionHeader(chrBuilder);
-      UserGroupInformation protocolUser = createUser(connectionHeader);
-      if (!useSasl) {
-        ugi = protocolUser;
-        if (ugi != null) {
-          ugi.setAuthenticationMethod(AuthMethod.SIMPLE.authenticationMethod);
-        }
-        // audit logging for SASL authenticated users happens in saslReadAndProcess()
-        if (authenticatedWithFallback) {
-          LOG.warn("Allowed fallback to SIMPLE auth for " + ugi
-              + " connecting from " + getHostAddress());
-        }
-        AUDITLOG.info(AUTH_SUCCESSFUL_FOR + ugi);
-      } else {
-        // user is authenticated
-        ugi.setAuthenticationMethod(authMethod.authenticationMethod);
-        //Now we check if this is a proxy user case. If the protocol user is
-        //different from the 'user', it is a proxy user scenario. However,
-        //this is not allowed if user authenticated with DIGEST.
-        if ((protocolUser != null)
-            && (!protocolUser.getUserName().equals(ugi.getUserName()))) {
-          if (authMethod == AuthMethod.DIGEST) {
-            // Not allowed to doAs if token authentication is used
-            throw new AccessDeniedException("Authenticated user (" + ugi
-                + ") doesn't match what the client claims to be ("
-                + protocolUser + ")");
-          } else {
-            // Effective user can be different from authenticated user
-            // for simple auth or kerberos auth
-            // The user is the real user. Now we create a proxy user
-            UserGroupInformation realUser = ugi;
-            ugi = UserGroupInformation.createProxyUser(protocolUser
-                .getUserName(), realUser);
-            // Now the user is a proxy user, set Authentication method Proxy.
-            ugi.setAuthenticationMethod(AuthenticationMethod.PROXY);
-          }
-        }
-      }
-      if (connectionHeader.hasVersionInfo()) {
-        // see if this connection will support RetryImmediatelyException
-        retryImmediatelySupported = VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2);
-
-        AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort
-            + " with version info: "
-            + TextFormat.shortDebugString(connectionHeader.getVersionInfo()));
-      } else {
-        AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort
-            + " with unknown version info");
-      }
-    }
-
-    /**
-     * Set up cell block codecs
-     * @throws FatalConnectionException
-     */
-    private void setupCellBlockCodecs(final ConnectionHeader header)
-    throws FatalConnectionException {
-      // TODO: Plug in other supported decoders.
-      if (!header.hasCellBlockCodecClass()) return;
-      String className = header.getCellBlockCodecClass();
-      if (className == null || className.length() == 0) return;
-      try {
-        this.codec = (Codec)Class.forName(className).newInstance();
-      } catch (Exception e) {
-        throw new UnsupportedCellCodecException(className, e);
+        this.codec = (Codec)Class.forName(className).newInstance();
+      } catch (Exception e) {
+        throw new UnsupportedCellCodecException(className, e);
       }
       if (!header.hasCellBlockCompressorClass()) return;
       className = header.getCellBlockCompressorClass();
@@ -2050,10 +845,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
     /**
      * Set up cipher for rpc encryption with Apache Commons Crypto
+     *
      * @throws FatalConnectionException
      */
-    private void setupCryptoCipher(final ConnectionHeader header,
-        RPCProtos.ConnectionHeaderResponse.Builder chrBuilder) throws FatalConnectionException {
+    protected void setupCryptoCipher(final ConnectionHeader header,
+        RPCProtos.ConnectionHeaderResponse.Builder chrBuilder)
+        throws FatalConnectionException {
       // If simple auth, return
       if (saslServer == null) return;
       // check if rpc encryption with Crypto AES
@@ -2117,254 +914,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       }
     }
 
-    private void responseConnectionHeader(RPCProtos.ConnectionHeaderResponse.Builder chrBuilder)
-        throws FatalConnectionException {
-      // Response the connection header if Crypto AES is enabled
-      if (!chrBuilder.hasCryptoCipherMeta()) return;
-      try {
-        byte[] connectionHeaderResBytes = chrBuilder.build().toByteArray();
-        // encrypt the Crypto AES cipher meta data with sasl server, and send to client
-        byte[] unwrapped = new byte[connectionHeaderResBytes.length + 4];
-        Bytes.putBytes(unwrapped, 0, Bytes.toBytes(connectionHeaderResBytes.length), 0, 4);
-        Bytes.putBytes(unwrapped, 4, connectionHeaderResBytes, 0, connectionHeaderResBytes.length);
-
-        doConnectionHeaderResponse(saslServer.wrap(unwrapped, 0, unwrapped.length));
-      } catch (IOException ex) {
-        throw new UnsupportedCryptoException(ex.getMessage(), ex);
-      }
-    }
-
-    private void processUnwrappedData(byte[] inBuf) throws IOException,
-    InterruptedException {
-      ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(inBuf));
-      // Read all RPCs contained in the inBuf, even partial ones
-      while (true) {
-        int count;
-        if (unwrappedDataLengthBuffer.remaining() > 0) {
-          count = channelRead(ch, unwrappedDataLengthBuffer);
-          if (count <= 0 || unwrappedDataLengthBuffer.remaining() > 0)
-            return;
-        }
-
-        if (unwrappedData == null) {
-          unwrappedDataLengthBuffer.flip();
-          int unwrappedDataLength = unwrappedDataLengthBuffer.getInt();
-
-          if (unwrappedDataLength == RpcClient.PING_CALL_ID) {
-            if (LOG.isDebugEnabled())
-              LOG.debug("Received ping message");
-            unwrappedDataLengthBuffer.clear();
-            continue; // ping message
-          }
-          unwrappedData = ByteBuffer.allocate(unwrappedDataLength);
-        }
-
-        count = channelRead(ch, unwrappedData);
-        if (count <= 0 || unwrappedData.remaining() > 0)
-          return;
-
-        if (unwrappedData.remaining() == 0) {
-          unwrappedDataLengthBuffer.clear();
-          unwrappedData.flip();
-          processOneRpc(new SingleByteBuff(unwrappedData));
-          unwrappedData = null;
-        }
-      }
-    }
-
-    private void processOneRpc(ByteBuff buf) throws IOException, InterruptedException {
-      if (connectionHeaderRead) {
-        processRequest(buf);
-      } else {
-        processConnectionHeader(buf);
-        this.connectionHeaderRead = true;
-        if (!authorizeConnection()) {
-          // Throw FatalConnectionException wrapping ACE so client does right thing and closes
-          // down the connection instead of trying to read non-existent return.
-          throw new AccessDeniedException("Connection from " + this + " for service " +
-            connectionHeader.getServiceName() + " is unauthorized for user: " + ugi);
-        }
-        this.user = userProvider.create(this.ugi);
-      }
-    }
-
-    /**
-     * @param buf Has the request header and the request param and optionally encoded data buffer
-     * all in this one array.
-     * @throws IOException
-     * @throws InterruptedException
-     */
-    protected void processRequest(ByteBuff buf) throws IOException, InterruptedException {
-      long totalRequestSize = buf.limit();
-      int offset = 0;
-      // Here we read in the header.  We avoid having pb
-      // do its default 4k allocation for CodedInputStream.  We force it to use backing array.
-      CodedInputStream cis;
-      if (buf.hasArray()) {
-        cis = UnsafeByteOperations.unsafeWrap(buf.array(), 0, buf.limit()).newCodedInput();
-      } else {
-        cis = UnsafeByteOperations
-            .unsafeWrap(new ByteBuffByteInput(buf, 0, buf.limit()), 0, buf.limit()).newCodedInput();
-      }
-      cis.enableAliasing(true);
-      int headerSize = cis.readRawVarint32();
-      offset = cis.getTotalBytesRead();
-      Message.Builder builder = RequestHeader.newBuilder();
-      ProtobufUtil.mergeFrom(builder, cis, headerSize);
-      RequestHeader header = (RequestHeader) builder.build();
-      offset += headerSize;
-      int id = header.getCallId();
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("RequestHeader " + TextFormat.shortDebugString(header) +
-          " totalRequestSize: " + totalRequestSize + " bytes");
-      }
-      // Enforcing the call queue size, this triggers a retry in the client
-      // This is a bit late to be doing this check - we have already read in the total request.
-      if ((totalRequestSize + callQueueSizeInBytes.sum()) > maxQueueSizeInBytes) {
-        final Call callTooBig =
-          new Call(id, this.service, null, null, null, null, this,
-            responder, totalRequestSize, null, null, 0, this.callCleanup);
-        ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
-        metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
-        setupResponse(responseBuffer, callTooBig, CALL_QUEUE_TOO_BIG_EXCEPTION,
-            "Call queue is full on " + server.getServerName() +
-                ", is hbase.ipc.server.max.callqueue.size too small?");
-        responder.doRespond(callTooBig);
-        return;
-      }
-      MethodDescriptor md = null;
-      Message param = null;
-      CellScanner cellScanner = null;
-      try {
-        if (header.hasRequestParam() && header.getRequestParam()) {
-          md = this.service.getDescriptorForType().findMethodByName(header.getMethodName());
-          if (md == null) throw new UnsupportedOperationException(header.getMethodName());
-          builder = this.service.getRequestPrototype(md).newBuilderForType();
-          cis.resetSizeCounter();
-          int paramSize = cis.readRawVarint32();
-          offset += cis.getTotalBytesRead();
-          if (builder != null) {
-            ProtobufUtil.mergeFrom(builder, cis, paramSize);
-            param = builder.build();
-          }
-          offset += paramSize;
-        } else {
-          // currently header must have request param, so we directly throw exception here
-          String msg = "Invalid request header: " + TextFormat.shortDebugString(header)
-              + ", should have param set in it";
-          LOG.warn(msg);
-          throw new DoNotRetryIOException(msg);
-        }
-        if (header.hasCellBlockMeta()) {
-          buf.position(offset);
-          ByteBuff dup = buf.duplicate();
-          dup.limit(offset + header.getCellBlockMeta().getLength());
-          cellScanner = cellBlockBuilder.createCellScannerReusingBuffers(this.codec,
-              this.compressionCodec, dup);
-        }
-      } catch (Throwable t) {
-        InetSocketAddress address = getListenerAddress();
-        String msg = (address != null ? address : "(channel closed)") +
-            " is unable to read call parameter from client " + getHostAddress();
-        LOG.warn(msg, t);
-
-        metrics.exception(t);
-
-        // probably the hbase hadoop version does not match the running hadoop version
-        if (t instanceof LinkageError) {
-          t = new DoNotRetryIOException(t);
-        }
-        // If the method is not present on the server, do not retry.
-        if (t instanceof UnsupportedOperationException) {
-          t = new DoNotRetryIOException(t);
-        }
-
-        final Call readParamsFailedCall =
-          new Call(id, this.service, null, null, null, null, this,
-            responder, totalRequestSize, null, null, 0, this.callCleanup);
-        ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
-        setupResponse(responseBuffer, readParamsFailedCall, t,
-          msg + "; " + t.getMessage());
-        responder.doRespond(readParamsFailedCall);
-        return;
-      }
-
-      TraceInfo traceInfo = header.hasTraceInfo()
-          ? new TraceInfo(header.getTraceInfo().getTraceId(), header.getTraceInfo().getParentId())
-          : null;
-      int timeout = 0;
-      if (header.hasTimeout() && header.getTimeout() > 0){
-        timeout = Math.max(minClientRequestTimeout, header.getTimeout());
-      }
-      Call call = new Call(id, this.service, md, header, param, cellScanner, this, responder,
-          totalRequestSize, traceInfo, this.addr, timeout, this.callCleanup);
-
-      if (!scheduler.dispatch(new CallRunner(RpcServer.this, call))) {
-        callQueueSizeInBytes.add(-1 * call.getSize());
-
-        ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
-        metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
-        setupResponse(responseBuffer, call, CALL_QUEUE_TOO_BIG_EXCEPTION,
-            "Call queue is full on " + server.getServerName() +
-                ", too many items queued ?");
-        responder.doRespond(call);
-      }
-    }
-
     private ByteString getByteString(byte[] bytes) {
       // return singleton to reduce object allocation
       return (bytes.length == 0) ? ByteString.EMPTY : ByteString.copyFrom(bytes);
     }
 
-    private boolean authorizeConnection() throws IOException {
-      try {
-        // If auth method is DIGEST, the token was obtained by the
-        // real user for the effective user, therefore not required to
-        // authorize real user. doAs is allowed only for simple or kerberos
-        // authentication
-        if (ugi != null && ugi.getRealUser() != null
-            && (authMethod != AuthMethod.DIGEST)) {
-          ProxyUsers.authorize(ugi, this.getHostAddress(), conf);
-        }
-        authorize(ugi, connectionHeader, getHostInetAddress());
-        metrics.authorizationSuccess();
-      } catch (AuthorizationException ae) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Connection authorization failed: " + ae.getMessage(), ae);
-        }
-        metrics.authorizationFailure();
-        setupResponse(authFailedResponse, authFailedCall,
-          new AccessDeniedException(ae), ae.getMessage());
-        responder.doRespond(authFailedCall);
-        return false;
-      }
-      return true;
-    }
-
-    protected synchronized void close() {
-      disposeSasl();
-      data = null;
-      callCleanup = null;
-      if (!channel.isOpen())
-        return;
-      try {socket.shutdownOutput();} catch(Exception ignored) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Ignored exception", ignored);
-        }
-      }
-      if (channel.isOpen()) {
-        try {channel.close();} catch(Exception ignored) {}
-      }
-      try {
-        socket.close();
-      } catch(Exception ignored) {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Ignored exception", ignored);
-        }
-      }
-    }
-
-    private UserGroupInformation createUser(ConnectionHeader head) {
+    protected UserGroupInformation createUser(ConnectionHeader head) {
       UserGroupInformation ugi = null;
 
       if (!head.hasUserInfo()) {
@@ -2390,6 +945,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       }
       return ugi;
     }
+
+    public abstract boolean isConnectionOpen();
+
   }
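With Connection now abstract, the transport-specific state that used to live here (SocketChannel, Socket, idle tracking, the read buffers) moves into subclasses, which also implement the new isConnectionOpen() hook. A hedged sketch of the shape such a subclass could take for an NIO transport (NioConnection is an illustrative name, not the class this patch introduces):

    import java.nio.channels.SocketChannel;

    // Illustrative only: the outline of a transport-specific Connection subclass.
    public class NioConnection /* extends RpcServer.Connection */ {
      private final SocketChannel channel;
      private volatile long lastContact;

      public NioConnection(SocketChannel channel, long lastContact) {
        this.channel = channel;
        this.lastContact = lastContact;
      }

      public void setLastContact(long lastContact) {
        this.lastContact = lastContact;
      }

      public long getLastContact() {
        return lastContact;
      }

      // The abstract hook added by this refactoring: each transport reports liveness its own way.
      public boolean isConnectionOpen() {
        return channel != null && channel.isOpen();
      }
    }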
 
   /**
@@ -2459,43 +1017,30 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     this.services = services;
     this.bindAddress = bindAddress;
     this.conf = conf;
-    this.socketSendBufferSize = 0;
     // See declaration above for documentation on what this size is.
     this.maxQueueSizeInBytes =
       this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
-    this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10);
-    this.purgeTimeout = conf.getLong("hbase.ipc.client.call.purge.timeout",
-      2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
+
     this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME);
     this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE, DEFAULT_WARN_RESPONSE_SIZE);
     this.minClientRequestTimeout = conf.getInt(MIN_CLIENT_REQUEST_TIMEOUT,
         DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT);
     this.maxRequestSize = conf.getInt(MAX_REQUEST_SIZE, DEFAULT_MAX_REQUEST_SIZE);
 
-    // Start the listener here and let it bind to the port
-    listener = new Listener(name);
-    this.port = listener.getAddress().getPort();
-
     this.metrics = new MetricsHBaseServer(name, new MetricsHBaseServerWrapperImpl(this));
     this.tcpNoDelay = conf.getBoolean("hbase.ipc.server.tcpnodelay", true);
     this.tcpKeepAlive = conf.getBoolean("hbase.ipc.server.tcpkeepalive", true);
 
     this.cellBlockBuilder = new CellBlockBuilder(conf);
 
-
-    // Create the responder here
-    responder = new Responder();
-    connectionManager = new ConnectionManager();
     this.authorize = conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
     this.userProvider = UserProvider.instantiate(conf);
     this.isSecurityEnabled = userProvider.isHBaseSecurityEnabled();
     if (isSecurityEnabled) {
       HBaseSaslRpcServer.init(conf);
     }
-    initReconfigurable(conf);
 
     this.scheduler = scheduler;
-    this.scheduler.init(new RpcSchedulerContext(this));
   }
 
   @VisibleForTesting
@@ -2507,11 +1052,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   public void onConfigurationChange(Configuration newConf) {
     initReconfigurable(newConf);
     if (scheduler instanceof ConfigurationObserver) {
-      ((ConfigurationObserver)scheduler).onConfigurationChange(newConf);
+      ((ConfigurationObserver) scheduler).onConfigurationChange(newConf);
     }
   }
 
-  private void initReconfigurable(Configuration confToLoad) {
+  protected void initReconfigurable(Configuration confToLoad) {
     this.allowFallbackToSimpleAuth = confToLoad.getBoolean(FALLBACK_TO_INSECURE_CLIENT_AUTH, false);
     if (isSecurityEnabled && allowFallbackToSimpleAuth) {
       LOG.warn("********* WARNING! *********");
@@ -2525,64 +1070,15 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     }
   }
 
-  /**
-   * Subclasses of HBaseServer can override this to provide their own
-   * Connection implementations.
-   */
-  protected Connection getConnection(SocketChannel channel, long time) {
-    return new Connection(channel, time);
-  }
-
-  /**
-   * Setup response for the RPC Call.
-   *
-   * @param response buffer to serialize the response into
-   * @param call {@link Call} to which we are setting up the response
-   * @param error error message, if the call failed
-   * @throws IOException
-   */
-  private void setupResponse(ByteArrayOutputStream response, Call call, Throwable t, String error)
-  throws IOException {
-    if (response != null) response.reset();
-    call.setResponse(null, null, t, error);
-  }
-
-  protected void closeConnection(Connection connection) {
-    connectionManager.close(connection);
-  }
-
   Configuration getConf() {
     return conf;
   }
 
-  /** Sets the socket buffer size used for responding to RPCs.
-   * @param size send size
-   */
-  @Override
-  public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; }
-
   @Override
   public boolean isStarted() {
     return this.started;
   }
 
-  /** Starts the service.  Must be called before any calls will be handled. */
-  @Override
-  public synchronized void start() {
-    if (started) return;
-    authTokenSecretMgr = createSecretManager();
-    if (authTokenSecretMgr != null) {
-      setSecretManager(authTokenSecretMgr);
-      authTokenSecretMgr.start();
-    }
-    this.authManager = new ServiceAuthorizationManager();
-    HBasePolicyProvider.init(conf, authManager);
-    responder.start();
-    listener.start();
-    scheduler.start();
-    started = true;
-  }
-
   @Override
   public synchronized void refreshAuthManager(PolicyProvider pp) {
     // Ignore warnings that this should be accessed in a static way instead of via an instance;
@@ -2590,7 +1086,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     this.authManager.refresh(this.conf, pp);
   }
 
-  private AuthenticationTokenSecretManager createSe

<TRUNCATED>

[43/50] [abbrv] hbase git commit: HBASE-17336 get/update replication peer config requests should be routed through master

Posted by sy...@apache.org.
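For orientation, the generated ReplicationProtos.java changes below correspond to two new messages that this patch adds to Replication.proto. A minimal sketch of those definitions, reconstructed from the field comments visible in the generated accessors (the exact .proto text is an assumption; the field names, numbers, and the ReplicationPeer type imported from ZooKeeper.proto are taken from the code below):

  // Reconstructed sketch, not the literal patch text.
  message GetReplicationPeerConfigRequest {
    required string peer_id = 1;
  }

  message GetReplicationPeerConfigResponse {
    required string peer_id = 1;
    required ReplicationPeer peer_config = 2;  // ReplicationPeer is defined in ZooKeeper.proto
  }

A master-side GetReplicationPeerConfig RPC taking the request and returning the response is implied by the HBASE-17336 subject line; the service wiring itself is not part of this excerpt.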
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
index 7a17985..51d3fc9 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
@@ -3923,6 +3923,2458 @@ public final class ReplicationProtos {
 
   }
 
+  public interface GetReplicationPeerConfigRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.GetReplicationPeerConfigRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    boolean hasPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    java.lang.String getPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetReplicationPeerConfigRequest}
+   */
+  public  static final class GetReplicationPeerConfigRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.GetReplicationPeerConfigRequest)
+      GetReplicationPeerConfigRequestOrBuilder {
+    // Use GetReplicationPeerConfigRequest.newBuilder() to construct.
+    private GetReplicationPeerConfigRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private GetReplicationPeerConfigRequest() {
+      peerId_ = "";
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetReplicationPeerConfigRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000001;
+              peerId_ = bs;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_GetReplicationPeerConfigRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_GetReplicationPeerConfigRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int PEER_ID_FIELD_NUMBER = 1;
+    private volatile java.lang.Object peerId_;
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public boolean hasPeerId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public java.lang.String getPeerId() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          peerId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        peerId_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasPeerId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, peerId_);
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, peerId_);
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest) obj;
+
+      boolean result = true;
+      result = result && (hasPeerId() == other.hasPeerId());
+      if (hasPeerId()) {
+        result = result && getPeerId()
+            .equals(other.getPeerId());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasPeerId()) {
+        hash = (37 * hash) + PEER_ID_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerId().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetReplicationPeerConfigRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.GetReplicationPeerConfigRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_GetReplicationPeerConfigRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_GetReplicationPeerConfigRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        peerId_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_GetReplicationPeerConfigRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.peerId_ = peerId_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance()) return this;
+        if (other.hasPeerId()) {
+          bitField0_ |= 0x00000001;
+          peerId_ = other.peerId_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasPeerId()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.lang.Object peerId_ = "";
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public boolean hasPeerId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public java.lang.String getPeerId() {
+        java.lang.Object ref = peerId_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            peerId_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getPeerIdBytes() {
+        java.lang.Object ref = peerId_;
+        if (ref instanceof String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          peerId_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerId(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder clearPeerId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        peerId_ = getDefaultInstance().getPeerId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerIdBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetReplicationPeerConfigRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetReplicationPeerConfigRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetReplicationPeerConfigRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<GetReplicationPeerConfigRequest>() {
+      public GetReplicationPeerConfigRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new GetReplicationPeerConfigRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetReplicationPeerConfigRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetReplicationPeerConfigRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface GetReplicationPeerConfigResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.GetReplicationPeerConfigResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    boolean hasPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    java.lang.String getPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes();
+
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    boolean hasPeerConfig();
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig();
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.GetReplicationPeerConfigResponse}
+   */
+  public  static final class GetReplicationPeerConfigResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.GetReplicationPeerConfigResponse)
+      GetReplicationPeerConfigResponseOrBuilder {
+    // Use GetReplicationPeerConfigResponse.newBuilder() to construct.
+    private GetReplicationPeerConfigResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private GetReplicationPeerConfigResponse() {
+      peerId_ = "";
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private GetReplicationPeerConfigResponse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000001;
+              peerId_ = bs;
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = peerConfig_.toBuilder();
+              }
+              peerConfig_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(peerConfig_);
+                peerConfig_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_GetReplicationPeerConfigResponse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_GetReplicationPeerConfigResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int PEER_ID_FIELD_NUMBER = 1;
+    private volatile java.lang.Object peerId_;
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public boolean hasPeerId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public java.lang.String getPeerId() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          peerId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        peerId_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    public static final int PEER_CONFIG_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer peerConfig_;
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    public boolean hasPeerConfig() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig() {
+      return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+    }
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder() {
+      return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasPeerId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasPeerConfig()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getPeerConfig().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, peerId_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, getPeerConfig());
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, peerId_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, getPeerConfig());
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse) obj;
+
+      boolean result = true;
+      result = result && (hasPeerId() == other.hasPeerId());
+      if (hasPeerId()) {
+        result = result && getPeerId()
+            .equals(other.getPeerId());
+      }
+      result = result && (hasPeerConfig() == other.hasPeerConfig());
+      if (hasPeerConfig()) {
+        result = result && getPeerConfig()
+            .equals(other.getPeerConfig());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasPeerId()) {
+        hash = (37 * hash) + PEER_ID_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerId().hashCode();
+      }
+      if (hasPeerConfig()) {
+        hash = (37 * hash) + PEER_CONFIG_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerConfig().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.GetReplicationPeerConfigResponse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.GetReplicationPeerConfigResponse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_GetReplicationPeerConfigResponse_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_GetReplicationPeerConfigResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getPeerConfigFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        peerId_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (peerConfigBuilder_ == null) {
+          peerConfig_ = null;
+        } else {
+          peerConfigBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_GetReplicationPeerConfigResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.peerId_ = peerId_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (peerConfigBuilder_ == null) {
+          result.peerConfig_ = peerConfig_;
+        } else {
+          result.peerConfig_ = peerConfigBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance()) return this;
+        if (other.hasPeerId()) {
+          bitField0_ |= 0x00000001;
+          peerId_ = other.peerId_;
+          onChanged();
+        }
+        if (other.hasPeerConfig()) {
+          mergePeerConfig(other.getPeerConfig());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasPeerId()) {
+          return false;
+        }
+        if (!hasPeerConfig()) {
+          return false;
+        }
+        if (!getPeerConfig().isInitialized()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.lang.Object peerId_ = "";
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public boolean hasPeerId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public java.lang.String getPeerId() {
+        java.lang.Object ref = peerId_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            peerId_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getPeerIdBytes() {
+        java.lang.Object ref = peerId_;
+        if (ref instanceof String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          peerId_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerId(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder clearPeerId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        peerId_ = getDefaultInstance().getPeerId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerIdBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer peerConfig_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder> peerConfigBuilder_;
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public boolean hasPeerConfig() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig() {
+        if (peerConfigBuilder_ == null) {
+          return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+        } else {
+          return peerConfigBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder setPeerConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer value) {
+        if (peerConfigBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          peerConfig_ = value;
+          onChanged();
+        } else {
+          peerConfigBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder setPeerConfig(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder builderForValue) {
+        if (peerConfigBuilder_ == null) {
+          peerConfig_ = builderForValue.build();
+          onChanged();
+        } else {
+          peerConfigBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder mergePeerConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer value) {
+        if (peerConfigBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              peerConfig_ != null &&
+              peerConfig_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance()) {
+            peerConfig_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.newBuilder(peerConfig_).mergeFrom(value).buildPartial();
+          } else {
+            peerConfig_ = value;
+          }
+          onChanged();
+        } else {
+          peerConfigBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder clearPeerConfig() {
+        if (peerConfigBuilder_ == null) {
+          peerConfig_ = null;
+          onChanged();
+        } else {
+          peerConfigBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder getPeerConfigBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getPeerConfigFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder() {
+        if (peerConfigBuilder_ != null) {
+          return peerConfigBuilder_.getMessageOrBuilder();
+        } else {
+          return peerConfig_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder> 
+          getPeerConfigFieldBuilder() {
+        if (peerConfigBuilder_ == null) {
+          peerConfigBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder>(
+                  getPeerConfig(),
+                  getParentForChildren(),
+                  isClean());
+          peerConfig_ = null;
+        }
+        return peerConfigBuilder_;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.GetReplicationPeerConfigResponse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.GetReplicationPeerConfigResponse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetReplicationPeerConfigResponse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<GetReplicationPeerConfigResponse>() {
+      public GetReplicationPeerConfigResponse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new GetReplicationPeerConfigResponse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetReplicationPeerConfigResponse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<GetReplicationPeerConfigResponse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface UpdateReplicationPeerConfigRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.UpdateReplicationPeerConfigRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    boolean hasPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    java.lang.String getPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes();
+
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    boolean hasPeerConfig();
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig();
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.UpdateReplicationPeerConfigRequest}
+   */
+  public  static final class UpdateReplicationPeerConfigRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.UpdateReplicationPeerConfigRequest)
+      UpdateReplicationPeerConfigRequestOrBuilder {
+    // Use UpdateReplicationPeerConfigRequest.newBuilder() to construct.
+    private UpdateReplicationPeerConfigRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private UpdateReplicationPeerConfigRequest() {
+      peerId_ = "";
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private UpdateReplicationPeerConfigRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000001;
+              peerId_ = bs;
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = peerConfig_.toBuilder();
+              }
+              peerConfig_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(peerConfig_);
+                peerConfig_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_UpdateReplicationPeerConfigRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_UpdateReplicationPeerConfigRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int PEER_ID_FIELD_NUMBER = 1;
+    private volatile java.lang.Object peerId_;
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public boolean hasPeerId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public java.lang.String getPeerId() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          peerId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        peerId_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    public static final int PEER_CONFIG_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer peerConfig_;
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    public boolean hasPeerConfig() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig() {
+      return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+    }
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder() {
+      return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasPeerId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasPeerConfig()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getPeerConfig().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, peerId_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, getPeerConfig());
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, peerId_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, getPeerConfig());
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest) obj;
+
+      boolean result = true;
+      result = result && (hasPeerId() == other.hasPeerId());
+      if (hasPeerId()) {
+        result = result && getPeerId()
+            .equals(other.getPeerId());
+      }
+      result = result && (hasPeerConfig() == other.hasPeerConfig());
+      if (hasPeerConfig()) {
+        result = result && getPeerConfig()
+            .equals(other.getPeerConfig());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasPeerId()) {
+        hash = (37 * hash) + PEER_ID_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerId().hashCode();
+      }
+      if (hasPeerConfig()) {
+        hash = (37 * hash) + PEER_CONFIG_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerConfig().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.UpdateReplicationPeerConfigRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.UpdateReplicationPeerConfigRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_UpdateReplicationPeerConfigRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_UpdateReplicationPeerConfigRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getPeerConfigFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        peerId_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (peerConfigBuilder_ == null) {
+          peerConfig_ = null;
+        } else {
+          peerConfigBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_UpdateReplicationPeerConfigRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.peerId_ = peerId_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (peerConfigBuilder_ == null) {
+          result.peerConfig_ = peerConfig_;
+        } else {
+          result.peerConfig_ = peerConfigBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance()) return this;
+        if (other.hasPeerId()) {
+          bitField0_ |= 0x00000001;
+          peerId_ = other.peerId_;
+          onChanged();
+        }
+        if (other.hasPeerConfig()) {
+          mergePeerConfig(other.getPeerConfig());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasPeerId()) {
+          return false;
+        }
+        if (!hasPeerConfig()) {
+          return false;
+        }
+        if (!getPeerConfig().isInitialized()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.lang.Object peerId_ = "";
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public boolean hasPeerId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public java.lang.String getPeerId() {
+        java.lang.Object ref = peerId_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            peerId_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getPeerIdBytes() {
+        java.lang.Object ref = peerId_;
+        if (ref instanceof String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          peerId_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerId(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder clearPeerId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        peerId_ = getDefaultInstance().getPeerId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerIdBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer peerConfig_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder> peerConfigBuilder_;
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public boolean hasPeerConfig() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig() {
+        if (peerConfigBuilder_ == null) {
+          return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+        } else {
+          return peerConfigBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder setPeerConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer value) {
+        if (peerConfigBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          peerConfig_ = value;
+          onChanged();
+        } else {
+          peerConfigBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder setPeerConfig(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder builderForValue) {
+        if (peerConfigBuilder_ == null) {
+          peerConfig_ = builderForValue.build();
+          onChanged();
+        } else {
+          peerConfigBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder mergePeerConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer value) {
+        if 

<TRUNCATED>

[15/50] [abbrv] hbase git commit: HBASE-17335 enable/disable replication peer requests should be routed through master

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-protocol-shaded/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index 384ac67..f4e7da6 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -855,4 +855,12 @@ service MasterService {
   /** Remove a replication peer */
   rpc RemoveReplicationPeer(RemoveReplicationPeerRequest)
     returns(RemoveReplicationPeerResponse);
+
+  /** Enable a replication peer */
+  rpc EnableReplicationPeer(EnableReplicationPeerRequest)
+    returns(EnableReplicationPeerResponse);
+
+  /** Disable a replication peer */
+  rpc DisableReplicationPeer(DisableReplicationPeerRequest)
+    returns(DisableReplicationPeerResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-protocol-shaded/src/main/protobuf/Replication.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Replication.proto b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
index 0bdf2c0..83633b3 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Replication.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
@@ -40,3 +40,17 @@ message RemoveReplicationPeerRequest {
 
 message RemoveReplicationPeerResponse {
 }
+
+message EnableReplicationPeerRequest {
+  required string peer_id = 1;
+}
+
+message EnableReplicationPeerResponse {
+}
+
+message DisableReplicationPeerRequest {
+  required string peer_id = 1;
+}
+
+message DisableReplicationPeerResponse {
+}
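
For orientation, a minimal client-side sketch of how the new request messages could be
assembled with the generated shaded protobuf builders (the peer id "1" below is only an
illustrative value, not something the commit prescribes):

  // Sketch only: build enable/disable requests for the new MasterService RPCs.
  import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;

  EnableReplicationPeerRequest enableRequest =
      EnableReplicationPeerRequest.newBuilder().setPeerId("1").build();
  DisableReplicationPeerRequest disableRequest =
      DisableReplicationPeerRequest.newBuilder().setPeerId("1").build();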

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 5067b3b..d3b3868 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -1869,4 +1869,44 @@ public interface MasterObserver extends Coprocessor {
   default void postRemoveReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       String peerId) throws IOException {
   }
+
+  /**
+   * Called before enabling a replication peer
+   * @param ctx
+   * @param peerId a short name that identifies the peer
+   * @throws IOException on failure
+   */
+  default void preEnableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+  }
+
+  /**
+   * Called after enabling a replication peer
+   * @param ctx
+   * @param peerId a short name that identifies the peer
+   * @throws IOException on failure
+   */
+  default void postEnableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+  }
+
+  /**
+   * Called before disabling a replication peer
+   * @param ctx
+   * @param peerId a short name that identifies the peer
+   * @throws IOException on failure
+   */
+  default void preDisableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+  }
+
+  /**
+   * Called after disabling a replication peer
+   * @param ctx
+   * @param peerId a short name that identifies the peer
+   * @throws IOException on failure
+   */
+  default void postDisableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+  }
 }
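
As a hedged illustration (not part of this commit), a custom coprocessor could hook the new
enable/disable callbacks to audit peer state changes; the class name and log messages below
are hypothetical:

  import java.io.IOException;
  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
  import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;

  // Hypothetical observer: logs peer ids before they are enabled or disabled.
  public class PeerAuditObserver extends BaseMasterObserver {
    private static final Log LOG = LogFactory.getLog(PeerAuditObserver.class);

    @Override
    public void preEnableReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
        String peerId) throws IOException {
      LOG.info("About to enable replication peer " + peerId);
    }

    @Override
    public void preDisableReplicationPeer(ObserverContext<MasterCoprocessorEnvironment> ctx,
        String peerId) throws IOException {
      LOG.info("About to disable replication peer " + peerId);
    }
  }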

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index da35da1..6b135d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3172,4 +3172,28 @@ public class HMaster extends HRegionServer implements MasterServices {
       cpHost.postRemoveReplicationPeer(peerId);
     }
   }
+
+  @Override
+  public void enableReplicationPeer(String peerId) throws ReplicationException, IOException {
+    if (cpHost != null) {
+      cpHost.preEnableReplicationPeer(peerId);
+    }
+    LOG.info(getClientIdAuditPrefix() + " enable replication peer, id=" + peerId);
+    this.replicationManager.enableReplicationPeer(peerId);
+    if (cpHost != null) {
+      cpHost.postEnableReplicationPeer(peerId);
+    }
+  }
+
+  @Override
+  public void disableReplicationPeer(String peerId) throws ReplicationException, IOException {
+    if (cpHost != null) {
+      cpHost.preDisableReplicationPeer(peerId);
+    }
+    LOG.info(getClientIdAuditPrefix() + " disable replication peer, id=" + peerId);
+    this.replicationManager.disableReplicationPeer(peerId);
+    if (cpHost != null) {
+      cpHost.postDisableReplicationPeer(peerId);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 97fbe67..0623f2b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -1687,4 +1687,44 @@ public class MasterCoprocessorHost
       }
     });
   }
+
+  public void preEnableReplicationPeer(final String peerId) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.preEnableReplicationPeer(ctx, peerId);
+      }
+    });
+  }
+
+  public void postEnableReplicationPeer(final String peerId) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.postEnableReplicationPeer(ctx, peerId);
+      }
+    });
+  }
+
+  public void preDisableReplicationPeer(final String peerId) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.preDisableReplicationPeer(ctx, peerId);
+      }
+    });
+  }
+
+  public void postDisableReplicationPeer(final String peerId) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.postDisableReplicationPeer(ctx, peerId);
+      }
+    });
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index afd807c..8ee72c6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -89,6 +89,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
@@ -1667,4 +1671,26 @@ public class MasterRpcServices extends RSRpcServices
       throw new ServiceException(e);
     }
   }
+
+  @Override
+  public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller,
+      EnableReplicationPeerRequest request) throws ServiceException {
+    try {
+      master.enableReplicationPeer(request.getPeerId());
+      return EnableReplicationPeerResponse.newBuilder().build();
+    } catch (ReplicationException | IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller,
+      DisableReplicationPeerRequest request) throws ServiceException {
+    try {
+      master.disableReplicationPeer(request.getPeerId());
+      return DisableReplicationPeerResponse.newBuilder().build();
+    } catch (ReplicationException | IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 5fc9d16..a7395bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -431,4 +431,16 @@ public interface MasterServices extends Server {
    * @param peerId a short name that identifies the peer
    */
   void removeReplicationPeer(String peerId) throws ReplicationException, IOException;
+
+  /**
+   * Restart the replication stream to the specified peer
+   * @param peerId a short name that identifies the peer
+   */
+  void enableReplicationPeer(String peerId) throws ReplicationException, IOException;
+
+  /**
+   * Stop the replication stream to the specified peer
+   * @param peerId a short name that identifies the peer
+   */
+  void disableReplicationPeer(String peerId) throws ReplicationException, IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
index 748f7af..8c13718 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
@@ -73,6 +73,14 @@ public class ReplicationManager {
     this.replicationPeers.unregisterPeer(peerId);
   }
 
+  public void enableReplicationPeer(String peerId) throws ReplicationException {
+    this.replicationPeers.enablePeer(peerId);
+  }
+
+  public void disableReplicationPeer(String peerId) throws ReplicationException {
+    this.replicationPeers.disablePeer(peerId);
+  }
+
   /**
    * Setting a namespace in the peer config means that all tables in this namespace
    * will be replicated to the peer cluster.

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 0452883..eaa0611 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2708,4 +2708,16 @@ public class AccessController extends BaseMasterAndRegionObserver
       String peerId) throws IOException {
     requirePermission(getActiveUser(ctx), "removeReplicationPeer", Action.ADMIN);
   }
+
+  @Override
+  public void preEnableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+    requirePermission(getActiveUser(ctx), "enableReplicationPeer", Action.ADMIN);
+  }
+
+  @Override
+  public void preDisableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+    requirePermission(getActiveUser(ctx), "disableReplicationPeer", Action.ADMIN);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 55138a0..4e85d29 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -391,4 +391,12 @@ public class MockNoopMasterServices implements MasterServices, Server {
   @Override
   public void removeReplicationPeer(String peerId) throws ReplicationException {
   }
+
+  @Override
+  public void enableReplicationPeer(String peerId) throws ReplicationException, IOException {
+  }
+
+  @Override
+  public void disableReplicationPeer(String peerId) throws ReplicationException, IOException {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index a0f6f29..956eadf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -2900,4 +2900,34 @@ public class TestAccessController extends SecureTestUtil {
     verifyAllowed(action, SUPERUSER, USER_ADMIN);
     verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
+
+  @Test
+  public void testEnableReplicationPeer() throws Exception {
+    AccessTestAction action = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preEnableReplicationPeer(ObserverContext.createAndPrepare(CP_ENV, null),
+          "test");
+        return null;
+      }
+    };
+
+    verifyAllowed(action, SUPERUSER, USER_ADMIN);
+    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
+
+  @Test
+  public void testDisableReplicationPeer() throws Exception {
+    AccessTestAction action = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preDisableReplicationPeer(ObserverContext.createAndPrepare(CP_ENV, null),
+          "test");
+        return null;
+      }
+    };
+
+    verifyAllowed(action, SUPERUSER, USER_ADMIN);
+    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index a71d916..afe7b57 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -118,6 +118,8 @@ In case the table goes out of date, the unit tests which check for accuracy of p
 |        | setNamespaceQuota | superuser\|global(A)
 |        | addReplicationPeer | superuser\|global(A)
 |        | removeReplicationPeer | superuser\|global(A)
+|        | enableReplicationPeer | superuser\|global(A)
+|        | disableReplicationPeer | superuser\|global(A)
 | Region | openRegion | superuser\|global(A)
 |        | closeRegion | superuser\|global(A)
 |        | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)


[38/50] [abbrv] hbase git commit: HBASE-17320 Add inclusive/exclusive support for startRow and endRow of scan

Posted by sy...@apache.org.
HBASE-17320 Add inclusive/exclusive support for startRow and endRow of scan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/05b1d918
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/05b1d918
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/05b1d918

Branch: refs/heads/hbase-12439
Commit: 05b1d918b0a845ced066a66b187823c357ed673d
Parents: a3e0e0d
Author: zhangduo <zh...@apache.org>
Authored: Wed Dec 28 21:35:50 2016 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Thu Dec 29 09:43:31 2016 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/client/AsyncClientScanner.java |  22 +-
 .../client/AsyncRpcRetryingCallerFactory.java   |   2 +-
 .../AsyncScanSingleRegionRpcRetryingCaller.java |  65 +---
 .../client/AsyncSmallScanRpcRetryingCaller.java |  71 ++--
 .../hbase/client/AsyncTableResultScanner.java   |  20 +-
 .../hadoop/hbase/client/ConnectionUtils.java    |  42 +++
 .../org/apache/hadoop/hbase/client/Scan.java    | 183 ++++++---
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |  21 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java     |  21 +-
 .../org/apache/hadoop/hbase/io/TimeRange.java   |   2 -
 .../shaded/protobuf/generated/ClientProtos.java | 363 +++++++++++++-----
 .../src/main/protobuf/Client.proto              |   2 +
 .../hbase/protobuf/generated/ClientProtos.java  | 367 ++++++++++++++-----
 hbase-protocol/src/main/protobuf/Client.proto   |   2 +
 .../regionserver/DefaultStoreFileManager.java   |   4 +-
 .../hadoop/hbase/regionserver/HRegion.java      |  38 +-
 .../hadoop/hbase/regionserver/HStore.java       |  41 +--
 .../regionserver/ReversedRegionScannerImpl.java |  15 +-
 .../apache/hadoop/hbase/regionserver/Store.java |  83 +++--
 .../hbase/regionserver/StoreFileManager.java    |   6 +-
 .../regionserver/StripeStoreFileManager.java    |   4 +-
 .../CompactionScanQueryMatcher.java             |   5 +-
 .../querymatcher/LegacyScanQueryMatcher.java    |   3 +-
 .../NormalUserScanQueryMatcher.java             |  48 ++-
 .../querymatcher/RawScanQueryMatcher.java       |  48 ++-
 .../querymatcher/ScanQueryMatcher.java          |   8 +-
 .../querymatcher/UserScanQueryMatcher.java      |  15 +-
 .../client/AbstractTestAsyncTableScan.java      |  58 ++-
 .../hbase/client/TestRawAsyncTableScan.java     |  18 +-
 .../TestStripeStoreFileManager.java             |  27 +-
 30 files changed, 1102 insertions(+), 502 deletions(-)
----------------------------------------------------------------------
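
The client-facing change here is that a Scan's boundaries can now be marked inclusive or
exclusive instead of relying on the old setStartRow/setStopRow semantics. A minimal usage
sketch (the row keys shown are placeholder values):

  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.util.Bytes;

  // Sketch only: scan the closed interval ["row-010", "row-020"].
  // withStartRow defaults to inclusive; withStopRow defaults to exclusive,
  // so the stop row has to be marked inclusive explicitly.
  Scan scan = new Scan()
      .withStartRow(Bytes.toBytes("row-010"), true)
      .withStopRow(Bytes.toBytes("row-020"), true);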


http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
index dfffd39..d7a3ed1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.HConstants.EMPTY_END_ROW;
 import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.getLocateType;
 
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
@@ -64,10 +64,10 @@ class AsyncClientScanner {
   public AsyncClientScanner(Scan scan, RawScanResultConsumer consumer, TableName tableName,
       AsyncConnectionImpl conn, long scanTimeoutNs, long rpcTimeoutNs) {
     if (scan.getStartRow() == null) {
-      scan.setStartRow(EMPTY_START_ROW);
+      scan.withStartRow(EMPTY_START_ROW, scan.includeStartRow());
     }
     if (scan.getStopRow() == null) {
-      scan.setStopRow(EMPTY_END_ROW);
+      scan.withStopRow(EMPTY_END_ROW, scan.includeStopRow());
     }
     this.scan = scan;
     this.consumer = consumer;
@@ -117,23 +117,22 @@ class AsyncClientScanner {
     conn.callerFactory.scanSingleRegion().id(resp.scannerId).location(resp.loc).stub(resp.stub)
         .setScan(scan).consumer(consumer).resultCache(resultCache)
         .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-        .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).start()
-        .whenComplete((locateType, error) -> {
+        .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).start().whenComplete((hasMore, error) -> {
           if (error != null) {
             consumer.onError(error);
             return;
           }
-          if (locateType == null) {
-            consumer.onComplete();
+          if (hasMore) {
+            openScanner();
           } else {
-            openScanner(locateType);
+            consumer.onComplete();
           }
         });
   }
 
-  private void openScanner(RegionLocateType locateType) {
+  private void openScanner() {
     conn.callerFactory.<OpenScannerResponse> single().table(tableName).row(scan.getStartRow())
-        .locateType(locateType).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
+        .locateType(getLocateType(scan)).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
         .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).action(this::callOpenScanner).call()
         .whenComplete((resp, error) -> {
           if (error != null) {
@@ -145,7 +144,6 @@ class AsyncClientScanner {
   }
 
   public void start() {
-    openScanner(scan.isReversed() && isEmptyStartRow(scan.getStartRow()) ? RegionLocateType.BEFORE
-        : RegionLocateType.CURRENT);
+    openScanner();
   }
 }
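
The net effect of this change is that AsyncClientScanner no longer threads a RegionLocateType through the completion callback: the per-region caller completes a CompletableFuture<Boolean>, where true means "open a scanner on the next region" and false means "the whole scan is done", and the locate type is recomputed from the Scan itself via getLocateType(scan). A self-contained sketch of that control flow (illustrative names only, not HBase code):

import java.util.concurrent.CompletableFuture;

// Illustration of the boolean-driven scan loop used above; scanOneRegion() stands in
// for the per-region retrying caller and the println calls for consumer.onError()/
// consumer.onComplete().
public class BooleanDrivenScanLoop {
  private int region = 0;                   // pretend region index
  private static final int LAST_REGION = 2;

  private CompletableFuture<Boolean> scanOneRegion() {
    System.out.println("scanning region " + region);
    // Complete with true while more regions remain, false once the scan is finished.
    return CompletableFuture.completedFuture(region < LAST_REGION);
  }

  private void openScanner() {
    scanOneRegion().whenComplete((hasMore, error) -> {
      if (error != null) {
        System.err.println("scan failed: " + error);  // consumer.onError(error)
      } else if (hasMore) {
        region++;
        openScanner();                                // reopen on the next region
      } else {
        System.out.println("scan complete");          // consumer.onComplete()
      }
    });
  }

  public static void main(String[] args) {
    new BooleanDrivenScanLoop().openScanner();
  }
}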

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
index c90bee2..55c56ab 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
@@ -246,7 +246,7 @@ class AsyncRpcRetryingCallerFactory {
     /**
      * Short cut for {@code build().start()}.
      */
-    public CompletableFuture<RegionLocateType> start() {
+    public CompletableFuture<Boolean> start() {
       return build().start();
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
index 5bf6195..dae88a7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
@@ -17,11 +17,9 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import static org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowBefore;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForReverseScan;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForScan;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
@@ -34,7 +32,6 @@ import java.util.List;
 import java.util.Optional;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
-import java.util.function.Supplier;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -53,7 +50,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientServ
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService.Interface;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
@@ -91,11 +87,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 
   private final int startLogErrorsCnt;
 
-  private final Supplier<byte[]> createNextStartRowWhenError;
-
   private final Runnable completeWhenNoMoreResultsInRegion;
 
-  private final CompletableFuture<RegionLocateType> future;
+  private final CompletableFuture<Boolean> future;
 
   private final HBaseRpcController controller;
 
@@ -128,10 +122,8 @@ class AsyncScanSingleRegionRpcRetryingCaller {
     this.rpcTimeoutNs = rpcTimeoutNs;
     this.startLogErrorsCnt = startLogErrorsCnt;
     if (scan.isReversed()) {
-      createNextStartRowWhenError = this::createReversedNextStartRowWhenError;
       completeWhenNoMoreResultsInRegion = this::completeReversedWhenNoMoreResultsInRegion;
     } else {
-      createNextStartRowWhenError = this::createNextStartRowWhenError;
       completeWhenNoMoreResultsInRegion = this::completeWhenNoMoreResultsInRegion;
     }
     this.future = new CompletableFuture<>();
@@ -164,23 +156,13 @@ class AsyncScanSingleRegionRpcRetryingCaller {
     future.completeExceptionally(new RetriesExhaustedException(tries - 1, exceptions));
   }
 
-  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NONNULL_PARAM_VIOLATION",
-      justification = "https://github.com/findbugsproject/findbugs/issues/79")
   private void completeNoMoreResults() {
-    future.complete(null);
-  }
-
-  private void completeWithNextStartRow(byte[] nextStartRow) {
-    scan.setStartRow(nextStartRow);
-    future.complete(scan.isReversed() ? RegionLocateType.BEFORE : RegionLocateType.CURRENT);
+    future.complete(false);
   }
 
-  private byte[] createNextStartRowWhenError() {
-    return createClosestRowAfter(nextStartRowWhenError);
-  }
-
-  private byte[] createReversedNextStartRowWhenError() {
-    return createClosestRowBefore(nextStartRowWhenError);
+  private void completeWithNextStartRow(byte[] row, boolean inclusive) {
+    scan.withStartRow(row, inclusive);
+    future.complete(true);
   }
 
   private void completeWhenError(boolean closeScanner) {
@@ -189,12 +171,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
       closeScanner();
     }
     if (nextStartRowWhenError != null) {
-      scan.setStartRow(
-        includeNextStartRowWhenError ? nextStartRowWhenError : createNextStartRowWhenError.get());
+      scan.withStartRow(nextStartRowWhenError, includeNextStartRowWhenError);
     }
-    future.complete(
-      scan.isReversed() && Bytes.equals(scan.getStartRow(), loc.getRegionInfo().getEndKey())
-          ? RegionLocateType.BEFORE : RegionLocateType.CURRENT);
+    future.complete(true);
   }
 
   private void onError(Throwable error) {
@@ -251,29 +230,19 @@ class AsyncScanSingleRegionRpcRetryingCaller {
   }
 
   private void completeWhenNoMoreResultsInRegion() {
-    if (isEmptyStopRow(scan.getStopRow())) {
-      if (isEmptyStopRow(loc.getRegionInfo().getEndKey())) {
-        completeNoMoreResults();
-      }
+    if (noMoreResultsForScan(scan, loc.getRegionInfo())) {
+      completeNoMoreResults();
     } else {
-      if (Bytes.compareTo(loc.getRegionInfo().getEndKey(), scan.getStopRow()) >= 0) {
-        completeNoMoreResults();
-      }
+      completeWithNextStartRow(loc.getRegionInfo().getEndKey(), true);
     }
-    completeWithNextStartRow(loc.getRegionInfo().getEndKey());
   }
 
   private void completeReversedWhenNoMoreResultsInRegion() {
-    if (isEmptyStopRow(scan.getStopRow())) {
-      if (isEmptyStartRow(loc.getRegionInfo().getStartKey())) {
-        completeNoMoreResults();
-      }
+    if (noMoreResultsForReverseScan(scan, loc.getRegionInfo())) {
+      completeNoMoreResults();
     } else {
-      if (Bytes.compareTo(loc.getRegionInfo().getStartKey(), scan.getStopRow()) <= 0) {
-        completeNoMoreResults();
-      }
+      completeWithNextStartRow(loc.getRegionInfo().getStartKey(), false);
     }
-    completeWithNextStartRow(loc.getRegionInfo().getStartKey());
   }
 
   private void onComplete(ScanResponse resp) {
@@ -343,9 +312,9 @@ class AsyncScanSingleRegionRpcRetryingCaller {
   }
 
   /**
-   * @return return locate direction for next open scanner call, or null if we should stop.
+   * @return {@code true} if we should continue, otherwise {@code false}.
    */
-  public CompletableFuture<RegionLocateType> start() {
+  public CompletableFuture<Boolean> start() {
     next();
     return future;
   }
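
With the inclusive flag available on Scan#withStartRow, the retry caller no longer needs createClosestRowAfter/createClosestRowBefore to manufacture a resume key: when a region is exhausted, a forward scan resumes at the region's end key inclusively (that key belongs to the next region), and a reverse scan resumes at the region's start key exclusively (that key was already served by the finished region). A minimal sketch of that rule, using only the public Scan API shown in this patch (not the retry-caller code itself):

import org.apache.hadoop.hbase.client.Scan;

// Sketch of how the next start row is derived straight from the region boundaries
// via the inclusive flag.
public final class ResumeAfterRegion {
  static void resume(Scan scan, byte[] regionStartKey, byte[] regionEndKey) {
    if (scan.isReversed()) {
      // The region's start key has already been returned, so exclude it.
      scan.withStartRow(regionStartKey, false);
    } else {
      // The region's end key belongs to the next region, so include it.
      scan.withStartRow(regionEndKey, true);
    }
  }
}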

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
index c4c2074..6ffa30a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
@@ -17,8 +17,9 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.getLocateType;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForReverseScan;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.noMoreResultsForScan;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -37,7 +38,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest;
-import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Retry caller for smaller scan.
@@ -57,10 +57,6 @@ class AsyncSmallScanRpcRetryingCaller {
 
   private final long rpcTimeoutNs;
 
-  private final Function<byte[], byte[]> createClosestNextRow;
-
-  private final Runnable firstScan;
-
   private final Function<HRegionInfo, Boolean> nextScan;
 
   private final List<Result> resultList;
@@ -76,12 +72,8 @@ class AsyncSmallScanRpcRetryingCaller {
     this.scanTimeoutNs = scanTimeoutNs;
     this.rpcTimeoutNs = rpcTimeoutNs;
     if (scan.isReversed()) {
-      this.createClosestNextRow = ConnectionUtils::createClosestRowBefore;
-      this.firstScan = this::reversedFirstScan;
       this.nextScan = this::reversedNextScan;
     } else {
-      this.createClosestNextRow = ConnectionUtils::createClosestRowAfter;
-      this.firstScan = this::firstScan;
       this.nextScan = this::nextScan;
     }
     this.resultList = new ArrayList<>();
@@ -141,10 +133,9 @@ class AsyncSmallScanRpcRetryingCaller {
     }
     if (resp.hasMoreResultsInRegion) {
       if (resp.results.length > 0) {
-        scan.setStartRow(
-          createClosestNextRow.apply(resp.results[resp.results.length - 1].getRow()));
+        scan.withStartRow(resp.results[resp.results.length - 1].getRow(), false);
       }
-      scan(RegionLocateType.CURRENT);
+      scan();
       return;
     }
     if (!nextScan.apply(resp.currentRegion)) {
@@ -152,11 +143,11 @@ class AsyncSmallScanRpcRetryingCaller {
     }
   }
 
-  private void scan(RegionLocateType locateType) {
+  private void scan() {
     conn.callerFactory.<SmallScanResponse> single().table(tableName).row(scan.getStartRow())
-        .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-        .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).locateType(locateType)
-        .action(this::scan).call().whenComplete((resp, error) -> {
+        .locateType(getLocateType(scan)).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
+        .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).action(this::scan).call()
+        .whenComplete((resp, error) -> {
           if (error != null) {
             future.completeExceptionally(error);
           } else {
@@ -166,45 +157,27 @@ class AsyncSmallScanRpcRetryingCaller {
   }
 
   public CompletableFuture<List<Result>> call() {
-    firstScan.run();
+    scan();
     return future;
   }
 
-  private void firstScan() {
-    scan(RegionLocateType.CURRENT);
-  }
-
-  private void reversedFirstScan() {
-    scan(isEmptyStartRow(scan.getStartRow()) ? RegionLocateType.BEFORE : RegionLocateType.CURRENT);
-  }
-
-  private boolean nextScan(HRegionInfo region) {
-    if (isEmptyStopRow(scan.getStopRow())) {
-      if (isEmptyStopRow(region.getEndKey())) {
-        return false;
-      }
+  private boolean nextScan(HRegionInfo info) {
+    if (noMoreResultsForScan(scan, info)) {
+      return false;
     } else {
-      if (Bytes.compareTo(region.getEndKey(), scan.getStopRow()) >= 0) {
-        return false;
-      }
+      scan.withStartRow(info.getEndKey());
+      scan();
+      return true;
     }
-    scan.setStartRow(region.getEndKey());
-    scan(RegionLocateType.CURRENT);
-    return true;
   }
 
-  private boolean reversedNextScan(HRegionInfo region) {
-    if (isEmptyStopRow(scan.getStopRow())) {
-      if (isEmptyStartRow(region.getStartKey())) {
-        return false;
-      }
+  private boolean reversedNextScan(HRegionInfo info) {
+    if (noMoreResultsForReverseScan(scan, info)) {
+      return false;
     } else {
-      if (Bytes.compareTo(region.getStartKey(), scan.getStopRow()) <= 0) {
-        return false;
-      }
+      scan.withStartRow(info.getStartKey(), false);
+      scan();
+      return true;
     }
-    scan.setStartRow(region.getStartKey());
-    scan(RegionLocateType.BEFORE);
-    return true;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java
index cb8652e..e2c4ec3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java
@@ -26,7 +26,6 @@ import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayDeque;
 import java.util.Queue;
-import java.util.function.Function;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -65,14 +64,10 @@ class AsyncTableResultScanner implements ResultScanner, RawScanResultConsumer {
   // used to filter out cells that already returned when we restart a scan
   private Cell lastCell;
 
-  private Function<byte[], byte[]> createClosestRow;
-
   public AsyncTableResultScanner(RawAsyncTable table, Scan scan, long maxCacheSize) {
     this.rawTable = table;
     this.scan = scan;
     this.maxCacheSize = maxCacheSize;
-    this.createClosestRow = scan.isReversed() ? ConnectionUtils::createClosestRowBefore
-        : ConnectionUtils::createClosestRowAfter;
     table.scan(scan, this);
   }
 
@@ -84,16 +79,17 @@ class AsyncTableResultScanner implements ResultScanner, RawScanResultConsumer {
   private void stopPrefetch(Result lastResult) {
     prefetchStopped = true;
     if (lastResult.isPartial() || scan.getBatch() > 0) {
-      scan.setStartRow(lastResult.getRow());
+      scan.withStartRow(lastResult.getRow());
       lastCell = lastResult.rawCells()[lastResult.rawCells().length - 1];
     } else {
-      scan.setStartRow(createClosestRow.apply(lastResult.getRow()));
+      scan.withStartRow(lastResult.getRow(), false);
     }
     if (LOG.isDebugEnabled()) {
-      LOG.debug(System.identityHashCode(this) + " stop prefetching when scanning "
-          + rawTable.getName() + " as the cache size " + cacheSize
-          + " is greater than the maxCacheSize + " + maxCacheSize + ", the next start row is "
-          + Bytes.toStringBinary(scan.getStartRow()) + ", lastCell is " + lastCell);
+      LOG.debug(
+        String.format("0x%x", System.identityHashCode(this)) + " stop prefetching when scanning "
+            + rawTable.getName() + " as the cache size " + cacheSize
+            + " is greater than the maxCacheSize " + maxCacheSize + ", the next start row is "
+            + Bytes.toStringBinary(scan.getStartRow()) + ", lastCell is " + lastCell);
     }
     // Ignore an onComplete call as the scan is stopped by us.
     // Here we can not use a simple boolean flag. A scan operation can cross multiple regions and
@@ -166,7 +162,7 @@ class AsyncTableResultScanner implements ResultScanner, RawScanResultConsumer {
 
   private void resumePrefetch() {
     if (LOG.isDebugEnabled()) {
-      LOG.debug(System.identityHashCode(this) + " resume prefetching");
+      LOG.debug(String.format("0x%x", System.identityHashCode(this)) + " resume prefetching");
     }
     prefetchStopped = false;
     rawTable.scan(scan, this);

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 4355182..6f4a844 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -248,7 +249,10 @@ public final class ConnectionUtils {
 
   /**
    * Create the closest row before the specified row
+   * @deprecated in fact, we do not know the closest row before the given row, the result is only a
+   *             row very close to the current row. Avoid using this method in the future.
    */
+  @Deprecated
   static byte[] createClosestRowBefore(byte[] row) {
     if (row.length == 0) {
       return MAX_BYTE_ARRAY;
@@ -347,4 +351,42 @@ public final class ConnectionUtils {
   static CompletableFuture<Void> voidBatchAll(AsyncTableBase table, List<? extends Row> actions) {
     return table.<Object> batchAll(actions).thenApply(r -> null);
   }
+
+  static RegionLocateType getLocateType(Scan scan) {
+    if (scan.isReversed()) {
+      if (isEmptyStartRow(scan.getStartRow())) {
+        return RegionLocateType.BEFORE;
+      } else {
+        return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.BEFORE;
+      }
+    } else {
+      return scan.includeStartRow() ? RegionLocateType.CURRENT : RegionLocateType.AFTER;
+    }
+  }
+
+  static boolean noMoreResultsForScan(Scan scan, HRegionInfo info) {
+    if (isEmptyStopRow(info.getEndKey())) {
+      return true;
+    }
+    if (isEmptyStopRow(scan.getStopRow())) {
+      return false;
+    }
+    int c = Bytes.compareTo(info.getEndKey(), scan.getStopRow());
+    // 1. if our stop row is less than the endKey of the region
+    // 2. if our stop row is equal to the endKey of the region and we do not include the stop row
+    // for scan.
+    return c > 0 || (c == 0 && !scan.includeStopRow());
+  }
+
+  static boolean noMoreResultsForReverseScan(Scan scan, HRegionInfo info) {
+    if (isEmptyStartRow(info.getStartKey())) {
+      return true;
+    }
+    if (isEmptyStopRow(scan.getStopRow())) {
+      return false;
+    }
+    // no need to test the inclusive of the stop row as the start key of a region is included in
+    // the region.
+    return Bytes.compareTo(info.getStartKey(), scan.getStopRow()) <= 0;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 81a8414..2c69924 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -87,7 +87,9 @@ public class Scan extends Query {
   private static final String RAW_ATTR = "_raw_";
 
   private byte [] startRow = HConstants.EMPTY_START_ROW;
+  private boolean includeStartRow = true;
   private byte [] stopRow  = HConstants.EMPTY_END_ROW;
+  private boolean includeStopRow = false;
   private int maxVersions = 1;
   private int batch = -1;
 
@@ -106,7 +108,6 @@ public class Scan extends Query {
 
   private int storeLimit = -1;
   private int storeOffset = 0;
-  private boolean getScan;
 
   /**
    * @deprecated since 1.0.0. Use {@link #setScanMetricsEnabled(boolean)}
@@ -135,8 +136,8 @@ public class Scan extends Query {
   private long maxResultSize = -1;
   private boolean cacheBlocks = true;
   private boolean reversed = false;
-  private Map<byte [], NavigableSet<byte []>> familyMap =
-    new TreeMap<byte [], NavigableSet<byte []>>(Bytes.BYTES_COMPARATOR);
+  private Map<byte[], NavigableSet<byte[]>> familyMap =
+      new TreeMap<byte[], NavigableSet<byte[]>>(Bytes.BYTES_COMPARATOR);
   private Boolean asyncPrefetch = null;
 
   /**
@@ -175,7 +176,11 @@ public class Scan extends Query {
    */
   public Scan() {}
 
-  public Scan(byte [] startRow, Filter filter) {
+  /**
+   * @deprecated use {@code new Scan().withStartRow(startRow).setFilter(filter)} instead.
+   */
+  @Deprecated
+  public Scan(byte[] startRow, Filter filter) {
     this(startRow);
     this.filter = filter;
   }
@@ -183,24 +188,26 @@ public class Scan extends Query {
   /**
    * Create a Scan operation starting at the specified row.
    * <p>
-   * If the specified row does not exist, the Scanner will start from the
-   * next closest row after the specified row.
+   * If the specified row does not exist, the Scanner will start from the next closest row after the
+   * specified row.
    * @param startRow row to start scanner at or after
+   * @deprecated use {@code new Scan().withStartRow(startRow)} instead.
    */
-  public Scan(byte [] startRow) {
-    this.startRow = startRow;
+  @Deprecated
+  public Scan(byte[] startRow) {
+    setStartRow(startRow);
   }
 
   /**
    * Create a Scan operation for the range of rows specified.
    * @param startRow row to start scanner at or after (inclusive)
    * @param stopRow row to stop scanner before (exclusive)
+   * @deprecated use {@code new Scan().withStartRow(startRow).withStopRow(stopRow)} instead.
    */
-  public Scan(byte [] startRow, byte [] stopRow) {
-    this.startRow = startRow;
-    this.stopRow = stopRow;
-    //if the startRow and stopRow both are empty, it is not a Get
-    this.getScan = isStartRowAndEqualsStopRow();
+  @Deprecated
+  public Scan(byte[] startRow, byte[] stopRow) {
+    setStartRow(startRow);
+    setStopRow(stopRow);
   }
 
   /**
@@ -211,7 +218,9 @@ public class Scan extends Query {
    */
   public Scan(Scan scan) throws IOException {
     startRow = scan.getStartRow();
+    includeStartRow = scan.includeStartRow();
     stopRow  = scan.getStopRow();
+    includeStopRow = scan.includeStopRow();
     maxVersions = scan.getMaxVersions();
     batch = scan.getBatch();
     storeLimit = scan.getMaxResultsPerColumnFamily();
@@ -219,7 +228,6 @@ public class Scan extends Query {
     caching = scan.getCaching();
     maxResultSize = scan.getMaxResultSize();
     cacheBlocks = scan.getCacheBlocks();
-    getScan = scan.isGetScan();
     filter = scan.getFilter(); // clone?
     loadColumnFamiliesOnDemand = scan.getLoadColumnFamiliesOnDemandValue();
     consistency = scan.getConsistency();
@@ -228,8 +236,7 @@ public class Scan extends Query {
     asyncPrefetch = scan.isAsyncPrefetch();
     small = scan.isSmall();
     allowPartialResults = scan.getAllowPartialResults();
-    TimeRange ctr = scan.getTimeRange();
-    tr = new TimeRange(ctr.getMin(), ctr.getMax());
+    tr = scan.getTimeRange(); // TimeRange is immutable
     Map<byte[], NavigableSet<byte[]>> fams = scan.getFamilyMap();
     for (Map.Entry<byte[],NavigableSet<byte[]>> entry : fams.entrySet()) {
       byte [] fam = entry.getKey();
@@ -258,7 +265,9 @@ public class Scan extends Query {
    */
   public Scan(Get get) {
     this.startRow = get.getRow();
+    this.includeStartRow = true;
     this.stopRow = get.getRow();
+    this.includeStopRow = true;
     this.filter = get.getFilter();
     this.cacheBlocks = get.getCacheBlocks();
     this.maxVersions = get.getMaxVersions();
@@ -266,7 +275,6 @@ public class Scan extends Query {
     this.storeOffset = get.getRowOffsetPerColumnFamily();
     this.tr = get.getTimeRange();
     this.familyMap = get.getFamilyMap();
-    this.getScan = true;
     this.asyncPrefetch = false;
     this.consistency = get.getConsistency();
     this.setIsolationLevel(get.getIsolationLevel());
@@ -282,13 +290,13 @@ public class Scan extends Query {
   }
 
   public boolean isGetScan() {
-    return this.getScan || isStartRowAndEqualsStopRow();
+    return includeStartRow && includeStopRow && areStartRowAndStopRowEqual(startRow, stopRow);
   }
 
-  private boolean isStartRowAndEqualsStopRow() {
-    return this.startRow != null && this.startRow.length > 0 &&
-        Bytes.equals(this.startRow, this.stopRow);
+  private static boolean areStartRowAndStopRowEqual(byte[] startRow, byte[] stopRow) {
+    return startRow != null && startRow.length > 0 && Bytes.equals(startRow, stopRow);
   }
+
   /**
    * Get all columns from the specified family.
    * <p>
@@ -383,45 +391,120 @@ public class Scan extends Query {
   /**
    * Set the start row of the scan.
    * <p>
-   * If the specified row does not exist, the Scanner will start from the
-   * next closest row after the specified row.
+   * If the specified row does not exist, the Scanner will start from the next closest row after the
+   * specified row.
+   * @param startRow row to start scanner at or after
+   * @return this
+   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
+   * @deprecated use {@link #withStartRow(byte[])} instead. This method may change the inclusiveness
+   *             of the stop row to remain compatible with the old behavior.
+   */
+  @Deprecated
+  public Scan setStartRow(byte[] startRow) {
+    withStartRow(startRow);
+    if (areStartRowAndStopRowEqual(startRow, stopRow)) {
+      // for keeping the old behavior that a scan with the same start and stop row is a get scan.
+      this.includeStopRow = true;
+    }
+    return this;
+  }
+
+  /**
+   * Set the start row of the scan.
+   * <p>
+   * If the specified row does not exist, the Scanner will start from the next closest row after the
+   * specified row.
+   * @param startRow row to start scanner at or after
+   * @return this
+   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
+   */
+  public Scan withStartRow(byte[] startRow) {
+    return withStartRow(startRow, true);
+  }
+
+  /**
+   * Set the start row of the scan.
+   * <p>
+   * If the specified row does not exist, or the {@code inclusive} is {@code false}, the Scanner
+   * will start from the next closest row after the specified row.
    * @param startRow row to start scanner at or after
+   * @param inclusive whether we should include the start row in the scan
    * @return this
-   * @throws IllegalArgumentException if startRow does not meet criteria
-   * for a row key (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
+   * @throws IllegalArgumentException if startRow does not meet criteria for a row key (when length
+   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
    */
-  public Scan setStartRow(byte [] startRow) {
+  public Scan withStartRow(byte[] startRow, boolean inclusive) {
     if (Bytes.len(startRow) > HConstants.MAX_ROW_LENGTH) {
-      throw new IllegalArgumentException(
-        "startRow's length must be less than or equal to " +
-        HConstants.MAX_ROW_LENGTH + " to meet the criteria" +
-        " for a row key.");
+      throw new IllegalArgumentException("startRow's length must be less than or equal to "
+          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
     }
     this.startRow = startRow;
+    this.includeStartRow = inclusive;
     return this;
   }
 
   /**
    * Set the stop row of the scan.
+   * <p>
+   * The scan will include rows that are lexicographically less than the provided stopRow.
+   * <p>
+   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
+   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
+   * </p>
+   * @param stopRow row to end at (exclusive)
+   * @return this
+   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
+   * @deprecated use {@link #withStopRow(byte[])} instead. This method may change the inclusiveness
+   *             of the stop row to remain compatible with the old behavior.
+   */
+  @Deprecated
+  public Scan setStopRow(byte[] stopRow) {
+    withStopRow(stopRow);
+    if (areStartRowAndStopRowEqual(startRow, stopRow)) {
+      // for keeping the old behavior that a scan with the same start and stop row is a get scan.
+      this.includeStopRow = true;
+    }
+    return this;
+  }
+
+  /**
+   * Set the stop row of the scan.
+   * <p>
+   * The scan will include rows that are lexicographically less than the provided stopRow.
+   * <p>
+   * <b>Note:</b> When doing a filter for a rowKey <u>Prefix</u> use
+   * {@link #setRowPrefixFilter(byte[])}. The 'trailing 0' will not yield the desired result.
+   * </p>
    * @param stopRow row to end at (exclusive)
+   * @return this
+   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
+   */
+  public Scan withStopRow(byte[] stopRow) {
+    return withStopRow(stopRow, false);
+  }
+
+  /**
+   * Set the stop row of the scan.
    * <p>
-   * The scan will include rows that are lexicographically less than
-   * the provided stopRow.
-   * <p><b>Note:</b> When doing a filter for a rowKey <u>Prefix</u>
-   * use {@link #setRowPrefixFilter(byte[])}.
-   * The 'trailing 0' will not yield the desired result.</p>
+   * The scan will include rows that are lexicographically less than (or equal to if
+   * {@code inclusive} is {@code true}) the provided stopRow.
+   * @param stopRow row to end at
+   * @param inclusive whether we should include the stop row in the scan
    * @return this
-   * @throws IllegalArgumentException if stopRow does not meet criteria
-   * for a row key (when length exceeds {@link HConstants#MAX_ROW_LENGTH})
+   * @throws IllegalArgumentException if stopRow does not meet criteria for a row key (when length
+   *           exceeds {@link HConstants#MAX_ROW_LENGTH})
    */
-  public Scan setStopRow(byte [] stopRow) {
+  public Scan withStopRow(byte[] stopRow, boolean inclusive) {
     if (Bytes.len(stopRow) > HConstants.MAX_ROW_LENGTH) {
-      throw new IllegalArgumentException(
-        "stopRow's length must be less than or equal to " +
-        HConstants.MAX_ROW_LENGTH + " to meet the criteria" +
-        " for a row key.");
+      throw new IllegalArgumentException("stopRow's length must be less than or equal to "
+          + HConstants.MAX_ROW_LENGTH + " to meet the criteria" + " for a row key.");
     }
     this.stopRow = stopRow;
+    this.includeStopRow = inclusive;
     return this;
   }
 
@@ -636,13 +719,27 @@ public class Scan extends Query {
   }
 
   /**
+   * @return whether we should include the start row in the scan
+   */
+  public boolean includeStartRow() {
+    return includeStartRow;
+  }
+
+  /**
    * @return the stoprow
    */
-  public byte [] getStopRow() {
+  public byte[] getStopRow() {
     return this.stopRow;
   }
 
   /**
+   * @return whether we should include the stop row in the scan
+   */
+  public boolean includeStopRow() {
+    return includeStopRow;
+  }
+
+  /**
    * @return the max number of versions to fetch
    */
   public int getMaxVersions() {
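
For callers, the practical upshot of the Scan changes is a fluent, explicit way to state both ends of the row range, replacing the deprecated constructors and setStartRow/setStopRow. The one-argument withStartRow/withStopRow keep the traditional semantics (start inclusive, stop exclusive); the two-argument overloads take the inclusive flag directly, and unlike the deprecated setters they never flip includeStopRow for the "start equals stop means get" compatibility case. A short usage sketch:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public final class ScanRangeExample {
  public static void main(String[] args) {
    // Old style (now deprecated): half-open range [row100, row200).
    Scan oldStyle = new Scan(Bytes.toBytes("row100"), Bytes.toBytes("row200"));

    // New style, same half-open range.
    Scan halfOpen = new Scan()
        .withStartRow(Bytes.toBytes("row100"))       // inclusive by default
        .withStopRow(Bytes.toBytes("row200"));       // exclusive by default

    // New style, fully closed range [row100, row200].
    Scan closed = new Scan()
        .withStartRow(Bytes.toBytes("row100"), true)
        .withStopRow(Bytes.toBytes("row200"), true);

    System.out.println(closed.includeStartRow() + " " + closed.includeStopRow()); // true true
  }
}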

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index d6dc7e9..d3898d4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -911,6 +911,12 @@ public final class ProtobufUtil {
     if (mvccReadPoint > 0) {
       scanBuilder.setMvccReadPoint(mvccReadPoint);
     }
+    if (!scan.includeStartRow()) {
+      scanBuilder.setIncludeStartRow(false);
+    }
+    if (scan.includeStopRow()) {
+      scanBuilder.setIncludeStopRow(true);
+    }
     return scanBuilder.build();
   }
 
@@ -923,15 +929,24 @@ public final class ProtobufUtil {
    */
   public static Scan toScan(
       final ClientProtos.Scan proto) throws IOException {
-    byte [] startRow = HConstants.EMPTY_START_ROW;
-    byte [] stopRow  = HConstants.EMPTY_END_ROW;
+    byte[] startRow = HConstants.EMPTY_START_ROW;
+    byte[] stopRow = HConstants.EMPTY_END_ROW;
+    boolean includeStartRow = true;
+    boolean includeStopRow = false;
     if (proto.hasStartRow()) {
       startRow = proto.getStartRow().toByteArray();
     }
     if (proto.hasStopRow()) {
       stopRow = proto.getStopRow().toByteArray();
     }
-    Scan scan = new Scan(startRow, stopRow);
+    if (proto.hasIncludeStartRow()) {
+      includeStartRow = proto.getIncludeStartRow();
+    }
+    if (proto.hasIncludeStopRow()) {
+      includeStopRow = proto.getIncludeStopRow();
+    }
+    Scan scan =
+        new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow);
     if (proto.hasCacheBlocks()) {
       scan.setCacheBlocks(proto.getCacheBlocks());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 0f2cf1d..285e19a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -1024,6 +1024,12 @@ public final class ProtobufUtil {
     if (mvccReadPoint > 0) {
       scanBuilder.setMvccReadPoint(mvccReadPoint);
     }
+    if (!scan.includeStartRow()) {
+      scanBuilder.setIncludeStartRow(false);
+    }
+    if (scan.includeStopRow()) {
+      scanBuilder.setIncludeStopRow(true);
+    }
     return scanBuilder.build();
   }
 
@@ -1036,15 +1042,24 @@ public final class ProtobufUtil {
    */
   public static Scan toScan(
       final ClientProtos.Scan proto) throws IOException {
-    byte [] startRow = HConstants.EMPTY_START_ROW;
-    byte [] stopRow  = HConstants.EMPTY_END_ROW;
+    byte[] startRow = HConstants.EMPTY_START_ROW;
+    byte[] stopRow = HConstants.EMPTY_END_ROW;
+    boolean includeStartRow = true;
+    boolean includeStopRow = false;
     if (proto.hasStartRow()) {
       startRow = proto.getStartRow().toByteArray();
     }
     if (proto.hasStopRow()) {
       stopRow = proto.getStopRow().toByteArray();
     }
-    Scan scan = new Scan(startRow, stopRow);
+    if (proto.hasIncludeStartRow()) {
+      includeStartRow = proto.getIncludeStartRow();
+    }
+    if (proto.hasIncludeStopRow()) {
+      includeStopRow = proto.getIncludeStopRow();
+    }
+    Scan scan =
+        new Scan().withStartRow(startRow, includeStartRow).withStopRow(stopRow, includeStopRow);
     if (proto.hasCacheBlocks()) {
       scan.setCacheBlocks(proto.getCacheBlocks());
     }
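
Both ProtobufUtil variants write the new fields only when they differ from the proto defaults and fall back to those same defaults when reading, so a Scan serialized without the fields keeps the pre-patch semantics (start row included, stop row excluded). A small sketch against the shaded generated classes:

import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;

public final class IncludeFlagsOnTheWire {
  public static void main(String[] args) {
    // No include_* fields set: the declared proto defaults apply.
    ClientProtos.Scan withoutFlags = ClientProtos.Scan.newBuilder().build();
    System.out.println(withoutFlags.getIncludeStartRow()); // true
    System.out.println(withoutFlags.getIncludeStopRow());  // false

    // Only non-default values need to be put on the wire.
    ClientProtos.Scan inclusiveStop = ClientProtos.Scan.newBuilder()
        .setIncludeStopRow(true)
        .build();
    System.out.println(inclusiveStop.getIncludeStopRow()); // true
  }
}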

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
index 2efcde1..77b9495 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TimeRange.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.hbase.io;
 
-import java.io.IOException;
-
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.util.Bytes;

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java
index eab62eb..ef44295 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ClientProtos.java
@@ -14563,6 +14563,24 @@ public final class ClientProtos {
      * <code>optional uint64 mvcc_read_point = 20 [default = 0];</code>
      */
     long getMvccReadPoint();
+
+    /**
+     * <code>optional bool include_start_row = 21 [default = true];</code>
+     */
+    boolean hasIncludeStartRow();
+    /**
+     * <code>optional bool include_start_row = 21 [default = true];</code>
+     */
+    boolean getIncludeStartRow();
+
+    /**
+     * <code>optional bool include_stop_row = 22 [default = false];</code>
+     */
+    boolean hasIncludeStopRow();
+    /**
+     * <code>optional bool include_stop_row = 22 [default = false];</code>
+     */
+    boolean getIncludeStopRow();
   }
   /**
    * <pre>
@@ -14604,6 +14622,8 @@ public final class ClientProtos {
       allowPartialResults_ = false;
       cfTimeRange_ = java.util.Collections.emptyList();
       mvccReadPoint_ = 0L;
+      includeStartRow_ = true;
+      includeStopRow_ = false;
     }
 
     @java.lang.Override
@@ -14768,6 +14788,16 @@ public final class ClientProtos {
               mvccReadPoint_ = input.readUInt64();
               break;
             }
+            case 168: {
+              bitField0_ |= 0x00020000;
+              includeStartRow_ = input.readBool();
+              break;
+            }
+            case 176: {
+              bitField0_ |= 0x00040000;
+              includeStopRow_ = input.readBool();
+              break;
+            }
           }
         }
       } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -15183,6 +15213,36 @@ public final class ClientProtos {
       return mvccReadPoint_;
     }
 
+    public static final int INCLUDE_START_ROW_FIELD_NUMBER = 21;
+    private boolean includeStartRow_;
+    /**
+     * <code>optional bool include_start_row = 21 [default = true];</code>
+     */
+    public boolean hasIncludeStartRow() {
+      return ((bitField0_ & 0x00020000) == 0x00020000);
+    }
+    /**
+     * <code>optional bool include_start_row = 21 [default = true];</code>
+     */
+    public boolean getIncludeStartRow() {
+      return includeStartRow_;
+    }
+
+    public static final int INCLUDE_STOP_ROW_FIELD_NUMBER = 22;
+    private boolean includeStopRow_;
+    /**
+     * <code>optional bool include_stop_row = 22 [default = false];</code>
+     */
+    public boolean hasIncludeStopRow() {
+      return ((bitField0_ & 0x00040000) == 0x00040000);
+    }
+    /**
+     * <code>optional bool include_stop_row = 22 [default = false];</code>
+     */
+    public boolean getIncludeStopRow() {
+      return includeStopRow_;
+    }
+
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
@@ -15279,6 +15339,12 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00010000) == 0x00010000)) {
         output.writeUInt64(20, mvccReadPoint_);
       }
+      if (((bitField0_ & 0x00020000) == 0x00020000)) {
+        output.writeBool(21, includeStartRow_);
+      }
+      if (((bitField0_ & 0x00040000) == 0x00040000)) {
+        output.writeBool(22, includeStopRow_);
+      }
       unknownFields.writeTo(output);
     }
 
@@ -15367,6 +15433,14 @@ public final class ClientProtos {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
           .computeUInt64Size(20, mvccReadPoint_);
       }
+      if (((bitField0_ & 0x00020000) == 0x00020000)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeBoolSize(21, includeStartRow_);
+      }
+      if (((bitField0_ & 0x00040000) == 0x00040000)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeBoolSize(22, includeStopRow_);
+      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -15474,6 +15548,16 @@ public final class ClientProtos {
         result = result && (getMvccReadPoint()
             == other.getMvccReadPoint());
       }
+      result = result && (hasIncludeStartRow() == other.hasIncludeStartRow());
+      if (hasIncludeStartRow()) {
+        result = result && (getIncludeStartRow()
+            == other.getIncludeStartRow());
+      }
+      result = result && (hasIncludeStopRow() == other.hasIncludeStopRow());
+      if (hasIncludeStopRow()) {
+        result = result && (getIncludeStopRow()
+            == other.getIncludeStopRow());
+      }
       result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
@@ -15572,6 +15656,16 @@ public final class ClientProtos {
         hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
             getMvccReadPoint());
       }
+      if (hasIncludeStartRow()) {
+        hash = (37 * hash) + INCLUDE_START_ROW_FIELD_NUMBER;
+        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
+            getIncludeStartRow());
+      }
+      if (hasIncludeStopRow()) {
+        hash = (37 * hash) + INCLUDE_STOP_ROW_FIELD_NUMBER;
+        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
+            getIncludeStopRow());
+      }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -15765,6 +15859,10 @@ public final class ClientProtos {
         }
         mvccReadPoint_ = 0L;
         bitField0_ = (bitField0_ & ~0x00080000);
+        includeStartRow_ = true;
+        bitField0_ = (bitField0_ & ~0x00100000);
+        includeStopRow_ = false;
+        bitField0_ = (bitField0_ & ~0x00200000);
         return this;
       }
 
@@ -15892,6 +15990,14 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00010000;
         }
         result.mvccReadPoint_ = mvccReadPoint_;
+        if (((from_bitField0_ & 0x00100000) == 0x00100000)) {
+          to_bitField0_ |= 0x00020000;
+        }
+        result.includeStartRow_ = includeStartRow_;
+        if (((from_bitField0_ & 0x00200000) == 0x00200000)) {
+          to_bitField0_ |= 0x00040000;
+        }
+        result.includeStopRow_ = includeStopRow_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -16063,6 +16169,12 @@ public final class ClientProtos {
         if (other.hasMvccReadPoint()) {
           setMvccReadPoint(other.getMvccReadPoint());
         }
+        if (other.hasIncludeStartRow()) {
+          setIncludeStartRow(other.getIncludeStartRow());
+        }
+        if (other.hasIncludeStopRow()) {
+          setIncludeStopRow(other.getIncludeStopRow());
+        }
         this.mergeUnknownFields(other.unknownFields);
         onChanged();
         return this;
@@ -17572,6 +17684,70 @@ public final class ClientProtos {
         onChanged();
         return this;
       }
+
+      private boolean includeStartRow_ = true;
+      /**
+       * <code>optional bool include_start_row = 21 [default = true];</code>
+       */
+      public boolean hasIncludeStartRow() {
+        return ((bitField0_ & 0x00100000) == 0x00100000);
+      }
+      /**
+       * <code>optional bool include_start_row = 21 [default = true];</code>
+       */
+      public boolean getIncludeStartRow() {
+        return includeStartRow_;
+      }
+      /**
+       * <code>optional bool include_start_row = 21 [default = true];</code>
+       */
+      public Builder setIncludeStartRow(boolean value) {
+        bitField0_ |= 0x00100000;
+        includeStartRow_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bool include_start_row = 21 [default = true];</code>
+       */
+      public Builder clearIncludeStartRow() {
+        bitField0_ = (bitField0_ & ~0x00100000);
+        includeStartRow_ = true;
+        onChanged();
+        return this;
+      }
+
+      private boolean includeStopRow_ ;
+      /**
+       * <code>optional bool include_stop_row = 22 [default = false];</code>
+       */
+      public boolean hasIncludeStopRow() {
+        return ((bitField0_ & 0x00200000) == 0x00200000);
+      }
+      /**
+       * <code>optional bool include_stop_row = 22 [default = false];</code>
+       */
+      public boolean getIncludeStopRow() {
+        return includeStopRow_;
+      }
+      /**
+       * <code>optional bool include_stop_row = 22 [default = false];</code>
+       */
+      public Builder setIncludeStopRow(boolean value) {
+        bitField0_ |= 0x00200000;
+        includeStopRow_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bool include_stop_row = 22 [default = false];</code>
+       */
+      public Builder clearIncludeStopRow() {
+        bitField0_ = (bitField0_ & ~0x00200000);
+        includeStopRow_ = false;
+        onChanged();
+        return this;
+      }
       public final Builder setUnknownFields(
           final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
         return super.setUnknownFields(unknownFields);
@@ -40658,7 +40834,7 @@ public final class ClientProtos {
       "tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" +
       "_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" +
       "\030\001 \001(\0132\020.hbase.pb.Result\022\021\n\tprocessed\030\002 " +
-      "\001(\010\"\331\004\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." +
+      "\001(\010\"\233\005\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." +
       "Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" +
       "eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" +
       "w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" +
@@ -40673,97 +40849,98 @@ public final class ClientProtos {
       "aching\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 " +
       "\001(\010\0226\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.Co" +
       "lumnFamilyTimeRange\022\032\n\017mvcc_read_point\030\024",
-      " \001(\004:\0010\"\246\002\n\013ScanRequest\022)\n\006region\030\001 \001(\0132" +
-      "\031.hbase.pb.RegionSpecifier\022\034\n\004scan\030\002 \001(\013" +
-      "2\016.hbase.pb.Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016" +
-      "number_of_rows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 " +
-      "\001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027client_han" +
-      "dles_partials\030\007 \001(\010\022!\n\031client_handles_he" +
-      "artbeats\030\010 \001(\010\022\032\n\022track_scan_metrics\030\t \001" +
-      "(\010\022\024\n\005renew\030\n \001(\010:\005false\"\266\002\n\014ScanRespons" +
-      "e\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscanner_i" +
-      "d\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001",
-      "(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022\r\n" +
-      "\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_result\030" +
-      "\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\022\031\n" +
-      "\021heartbeat_message\030\t \001(\010\022+\n\014scan_metrics" +
-      "\030\n \001(\0132\025.hbase.pb.ScanMetrics\022\032\n\017mvcc_re" +
-      "ad_point\030\013 \001(\004:\0010\"\240\002\n\024BulkLoadHFileReque" +
-      "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" +
-      "ifier\022>\n\013family_path\030\002 \003(\0132).hbase.pb.Bu" +
-      "lkLoadHFileRequest.FamilyPath\022\026\n\016assign_" +
-      "seq_num\030\003 \001(\010\022+\n\010fs_token\030\004 \001(\0132\031.hbase.",
-      "pb.DelegationToken\022\022\n\nbulk_token\030\005 \001(\t\022\030" +
-      "\n\tcopy_file\030\006 \001(\010:\005false\032*\n\nFamilyPath\022\016" +
-      "\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoad" +
-      "HFileResponse\022\016\n\006loaded\030\001 \002(\010\"V\n\017Delegat" +
-      "ionToken\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010password" +
-      "\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030\004 \001(\t\"l\n" +
-      "\026PrepareBulkLoadRequest\022\'\n\ntable_name\030\001 " +
-      "\002(\0132\023.hbase.pb.TableName\022)\n\006region\030\002 \001(\013" +
-      "2\031.hbase.pb.RegionSpecifier\"-\n\027PrepareBu" +
-      "lkLoadResponse\022\022\n\nbulk_token\030\001 \002(\t\"W\n\026Cl",
-      "eanupBulkLoadRequest\022\022\n\nbulk_token\030\001 \002(\t" +
-      "\022)\n\006region\030\002 \001(\0132\031.hbase.pb.RegionSpecif" +
-      "ier\"\031\n\027CleanupBulkLoadResponse\"a\n\026Coproc" +
-      "essorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service" +
-      "_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007requ" +
-      "est\030\004 \002(\014\"B\n\030CoprocessorServiceResult\022&\n" +
-      "\005value\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"v\n" +
-      "\031CoprocessorServiceRequest\022)\n\006region\030\001 \002" +
-      "(\0132\031.hbase.pb.RegionSpecifier\022.\n\004call\030\002 " +
-      "\002(\0132 .hbase.pb.CoprocessorServiceCall\"o\n",
-      "\032CoprocessorServiceResponse\022)\n\006region\030\001 " +
-      "\002(\0132\031.hbase.pb.RegionSpecifier\022&\n\005value\030" +
-      "\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Actio" +
-      "n\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.hba" +
-      "se.pb.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hbase" +
-      ".pb.Get\0226\n\014service_call\030\004 \001(\0132 .hbase.pb" +
-      ".CoprocessorServiceCall\"k\n\014RegionAction\022" +
-      ")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" +
-      "er\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.hba" +
-      "se.pb.Action\"c\n\017RegionLoadStats\022\027\n\014memst",
-      "oreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:" +
-      "\0010\022\035\n\022compactionPressure\030\003 \001(\005:\0010\"j\n\024Mul" +
-      "tiRegionLoadStats\022)\n\006region\030\001 \003(\0132\031.hbas" +
-      "e.pb.RegionSpecifier\022\'\n\004stat\030\002 \003(\0132\031.hba" +
-      "se.pb.RegionLoadStats\"\336\001\n\021ResultOrExcept" +
-      "ion\022\r\n\005index\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hba" +
-      "se.pb.Result\022*\n\texception\030\003 \001(\0132\027.hbase." +
-      "pb.NameBytesPair\022:\n\016service_result\030\004 \001(\013" +
-      "2\".hbase.pb.CoprocessorServiceResult\0220\n\t" +
-      "loadStats\030\005 \001(\0132\031.hbase.pb.RegionLoadSta",
-      "tsB\002\030\001\"x\n\022RegionActionResult\0226\n\021resultOr" +
-      "Exception\030\001 \003(\0132\033.hbase.pb.ResultOrExcep" +
-      "tion\022*\n\texception\030\002 \001(\0132\027.hbase.pb.NameB" +
-      "ytesPair\"x\n\014MultiRequest\022,\n\014regionAction" +
-      "\030\001 \003(\0132\026.hbase.pb.RegionAction\022\022\n\nnonceG" +
-      "roup\030\002 \001(\004\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb" +
-      ".Condition\"\226\001\n\rMultiResponse\0228\n\022regionAc" +
-      "tionResult\030\001 \003(\0132\034.hbase.pb.RegionAction" +
-      "Result\022\021\n\tprocessed\030\002 \001(\010\0228\n\020regionStati" +
-      "stics\030\003 \001(\0132\036.hbase.pb.MultiRegionLoadSt",
-      "ats*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELI" +
-      "NE\020\0012\263\005\n\rClientService\0222\n\003Get\022\024.hbase.pb" +
-      ".GetRequest\032\025.hbase.pb.GetResponse\022;\n\006Mu" +
-      "tate\022\027.hbase.pb.MutateRequest\032\030.hbase.pb" +
-      ".MutateResponse\0225\n\004Scan\022\025.hbase.pb.ScanR" +
-      "equest\032\026.hbase.pb.ScanResponse\022P\n\rBulkLo" +
-      "adHFile\022\036.hbase.pb.BulkLoadHFileRequest\032" +
-      "\037.hbase.pb.BulkLoadHFileResponse\022V\n\017Prep" +
-      "areBulkLoad\022 .hbase.pb.PrepareBulkLoadRe" +
-      "quest\032!.hbase.pb.PrepareBulkLoadResponse",
-      "\022V\n\017CleanupBulkLoad\022 .hbase.pb.CleanupBu" +
-      "lkLoadRequest\032!.hbase.pb.CleanupBulkLoad" +
-      "Response\022X\n\013ExecService\022#.hbase.pb.Copro" +
-      "cessorServiceRequest\032$.hbase.pb.Coproces" +
-      "sorServiceResponse\022d\n\027ExecRegionServerSe" +
-      "rvice\022#.hbase.pb.CoprocessorServiceReque" +
-      "st\032$.hbase.pb.CoprocessorServiceResponse" +
-      "\0228\n\005Multi\022\026.hbase.pb.MultiRequest\032\027.hbas" +
-      "e.pb.MultiResponseBI\n1org.apache.hadoop." +
-      "hbase.shaded.protobuf.generatedB\014ClientP",
-      "rotosH\001\210\001\001\240\001\001"
+      " \001(\004:\0010\022\037\n\021include_start_row\030\025 \001(\010:\004true" +
+      "\022\037\n\020include_stop_row\030\026 \001(\010:\005false\"\246\002\n\013Sc" +
+      "anRequest\022)\n\006region\030\001 \001(\0132\031.hbase.pb.Reg" +
+      "ionSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb.Sc" +
+      "an\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_rows" +
+      "\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_ca" +
+      "ll_seq\030\006 \001(\004\022\037\n\027client_handles_partials\030" +
+      "\007 \001(\010\022!\n\031client_handles_heartbeats\030\010 \001(\010" +
+      "\022\032\n\022track_scan_metrics\030\t \001(\010\022\024\n\005renew\030\n " +
+      "\001(\010:\005false\"\266\002\n\014ScanResponse\022\030\n\020cells_per",
+      "_result\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014mor" +
+      "e_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030" +
+      "\005 \003(\0132\020.hbase.pb.Result\022\r\n\005stale\030\006 \001(\010\022\037" +
+      "\n\027partial_flag_per_result\030\007 \003(\010\022\036\n\026more_" +
+      "results_in_region\030\010 \001(\010\022\031\n\021heartbeat_mes" +
+      "sage\030\t \001(\010\022+\n\014scan_metrics\030\n \001(\0132\025.hbase" +
+      ".pb.ScanMetrics\022\032\n\017mvcc_read_point\030\013 \001(\004" +
+      ":\0010\"\240\002\n\024BulkLoadHFileRequest\022)\n\006region\030\001" +
+      " \002(\0132\031.hbase.pb.RegionSpecifier\022>\n\013famil" +
+      "y_path\030\002 \003(\0132).hbase.pb.BulkLoadHFileReq",
+      "uest.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\022" +
+      "+\n\010fs_token\030\004 \001(\0132\031.hbase.pb.DelegationT" +
+      "oken\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 " +
+      "\001(\010:\005false\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014" +
+      "\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022" +
+      "\016\n\006loaded\030\001 \002(\010\"V\n\017DelegationToken\022\022\n\nid" +
+      "entifier\030\001 \001(\014\022\020\n\010password\030\002 \001(\014\022\014\n\004kind" +
+      "\030\003 \001(\t\022\017\n\007service\030\004 \001(\t\"l\n\026PrepareBulkLo" +
+      "adRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" +
+      ".TableName\022)\n\006region\030\002 \001(\0132\031.hbase.pb.Re",
+      "gionSpecifier\"-\n\027PrepareBulkLoadResponse" +
+      "\022\022\n\nbulk_token\030\001 \002(\t\"W\n\026CleanupBulkLoadR" +
+      "equest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001" +
+      "(\0132\031.hbase.pb.RegionSpecifier\"\031\n\027Cleanup" +
+      "BulkLoadResponse\"a\n\026CoprocessorServiceCa" +
+      "ll\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n" +
+      "\013method_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030C" +
+      "oprocessorServiceResult\022&\n\005value\030\001 \001(\0132\027" +
+      ".hbase.pb.NameBytesPair\"v\n\031CoprocessorSe" +
+      "rviceRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.",
+      "RegionSpecifier\022.\n\004call\030\002 \002(\0132 .hbase.pb" +
+      ".CoprocessorServiceCall\"o\n\032CoprocessorSe" +
+      "rviceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb" +
+      ".RegionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase." +
+      "pb.NameBytesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001" +
+      "(\r\022)\n\010mutation\030\002 \001(\0132\027.hbase.pb.Mutation" +
+      "Proto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014ser" +
+      "vice_call\030\004 \001(\0132 .hbase.pb.CoprocessorSe" +
+      "rviceCall\"k\n\014RegionAction\022)\n\006region\030\001 \002(" +
+      "\0132\031.hbase.pb.RegionSpecifier\022\016\n\006atomic\030\002",
+      " \001(\010\022 \n\006action\030\003 \003(\0132\020.hbase.pb.Action\"c" +
+      "\n\017RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:" +
+      "\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compacti" +
+      "onPressure\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadSt" +
+      "ats\022)\n\006region\030\001 \003(\0132\031.hbase.pb.RegionSpe" +
+      "cifier\022\'\n\004stat\030\002 \003(\0132\031.hbase.pb.RegionLo" +
+      "adStats\"\336\001\n\021ResultOrException\022\r\n\005index\030\001" +
+      " \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*" +
+      "\n\texception\030\003 \001(\0132\027.hbase.pb.NameBytesPa" +
+      "ir\022:\n\016service_result\030\004 \001(\0132\".hbase.pb.Co",
+      "processorServiceResult\0220\n\tloadStats\030\005 \001(" +
+      "\0132\031.hbase.pb.RegionLoadStatsB\002\030\001\"x\n\022Regi" +
+      "onActionResult\0226\n\021resultOrException\030\001 \003(" +
+      "\0132\033.hbase.pb.ResultOrException\022*\n\texcept" +
+      "ion\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mu" +
+      "ltiRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase" +
+      ".pb.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\t" +
+      "condition\030\003 \001(\0132\023.hbase.pb.Condition\"\226\001\n" +
+      "\rMultiResponse\0228\n\022regionActionResult\030\001 \003" +
+      "(\0132\034.hbase.pb.RegionActionResult\022\021\n\tproc",
+      "essed\030\002 \001(\010\0228\n\020regionStatistics\030\003 \001(\0132\036." +
+      "hbase.pb.MultiRegionLoadStats*\'\n\013Consist" +
+      "ency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClien" +
+      "tService\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025." +
+      "hbase.pb.GetResponse\022;\n\006Mutate\022\027.hbase.p" +
+      "b.MutateRequest\032\030.hbase.pb.MutateRespons" +
+      "e\0225\n\004Scan\022\025.hbase.pb.ScanRequest\032\026.hbase" +
+      ".pb.ScanResponse\022P\n\rBulkLoadHFile\022\036.hbas" +
+      "e.pb.BulkLoadHFileRequest\032\037.hbase.pb.Bul" +
+      "kLoadHFileResponse\022V\n\017PrepareBulkLoad\022 .",
+      "hbase.pb.PrepareBulkLoadRequest\032!.hbase." +
+      "pb.PrepareBulkLoadResponse\022V\n\017CleanupBul" +
+      "kLoad\022 .hbase.pb.CleanupBulkLoadRequest\032" +
+      "!.hbase.pb.CleanupBulkLoadResponse\022X\n\013Ex" +
+      "ecService\022#.hbase.pb.CoprocessorServiceR" +
+      "equest\032$.hbase.pb.CoprocessorServiceResp" +
+      "onse\022d\n\027ExecRegionServerService\022#.hbase." +
+      "pb.CoprocessorServiceRequest\032$.hbase.pb." +
+      "CoprocessorServiceResponse\0228\n\005Multi\022\026.hb" +
+      "ase.pb.MultiRequest\032\027.hbase.pb.MultiResp",
+      "onseBI\n1org.apache.hadoop.hbase.shaded.p" +
+      "rotobuf.generatedB\014ClientProtosH\001\210\001\001\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.    InternalDescriptorAssigner() {
@@ -40865,7 +41042,7 @@ public final class ClientProtos {
     internal_static_hbase_pb_Scan_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_Scan_descriptor,
-        new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", });
+        new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", "IncludeStartRow", "IncludeStopRow", });
     internal_static_hbase_pb_ScanRequest_descriptor =
       getDescriptor().getMessageTypes().get(12);
     internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-protocol-shaded/src/main/protobuf/Client.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Client.proto b/hbase-protocol-shaded/src/main/protobuf/Client.proto
index 9a7fea2..2793b89 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Client.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Client.proto
@@ -256,6 +256,8 @@ message Scan {
   optional bool allow_partial_results = 18;
   repeated ColumnFamilyTimeRange cf_time_range = 19;
   optional uint64 mvcc_read_point = 20 [default = 0];
+  optional bool include_start_row = 21 [default = true];
+  optional bool include_stop_row = 22 [default = false];
 }
 
 /**

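For reference, a minimal sketch of how a caller might set the two new fields through the generated (shaded) builder; the class name and row keys below are illustrative only and are not part of this commit:

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InclusiveScanSketch {
      // Builds a Scan proto covering [row-010, row-020], inclusive on both ends.
      public static ClientProtos.Scan closedRangeScan() {
        return ClientProtos.Scan.newBuilder()
            .setStartRow(ByteString.copyFrom(Bytes.toBytes("row-010")))
            .setIncludeStartRow(true)   // default is already true
            .setStopRow(ByteString.copyFrom(Bytes.toBytes("row-020")))
            .setIncludeStopRow(true)    // default is false, i.e. [start, stop)
            .build();
      }
    }

Because the defaults are include_start_row=true and include_stop_row=false, clients that never touch these fields keep the old [start, stop) semantics.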

[45/50] [abbrv] hbase git commit: HBASE-16869 Fixed typo in 'Disabling Blockcache' doc

Posted by sy...@apache.org.
HBASE-16869 Fixed typo in 'Disabling Blockcache' doc

Signed-off-by: Jerry He <je...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/521730eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/521730eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/521730eb

Branch: refs/heads/hbase-12439
Commit: 521730ebc71995df514748b03554e56bfd8beafa
Parents: 0e48665
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sun Jan 1 20:53:52 2017 +0100
Committer: Jerry He <je...@apache.org>
Committed: Sun Jan 1 12:45:21 2017 -0800

----------------------------------------------------------------------
 src/main/asciidoc/_chapters/configuration.adoc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/521730eb/src/main/asciidoc/_chapters/configuration.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc
index 6e356bc..baa4d4c 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -938,7 +938,7 @@ See <<master.processes.loadbalancer,master.processes.loadbalancer>> for more inf
 [[disabling.blockcache]]
 ==== Disabling Blockcache
 
-Do not turn off block cache (You'd do it by setting `hbase.block.cache.size` to zero). Currently we do not do well if you do this because the RegionServer will spend all its time loading HFile indices over and over again.
+Do not turn off block cache (You'd do it by setting `hfile.block.cache.size` to zero). Currently we do not do well if you do this because the RegionServer will spend all its time loading HFile indices over and over again.
 If your working set is such that block cache does you no good, at least size the block cache such that HFile indices will stay up in the cache (you can get a rough idea on the size you need by surveying RegionServer UIs; you'll see index block size accounted near the top of the webpage).
 
 [[nagles]]

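A small Java sketch of the point above, keeping the cache enabled and only tuning its size; the 0.4 fraction is purely illustrative and not a recommendation from this section:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BlockCacheSizingSketch {
      public static Configuration withSizedBlockCache() {
        Configuration conf = HBaseConfiguration.create();
        // hfile.block.cache.size is a fraction of the RegionServer heap.
        // Setting it to 0 turns the block cache off, which is what the section
        // above warns against; size it instead so HFile index blocks stay cached.
        conf.setFloat("hfile.block.cache.size", 0.4f);
        return conf;
      }
    }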

[12/50] [abbrv] hbase git commit: HBASE-17330 SnapshotFileCache will always refresh the file cache (Jianwei Cui)

Posted by sy...@apache.org.
HBASE-17330 SnapshotFileCache will always refresh the file cache (Jianwei Cui)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/66781864
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/66781864
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/66781864

Branch: refs/heads/hbase-12439
Commit: 66781864aaf78e8c8afb0978a7f68b6773d69649
Parents: fc93de5
Author: tedyu <yu...@gmail.com>
Authored: Thu Dec 22 02:29:27 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Thu Dec 22 02:29:27 2016 -0800

----------------------------------------------------------------------
 .../master/snapshot/SnapshotFileCache.java      | 34 +++-----------------
 1 file changed, 4 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/66781864/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
index f80d962..f03344c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotFileCache.java
@@ -205,14 +205,10 @@ public class SnapshotFileCache implements Stoppable {
   }
 
   private synchronized void refreshCache() throws IOException {
-    long lastTimestamp = Long.MAX_VALUE;
-    boolean hasChanges = false;
-
     // get the status of the snapshots directory and check if it has changes
+    FileStatus dirStatus;
     try {
-      FileStatus dirStatus = fs.getFileStatus(snapshotDir);
-      lastTimestamp = dirStatus.getModificationTime();
-      hasChanges |= (lastTimestamp >= lastModifiedTime);
+      dirStatus = fs.getFileStatus(snapshotDir);
     } catch (FileNotFoundException e) {
       if (this.cache.size() > 0) {
         LOG.error("Snapshot directory: " + snapshotDir + " doesn't exist");
@@ -220,30 +216,8 @@ public class SnapshotFileCache implements Stoppable {
       return;
     }
 
-    // get the status of the snapshots temporary directory and check if it has changes
-    // The top-level directory timestamp is not updated, so we have to check the inner-level.
-    try {
-      Path snapshotTmpDir = new Path(snapshotDir, SnapshotDescriptionUtils.SNAPSHOT_TMP_DIR_NAME);
-      FileStatus tempDirStatus = fs.getFileStatus(snapshotTmpDir);
-      lastTimestamp = Math.min(lastTimestamp, tempDirStatus.getModificationTime());
-      hasChanges |= (lastTimestamp >= lastModifiedTime);
-      if (!hasChanges) {
-        FileStatus[] tmpSnapshots = FSUtils.listStatus(fs, snapshotDir);
-        if (tmpSnapshots != null) {
-          for (FileStatus dirStatus: tmpSnapshots) {
-            lastTimestamp = Math.min(lastTimestamp, dirStatus.getModificationTime());
-          }
-          hasChanges |= (lastTimestamp >= lastModifiedTime);
-        }
-      }
-    } catch (FileNotFoundException e) {
-      // Nothing todo, if the tmp dir is empty
-    }
-
     // if the snapshot directory wasn't modified since we last check, we are done
-    if (!hasChanges) {
-      return;
-    }
+    if (dirStatus.getModificationTime() <= this.lastModifiedTime) return;
 
     // directory was modified, so we need to reload our cache
     // there could be a slight race here where we miss the cache, check the directory modification
@@ -251,7 +225,7 @@ public class SnapshotFileCache implements Stoppable {
     // However, snapshot directories are only created once, so this isn't an issue.
 
     // 1. update the modified time
-    this.lastModifiedTime = lastTimestamp;
+    this.lastModifiedTime = dirStatus.getModificationTime();
 
     // 2.clear the cache
     this.cache.clear();

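A self-contained sketch of the modification-time guard this change reduces refreshCache() to; the class and field names below are hypothetical, not the HBase code itself:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class MtimeGuardedCache {
      private final FileSystem fs;
      private final Path snapshotDir;
      private long lastModifiedTime = -1;

      MtimeGuardedCache(FileSystem fs, Path snapshotDir) {
        this.fs = fs;
        this.snapshotDir = snapshotDir;
      }

      synchronized void refreshIfChanged() throws IOException {
        FileStatus dirStatus;
        try {
          dirStatus = fs.getFileStatus(snapshotDir);
        } catch (FileNotFoundException e) {
          return; // nothing to refresh if the snapshot directory is gone
        }
        // Skip the expensive reload when the directory has not been modified
        // since the last refresh; this is the single check kept by the patch.
        if (dirStatus.getModificationTime() <= lastModifiedTime) {
          return;
        }
        lastModifiedTime = dirStatus.getModificationTime();
        reload();
      }

      private void reload() {
        // rebuild the cached snapshot file listing here
      }
    }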

[37/50] [abbrv] hbase git commit: HBASE-17320 Add inclusive/exclusive support for startRow and endRow of scan

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index d7e2b6f..087576c 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -14229,6 +14229,26 @@ public final class ClientProtos {
      * <code>optional uint64 mvcc_read_point = 20 [default = 0];</code>
      */
     long getMvccReadPoint();
+
+    // optional bool include_start_row = 21 [default = true];
+    /**
+     * <code>optional bool include_start_row = 21 [default = true];</code>
+     */
+    boolean hasIncludeStartRow();
+    /**
+     * <code>optional bool include_start_row = 21 [default = true];</code>
+     */
+    boolean getIncludeStartRow();
+
+    // optional bool include_stop_row = 22 [default = false];
+    /**
+     * <code>optional bool include_stop_row = 22 [default = false];</code>
+     */
+    boolean hasIncludeStopRow();
+    /**
+     * <code>optional bool include_stop_row = 22 [default = false];</code>
+     */
+    boolean getIncludeStopRow();
   }
   /**
    * Protobuf type {@code hbase.pb.Scan}
@@ -14423,6 +14443,16 @@ public final class ClientProtos {
               mvccReadPoint_ = input.readUInt64();
               break;
             }
+            case 168: {
+              bitField0_ |= 0x00020000;
+              includeStartRow_ = input.readBool();
+              break;
+            }
+            case 176: {
+              bitField0_ |= 0x00040000;
+              includeStopRow_ = input.readBool();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -14872,6 +14902,38 @@ public final class ClientProtos {
       return mvccReadPoint_;
     }
 
+    // optional bool include_start_row = 21 [default = true];
+    public static final int INCLUDE_START_ROW_FIELD_NUMBER = 21;
+    private boolean includeStartRow_;
+    /**
+     * <code>optional bool include_start_row = 21 [default = true];</code>
+     */
+    public boolean hasIncludeStartRow() {
+      return ((bitField0_ & 0x00020000) == 0x00020000);
+    }
+    /**
+     * <code>optional bool include_start_row = 21 [default = true];</code>
+     */
+    public boolean getIncludeStartRow() {
+      return includeStartRow_;
+    }
+
+    // optional bool include_stop_row = 22 [default = false];
+    public static final int INCLUDE_STOP_ROW_FIELD_NUMBER = 22;
+    private boolean includeStopRow_;
+    /**
+     * <code>optional bool include_stop_row = 22 [default = false];</code>
+     */
+    public boolean hasIncludeStopRow() {
+      return ((bitField0_ & 0x00040000) == 0x00040000);
+    }
+    /**
+     * <code>optional bool include_stop_row = 22 [default = false];</code>
+     */
+    public boolean getIncludeStopRow() {
+      return includeStopRow_;
+    }
+
     private void initFields() {
       column_ = java.util.Collections.emptyList();
       attribute_ = java.util.Collections.emptyList();
@@ -14893,6 +14955,8 @@ public final class ClientProtos {
       allowPartialResults_ = false;
       cfTimeRange_ = java.util.Collections.emptyList();
       mvccReadPoint_ = 0L;
+      includeStartRow_ = true;
+      includeStopRow_ = false;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -14990,6 +15054,12 @@ public final class ClientProtos {
       if (((bitField0_ & 0x00010000) == 0x00010000)) {
         output.writeUInt64(20, mvccReadPoint_);
       }
+      if (((bitField0_ & 0x00020000) == 0x00020000)) {
+        output.writeBool(21, includeStartRow_);
+      }
+      if (((bitField0_ & 0x00040000) == 0x00040000)) {
+        output.writeBool(22, includeStopRow_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -15079,6 +15149,14 @@ public final class ClientProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeUInt64Size(20, mvccReadPoint_);
       }
+      if (((bitField0_ & 0x00020000) == 0x00020000)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(21, includeStartRow_);
+      }
+      if (((bitField0_ & 0x00040000) == 0x00040000)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBoolSize(22, includeStopRow_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -15193,6 +15271,16 @@ public final class ClientProtos {
         result = result && (getMvccReadPoint()
             == other.getMvccReadPoint());
       }
+      result = result && (hasIncludeStartRow() == other.hasIncludeStartRow());
+      if (hasIncludeStartRow()) {
+        result = result && (getIncludeStartRow()
+            == other.getIncludeStartRow());
+      }
+      result = result && (hasIncludeStopRow() == other.hasIncludeStopRow());
+      if (hasIncludeStopRow()) {
+        result = result && (getIncludeStopRow()
+            == other.getIncludeStopRow());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -15286,6 +15374,14 @@ public final class ClientProtos {
         hash = (37 * hash) + MVCC_READ_POINT_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getMvccReadPoint());
       }
+      if (hasIncludeStartRow()) {
+        hash = (37 * hash) + INCLUDE_START_ROW_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getIncludeStartRow());
+      }
+      if (hasIncludeStopRow()) {
+        hash = (37 * hash) + INCLUDE_STOP_ROW_FIELD_NUMBER;
+        hash = (53 * hash) + hashBoolean(getIncludeStopRow());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -15471,6 +15567,10 @@ public final class ClientProtos {
         }
         mvccReadPoint_ = 0L;
         bitField0_ = (bitField0_ & ~0x00080000);
+        includeStartRow_ = true;
+        bitField0_ = (bitField0_ & ~0x00100000);
+        includeStopRow_ = false;
+        bitField0_ = (bitField0_ & ~0x00200000);
         return this;
       }
 
@@ -15602,6 +15702,14 @@ public final class ClientProtos {
           to_bitField0_ |= 0x00010000;
         }
         result.mvccReadPoint_ = mvccReadPoint_;
+        if (((from_bitField0_ & 0x00100000) == 0x00100000)) {
+          to_bitField0_ |= 0x00020000;
+        }
+        result.includeStartRow_ = includeStartRow_;
+        if (((from_bitField0_ & 0x00200000) == 0x00200000)) {
+          to_bitField0_ |= 0x00040000;
+        }
+        result.includeStopRow_ = includeStopRow_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -15747,6 +15855,12 @@ public final class ClientProtos {
         if (other.hasMvccReadPoint()) {
           setMvccReadPoint(other.getMvccReadPoint());
         }
+        if (other.hasIncludeStartRow()) {
+          setIncludeStartRow(other.getIncludeStartRow());
+        }
+        if (other.hasIncludeStopRow()) {
+          setIncludeStopRow(other.getIncludeStopRow());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -17272,6 +17386,72 @@ public final class ClientProtos {
         return this;
       }
 
+      // optional bool include_start_row = 21 [default = true];
+      private boolean includeStartRow_ = true;
+      /**
+       * <code>optional bool include_start_row = 21 [default = true];</code>
+       */
+      public boolean hasIncludeStartRow() {
+        return ((bitField0_ & 0x00100000) == 0x00100000);
+      }
+      /**
+       * <code>optional bool include_start_row = 21 [default = true];</code>
+       */
+      public boolean getIncludeStartRow() {
+        return includeStartRow_;
+      }
+      /**
+       * <code>optional bool include_start_row = 21 [default = true];</code>
+       */
+      public Builder setIncludeStartRow(boolean value) {
+        bitField0_ |= 0x00100000;
+        includeStartRow_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bool include_start_row = 21 [default = true];</code>
+       */
+      public Builder clearIncludeStartRow() {
+        bitField0_ = (bitField0_ & ~0x00100000);
+        includeStartRow_ = true;
+        onChanged();
+        return this;
+      }
+
+      // optional bool include_stop_row = 22 [default = false];
+      private boolean includeStopRow_ ;
+      /**
+       * <code>optional bool include_stop_row = 22 [default = false];</code>
+       */
+      public boolean hasIncludeStopRow() {
+        return ((bitField0_ & 0x00200000) == 0x00200000);
+      }
+      /**
+       * <code>optional bool include_stop_row = 22 [default = false];</code>
+       */
+      public boolean getIncludeStopRow() {
+        return includeStopRow_;
+      }
+      /**
+       * <code>optional bool include_stop_row = 22 [default = false];</code>
+       */
+      public Builder setIncludeStopRow(boolean value) {
+        bitField0_ |= 0x00200000;
+        includeStopRow_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional bool include_stop_row = 22 [default = false];</code>
+       */
+      public Builder clearIncludeStopRow() {
+        bitField0_ = (bitField0_ & ~0x00200000);
+        includeStopRow_ = false;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:hbase.pb.Scan)
     }
 
@@ -39732,7 +39912,7 @@ public final class ClientProtos {
       "tion\030\003 \001(\0132\023.hbase.pb.Condition\022\023\n\013nonce" +
       "_group\030\004 \001(\004\"E\n\016MutateResponse\022 \n\006result" +
       "\030\001 \001(\0132\020.hbase.pb.Result\022\021\n\tprocessed\030\002 " +
-      "\001(\010\"\331\004\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." +
+      "\001(\010\"\233\005\n\004Scan\022 \n\006column\030\001 \003(\0132\020.hbase.pb." +
       "Column\022*\n\tattribute\030\002 \003(\0132\027.hbase.pb.Nam" +
       "eBytesPair\022\021\n\tstart_row\030\003 \001(\014\022\020\n\010stop_ro" +
       "w\030\004 \001(\014\022 \n\006filter\030\005 \001(\0132\020.hbase.pb.Filte" +
@@ -39747,97 +39927,98 @@ public final class ClientProtos {
       "aching\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 " +
       "\001(\010\0226\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.Co" +
       "lumnFamilyTimeRange\022\032\n\017mvcc_read_point\030\024",
-      " \001(\004:\0010\"\246\002\n\013ScanRequest\022)\n\006region\030\001 \001(\0132" +
-      "\031.hbase.pb.RegionSpecifier\022\034\n\004scan\030\002 \001(\013" +
-      "2\016.hbase.pb.Scan\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016" +
-      "number_of_rows\030\004 \001(\r\022\025\n\rclose_scanner\030\005 " +
-      "\001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027client_han" +
-      "dles_partials\030\007 \001(\010\022!\n\031client_handles_he" +
-      "artbeats\030\010 \001(\010\022\032\n\022track_scan_metrics\030\t \001" +
-      "(\010\022\024\n\005renew\030\n \001(\010:\005false\"\266\002\n\014ScanRespons" +
-      "e\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nscanner_i" +
-      "d\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001",
-      "(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Result\022\r\n" +
-      "\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_result\030" +
-      "\007 \003(\010\022\036\n\026more_results_in_region\030\010 \001(\010\022\031\n" +
-      "\021heartbeat_message\030\t \001(\010\022+\n\014scan_metrics" +
-      "\030\n \001(\0132\025.hbase.pb.ScanMetrics\022\032\n\017mvcc_re" +
-      "ad_point\030\013 \001(\004:\0010\"\240\002\n\024BulkLoadHFileReque" +
-      "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" +
-      "ifier\022>\n\013family_path\030\002 \003(\0132).hbase.pb.Bu" +
-      "lkLoadHFileRequest.FamilyPath\022\026\n\016assign_" +
-      "seq_num\030\003 \001(\010\022+\n\010fs_token\030\004 \001(\0132\031.hbase.",
-      "pb.DelegationToken\022\022\n\nbulk_token\030\005 \001(\t\022\030" +
-      "\n\tcopy_file\030\006 \001(\010:\005false\032*\n\nFamilyPath\022\016" +
-      "\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoad" +
-      "HFileResponse\022\016\n\006loaded\030\001 \002(\010\"V\n\017Delegat" +
-      "ionToken\022\022\n\nidentifier\030\001 \001(\014\022\020\n\010password" +
-      "\030\002 \001(\014\022\014\n\004kind\030\003 \001(\t\022\017\n\007service\030\004 \001(\t\"l\n" +
-      "\026PrepareBulkLoadRequest\022\'\n\ntable_name\030\001 " +
-      "\002(\0132\023.hbase.pb.TableName\022)\n\006region\030\002 \001(\013" +
-      "2\031.hbase.pb.RegionSpecifier\"-\n\027PrepareBu" +
-      "lkLoadResponse\022\022\n\nbulk_token\030\001 \002(\t\"W\n\026Cl",
-      "eanupBulkLoadRequest\022\022\n\nbulk_token\030\001 \002(\t" +
-      "\022)\n\006region\030\002 \001(\0132\031.hbase.pb.RegionSpecif" +
-      "ier\"\031\n\027CleanupBulkLoadResponse\"a\n\026Coproc" +
-      "essorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014service" +
-      "_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022\017\n\007requ" +
-      "est\030\004 \002(\014\"B\n\030CoprocessorServiceResult\022&\n" +
-      "\005value\030\001 \001(\0132\027.hbase.pb.NameBytesPair\"v\n" +
-      "\031CoprocessorServiceRequest\022)\n\006region\030\001 \002" +
-      "(\0132\031.hbase.pb.RegionSpecifier\022.\n\004call\030\002 " +
-      "\002(\0132 .hbase.pb.CoprocessorServiceCall\"o\n",
-      "\032CoprocessorServiceResponse\022)\n\006region\030\001 " +
-      "\002(\0132\031.hbase.pb.RegionSpecifier\022&\n\005value\030" +
-      "\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001\n\006Actio" +
-      "n\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(\0132\027.hba" +
-      "se.pb.MutationProto\022\032\n\003get\030\003 \001(\0132\r.hbase" +
-      ".pb.Get\0226\n\014service_call\030\004 \001(\0132 .hbase.pb" +
-      ".CoprocessorServiceCall\"k\n\014RegionAction\022" +
-      ")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" +
-      "er\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.hba" +
-      "se.pb.Action\"c\n\017RegionLoadStats\022\027\n\014memst",
-      "oreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:" +
-      "\0010\022\035\n\022compactionPressure\030\003 \001(\005:\0010\"j\n\024Mul" +
-      "tiRegionLoadStats\022)\n\006region\030\001 \003(\0132\031.hbas" +
-      "e.pb.RegionSpecifier\022\'\n\004stat\030\002 \003(\0132\031.hba" +
-      "se.pb.RegionLoadStats\"\336\001\n\021ResultOrExcept" +
-      "ion\022\r\n\005index\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hba" +
-      "se.pb.Result\022*\n\texception\030\003 \001(\0132\027.hbase." +
-      "pb.NameBytesPair\022:\n\016service_result\030\004 \001(\013" +
-      "2\".hbase.pb.CoprocessorServiceResult\0220\n\t" +
-      "loadStats\030\005 \001(\0132\031.hbase.pb.RegionLoadSta",
-      "tsB\002\030\001\"x\n\022RegionActionResult\0226\n\021resultOr" +
-      "Exception\030\001 \003(\0132\033.hbase.pb.ResultOrExcep" +
-      "tion\022*\n\texception\030\002 \001(\0132\027.hbase.pb.NameB" +
-      "ytesPair\"x\n\014MultiRequest\022,\n\014regionAction" +
-      "\030\001 \003(\0132\026.hbase.pb.RegionAction\022\022\n\nnonceG" +
-      "roup\030\002 \001(\004\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb" +
-      ".Condition\"\226\001\n\rMultiResponse\0228\n\022regionAc" +
-      "tionResult\030\001 \003(\0132\034.hbase.pb.RegionAction" +
-      "Result\022\021\n\tprocessed\030\002 \001(\010\0228\n\020regionStati" +
-      "stics\030\003 \001(\0132\036.hbase.pb.MultiRegionLoadSt",
-      "ats*\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELI" +
-      "NE\020\0012\263\005\n\rClientService\0222\n\003Get\022\024.hbase.pb" +
-      ".GetRequest\032\025.hbase.pb.GetResponse\022;\n\006Mu" +
-      "tate\022\027.hbase.pb.MutateRequest\032\030.hbase.pb" +
-      ".MutateResponse\0225\n\004Scan\022\025.hbase.pb.ScanR" +
-      "equest\032\026.hbase.pb.ScanResponse\022P\n\rBulkLo" +
-      "adHFile\022\036.hbase.pb.BulkLoadHFileRequest\032" +
-      "\037.hbase.pb.BulkLoadHFileResponse\022V\n\017Prep" +
-      "areBulkLoad\022 .hbase.pb.PrepareBulkLoadRe" +
-      "quest\032!.hbase.pb.PrepareBulkLoadResponse",
-      "\022V\n\017CleanupBulkLoad\022 .hbase.pb.CleanupBu" +
-      "lkLoadRequest\032!.hbase.pb.CleanupBulkLoad" +
-      "Response\022X\n\013ExecService\022#.hbase.pb.Copro" +
-      "cessorServiceRequest\032$.hbase.pb.Coproces" +
-      "sorServiceResponse\022d\n\027ExecRegionServerSe" +
-      "rvice\022#.hbase.pb.CoprocessorServiceReque" +
-      "st\032$.hbase.pb.CoprocessorServiceResponse" +
-      "\0228\n\005Multi\022\026.hbase.pb.MultiRequest\032\027.hbas" +
-      "e.pb.MultiResponseBB\n*org.apache.hadoop." +
-      "hbase.protobuf.generatedB\014ClientProtosH\001",
-      "\210\001\001\240\001\001"
+      " \001(\004:\0010\022\037\n\021include_start_row\030\025 \001(\010:\004true" +
+      "\022\037\n\020include_stop_row\030\026 \001(\010:\005false\"\246\002\n\013Sc" +
+      "anRequest\022)\n\006region\030\001 \001(\0132\031.hbase.pb.Reg" +
+      "ionSpecifier\022\034\n\004scan\030\002 \001(\0132\016.hbase.pb.Sc" +
+      "an\022\022\n\nscanner_id\030\003 \001(\004\022\026\n\016number_of_rows" +
+      "\030\004 \001(\r\022\025\n\rclose_scanner\030\005 \001(\010\022\025\n\rnext_ca" +
+      "ll_seq\030\006 \001(\004\022\037\n\027client_handles_partials\030" +
+      "\007 \001(\010\022!\n\031client_handles_heartbeats\030\010 \001(\010" +
+      "\022\032\n\022track_scan_metrics\030\t \001(\010\022\024\n\005renew\030\n " +
+      "\001(\010:\005false\"\266\002\n\014ScanResponse\022\030\n\020cells_per",
+      "_result\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014mor" +
+      "e_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030" +
+      "\005 \003(\0132\020.hbase.pb.Result\022\r\n\005stale\030\006 \001(\010\022\037" +
+      "\n\027partial_flag_per_result\030\007 \003(\010\022\036\n\026more_" +
+      "results_in_region\030\010 \001(\010\022\031\n\021heartbeat_mes" +
+      "sage\030\t \001(\010\022+\n\014scan_metrics\030\n \001(\0132\025.hbase" +
+      ".pb.ScanMetrics\022\032\n\017mvcc_read_point\030\013 \001(\004" +
+      ":\0010\"\240\002\n\024BulkLoadHFileRequest\022)\n\006region\030\001" +
+      " \002(\0132\031.hbase.pb.RegionSpecifier\022>\n\013famil" +
+      "y_path\030\002 \003(\0132).hbase.pb.BulkLoadHFileReq",
+      "uest.FamilyPath\022\026\n\016assign_seq_num\030\003 \001(\010\022" +
+      "+\n\010fs_token\030\004 \001(\0132\031.hbase.pb.DelegationT" +
+      "oken\022\022\n\nbulk_token\030\005 \001(\t\022\030\n\tcopy_file\030\006 " +
+      "\001(\010:\005false\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014" +
+      "\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022" +
+      "\016\n\006loaded\030\001 \002(\010\"V\n\017DelegationToken\022\022\n\nid" +
+      "entifier\030\001 \001(\014\022\020\n\010password\030\002 \001(\014\022\014\n\004kind" +
+      "\030\003 \001(\t\022\017\n\007service\030\004 \001(\t\"l\n\026PrepareBulkLo" +
+      "adRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" +
+      ".TableName\022)\n\006region\030\002 \001(\0132\031.hbase.pb.Re",
+      "gionSpecifier\"-\n\027PrepareBulkLoadResponse" +
+      "\022\022\n\nbulk_token\030\001 \002(\t\"W\n\026CleanupBulkLoadR" +
+      "equest\022\022\n\nbulk_token\030\001 \002(\t\022)\n\006region\030\002 \001" +
+      "(\0132\031.hbase.pb.RegionSpecifier\"\031\n\027Cleanup" +
+      "BulkLoadResponse\"a\n\026CoprocessorServiceCa" +
+      "ll\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n" +
+      "\013method_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030C" +
+      "oprocessorServiceResult\022&\n\005value\030\001 \001(\0132\027" +
+      ".hbase.pb.NameBytesPair\"v\n\031CoprocessorSe" +
+      "rviceRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.",
+      "RegionSpecifier\022.\n\004call\030\002 \002(\0132 .hbase.pb" +
+      ".CoprocessorServiceCall\"o\n\032CoprocessorSe" +
+      "rviceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb" +
+      ".RegionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase." +
+      "pb.NameBytesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001" +
+      "(\r\022)\n\010mutation\030\002 \001(\0132\027.hbase.pb.Mutation" +
+      "Proto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014ser" +
+      "vice_call\030\004 \001(\0132 .hbase.pb.CoprocessorSe" +
+      "rviceCall\"k\n\014RegionAction\022)\n\006region\030\001 \002(" +
+      "\0132\031.hbase.pb.RegionSpecifier\022\016\n\006atomic\030\002",
+      " \001(\010\022 \n\006action\030\003 \003(\0132\020.hbase.pb.Action\"c" +
+      "\n\017RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:" +
+      "\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compacti" +
+      "onPressure\030\003 \001(\005:\0010\"j\n\024MultiRegionLoadSt" +
+      "ats\022)\n\006region\030\001 \003(\0132\031.hbase.pb.RegionSpe" +
+      "cifier\022\'\n\004stat\030\002 \003(\0132\031.hbase.pb.RegionLo" +
+      "adStats\"\336\001\n\021ResultOrException\022\r\n\005index\030\001" +
+      " \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.pb.Result\022*" +
+      "\n\texception\030\003 \001(\0132\027.hbase.pb.NameBytesPa" +
+      "ir\022:\n\016service_result\030\004 \001(\0132\".hbase.pb.Co",
+      "processorServiceResult\0220\n\tloadStats\030\005 \001(" +
+      "\0132\031.hbase.pb.RegionLoadStatsB\002\030\001\"x\n\022Regi" +
+      "onActionResult\0226\n\021resultOrException\030\001 \003(" +
+      "\0132\033.hbase.pb.ResultOrException\022*\n\texcept" +
+      "ion\030\002 \001(\0132\027.hbase.pb.NameBytesPair\"x\n\014Mu" +
+      "ltiRequest\022,\n\014regionAction\030\001 \003(\0132\026.hbase" +
+      ".pb.RegionAction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\t" +
+      "condition\030\003 \001(\0132\023.hbase.pb.Condition\"\226\001\n" +
+      "\rMultiResponse\0228\n\022regionActionResult\030\001 \003" +
+      "(\0132\034.hbase.pb.RegionActionResult\022\021\n\tproc",
+      "essed\030\002 \001(\010\0228\n\020regionStatistics\030\003 \001(\0132\036." +
+      "hbase.pb.MultiRegionLoadStats*\'\n\013Consist" +
+      "ency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\263\005\n\rClien" +
+      "tService\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025." +
+      "hbase.pb.GetResponse\022;\n\006Mutate\022\027.hbase.p" +
+      "b.MutateRequest\032\030.hbase.pb.MutateRespons" +
+      "e\0225\n\004Scan\022\025.hbase.pb.ScanRequest\032\026.hbase" +
+      ".pb.ScanResponse\022P\n\rBulkLoadHFile\022\036.hbas" +
+      "e.pb.BulkLoadHFileRequest\032\037.hbase.pb.Bul" +
+      "kLoadHFileResponse\022V\n\017PrepareBulkLoad\022 .",
+      "hbase.pb.PrepareBulkLoadRequest\032!.hbase." +
+      "pb.PrepareBulkLoadResponse\022V\n\017CleanupBul" +
+      "kLoad\022 .hbase.pb.CleanupBulkLoadRequest\032" +
+      "!.hbase.pb.CleanupBulkLoadResponse\022X\n\013Ex" +
+      "ecService\022#.hbase.pb.CoprocessorServiceR" +
+      "equest\032$.hbase.pb.CoprocessorServiceResp" +
+      "onse\022d\n\027ExecRegionServerService\022#.hbase." +
+      "pb.CoprocessorServiceRequest\032$.hbase.pb." +
+      "CoprocessorServiceResponse\0228\n\005Multi\022\026.hb" +
+      "ase.pb.MultiRequest\032\027.hbase.pb.MultiResp",
+      "onseBB\n*org.apache.hadoop.hbase.protobuf" +
+      ".generatedB\014ClientProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -39927,7 +40108,7 @@ public final class ClientProtos {
           internal_static_hbase_pb_Scan_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_Scan_descriptor,
-              new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", });
+              new java.lang.String[] { "Column", "Attribute", "StartRow", "StopRow", "Filter", "TimeRange", "MaxVersions", "CacheBlocks", "BatchSize", "MaxResultSize", "StoreLimit", "StoreOffset", "LoadColumnFamiliesOnDemand", "Small", "Reversed", "Consistency", "Caching", "AllowPartialResults", "CfTimeRange", "MvccReadPoint", "IncludeStartRow", "IncludeStopRow", });
           internal_static_hbase_pb_ScanRequest_descriptor =
             getDescriptor().getMessageTypes().get(12);
           internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-protocol/src/main/protobuf/Client.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Client.proto b/hbase-protocol/src/main/protobuf/Client.proto
index 8bce92f..ae932f7 100644
--- a/hbase-protocol/src/main/protobuf/Client.proto
+++ b/hbase-protocol/src/main/protobuf/Client.proto
@@ -256,6 +256,8 @@ message Scan {
   optional bool allow_partial_results = 18;
   repeated ColumnFamilyTimeRange cf_time_range = 19;
   optional uint64 mvcc_read_point = 20 [default = 0];
+  optional bool include_start_row = 21 [default = true];
+  optional bool include_stop_row = 22 [default = false];
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
index 2217034..db0ad01 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultStoreFileManager.java
@@ -179,8 +179,8 @@ class DefaultStoreFileManager implements StoreFileManager {
   }
 
   @Override
-  public final Collection<StoreFile> getFilesForScanOrGet(boolean isGet,
-      byte[] startRow, byte[] stopRow) {
+  public final Collection<StoreFile> getFilesForScan(byte[] startRow, boolean includeStartRow,
+      byte[] stopRow, boolean includeStopRow) {
     // We cannot provide any useful input and already have the files sorted by seqNum.
     return getStorefiles();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e11a31c..a5172bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5760,8 +5760,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     protected Cell joinedContinuationRow = null;
     private boolean filterClosed = false;
 
-    protected final int isScan;
     protected final byte[] stopRow;
+    protected final boolean includeStopRow;
     protected final HRegion region;
     protected final CellComparator comparator;
 
@@ -5797,15 +5797,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
        */
       defaultScannerContext = ScannerContext.newBuilder()
           .setBatchLimit(scan.getBatch()).build();
-
-      if (Bytes.equals(scan.getStopRow(), HConstants.EMPTY_END_ROW) && !scan.isGetScan()) {
-        this.stopRow = null;
-      } else {
-        this.stopRow = scan.getStopRow();
-      }
-      // If we are doing a get, we want to be [startRow,endRow]. Normally
-      // it is [startRow,endRow) and if startRow=endRow we get nothing.
-      this.isScan = scan.isGetScan() ? 1 : 0;
+      this.stopRow = scan.getStopRow();
+      this.includeStopRow = scan.includeStopRow();
 
       // synchronize on scannerReadPoints so that nobody calculates
       // getSmallestReadPoint, before scannerReadPoints is updated.
@@ -6118,7 +6111,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         // Let's see what we have in the storeHeap.
         Cell current = this.storeHeap.peek();
 
-        boolean stopRow = isStopRow(current);
+        boolean shouldStop = shouldStop(current);
         // When has filter row is true it means that the all the cells for a particular row must be
         // read before a filtering decision can be made. This means that filters where hasFilterRow
         // run the risk of encountering out of memory errors in the case that they are applied to a
@@ -6142,7 +6135,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         // If not, then it's main path - getting results from storeHeap.
         if (joinedContinuationRow == null) {
           // First, check if we are at a stop row. If so, there are no more results.
-          if (stopRow) {
+          if (shouldStop) {
             if (hasFilterRow) {
               filter.filterRowCells(results);
             }
@@ -6182,7 +6175,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           }
 
           Cell nextKv = this.storeHeap.peek();
-          stopRow = nextKv == null || isStopRow(nextKv);
+          shouldStop = shouldStop(nextKv);
           // save that the row was empty before filters applied to it.
           final boolean isEmptyRow = results.isEmpty();
 
@@ -6219,7 +6212,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
             // This row was totally filtered out, if this is NOT the last row,
             // we should continue on. Otherwise, nothing else to do.
-            if (!stopRow) continue;
+            if (!shouldStop) continue;
             return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
           }
 
@@ -6260,10 +6253,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           if (!moreRows) {
             return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
           }
-          if (!stopRow) continue;
+          if (!shouldStop) continue;
         }
 
-        if (stopRow) {
+        if (shouldStop) {
           return scannerContext.setScannerState(NextState.NO_MORE_VALUES).hasMoreValues();
         } else {
           return scannerContext.setScannerState(NextState.MORE_VALUES).hasMoreValues();
@@ -6343,10 +6336,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
               .postScannerFilterRow(this, curRowCell);
     }
 
-    protected boolean isStopRow(Cell currentRowCell) {
-      return currentRowCell == null
-          || (stopRow != null && comparator.compareRows(currentRowCell, stopRow, 0, stopRow
-          .length) >= isScan);
+    protected boolean shouldStop(Cell currentRowCell) {
+      if (currentRowCell == null) {
+        return true;
+      }
+      if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_END_ROW)) {
+        return false;
+      }
+      int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length);
+      return c > 0 || (c == 0 && !includeStopRow);
     }
 
     @Override

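For reference, a tiny standalone sketch of the forward-scan stop check introduced above (hypothetical helper, not HBase code): stop once the current row sorts after the stop row, or equals it and the stop row is exclusive; an empty stop row means scan to the end.

    import org.apache.hadoop.hbase.util.Bytes;

    final class StopRowCheck {
      static boolean shouldStop(byte[] currentRow, byte[] stopRow, boolean includeStopRow) {
        if (currentRow == null) {
          return true;                  // nothing left to read
        }
        if (stopRow == null || stopRow.length == 0) {
          return false;                 // no stop row configured
        }
        int c = Bytes.compareTo(currentRow, stopRow);
        return c > 0 || (c == 0 && !includeStopRow);
      }
    }

The reversed scanner below applies the mirror image of this comparison (c < 0 instead of c > 0), since a reversed scan walks rows in descending order.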
http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 007c28b..a54b789 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -18,6 +18,13 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableCollection;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
@@ -90,13 +97,6 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableCollection;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.google.common.collect.Sets;
-
 /**
  * A Store holds a column family in a Region.  It's a memstore and a set of zero
  * or more StoreFiles, which stretch backwards over time.
@@ -1138,20 +1138,19 @@ public class HStore implements Store {
   }
 
   /**
-   * Get all scanners with no filtering based on TTL (that happens further down
-   * the line).
+   * Get all scanners with no filtering based on TTL (that happens further down the line).
    * @return all scanners for this store
    */
   @Override
-  public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean isGet,
-      boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
-      byte[] stopRow, long readPt) throws IOException {
+  public List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean usePread,
+      boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow,
+      byte[] stopRow, boolean includeStopRow, long readPt) throws IOException {
     Collection<StoreFile> storeFilesToScan;
     List<KeyValueScanner> memStoreScanners;
     this.lock.readLock().lock();
     try {
-      storeFilesToScan =
-          this.storeEngine.getStoreFileManager().getFilesForScanOrGet(isGet, startRow, stopRow);
+      storeFilesToScan = this.storeEngine.getStoreFileManager().getFilesForScan(startRow,
+        includeStartRow, stopRow, includeStopRow);
       memStoreScanners = this.memstore.getScanners(readPt);
     } finally {
       this.lock.readLock().unlock();
@@ -1163,9 +1162,8 @@ public class HStore implements Store {
     // but now we get them in ascending order, which I think is
     // actually more correct, since memstore get put at the end.
     List<StoreFileScanner> sfScanners = StoreFileScanner.getScannersForStoreFiles(storeFilesToScan,
-        cacheBlocks, usePread, isCompaction, false, matcher, readPt, isPrimaryReplicaStore());
-    List<KeyValueScanner> scanners =
-      new ArrayList<KeyValueScanner>(sfScanners.size()+1);
+      cacheBlocks, usePread, isCompaction, false, matcher, readPt, isPrimaryReplicaStore());
+    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(sfScanners.size() + 1);
     scanners.addAll(sfScanners);
     // Then the memstore scanners
     scanners.addAll(memStoreScanners);
@@ -1174,8 +1172,9 @@ public class HStore implements Store {
 
   @Override
   public List<KeyValueScanner> getScanners(List<StoreFile> files, boolean cacheBlocks,
-      boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
-      byte[] startRow, byte[] stopRow, long readPt, boolean includeMemstoreScanner) throws IOException {
+      boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
+      boolean includeStartRow, byte[] stopRow, boolean includeStopRow, long readPt,
+      boolean includeMemstoreScanner) throws IOException {
     List<KeyValueScanner> memStoreScanners = null;
     if (includeMemstoreScanner) {
       this.lock.readLock().lock();
@@ -2234,8 +2233,8 @@ public class HStore implements Store {
         if (LOG.isInfoEnabled()) {
           LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() +
             " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() +
-            ", sequenceid=" +  + storeFile.getReader().getSequenceID() +
-            ", filesize=" + StringUtils.humanReadableInt(storeFile.getReader().length()));
+              ", sequenceid=" + +storeFile.getReader().getSequenceID() + ", filesize="
+              + TraditionalBinaryPrefix.long2String(storeFile.getReader().length(), "", 1));
         }
       }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java
index 487375a..5ffce79 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ReversedRegionScannerImpl.java
@@ -23,9 +23,11 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * ReversibleRegionScannerImpl extends from RegionScannerImpl, and is used to
@@ -56,10 +58,15 @@ class ReversedRegionScannerImpl extends RegionScannerImpl {
   }
 
   @Override
-  protected boolean isStopRow(Cell currentRowCell) {
-    return currentRowCell == null
-        || (super.stopRow != null && comparator.compareRows(currentRowCell, stopRow, 0,
-            stopRow.length) <= super.isScan);
+  protected boolean shouldStop(Cell currentRowCell) {
+    if (currentRowCell == null) {
+      return true;
+    }
+    if (stopRow == null || Bytes.equals(stopRow, HConstants.EMPTY_START_ROW)) {
+      return false;
+    }
+    int c = comparator.compareRows(currentRowCell, stopRow, 0, stopRow.length);
+    return c < 0 || (c == 0 && !includeStopRow);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index 30e6a74..bb9e20a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -81,35 +81,45 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
       throws IOException;
 
   /**
-   * Get all scanners with no filtering based on TTL (that happens further down
-   * the line).
-   * @param cacheBlocks
-   * @param isGet
-   * @param usePread
-   * @param isCompaction
-   * @param matcher
-   * @param startRow
-   * @param stopRow
-   * @param readPt
+   * Get all scanners with no filtering based on TTL (that happens further down the line).
+   * @param cacheBlocks cache the blocks or not
+   * @param usePread true to use pread, false if not
+   * @param isCompaction true if the scanner is created for compaction
+   * @param matcher the scan query matcher
+   * @param startRow the start row
+   * @param stopRow the stop row
+   * @param readPt the read point of the current scan
    * @return all scanners for this store
    */
-  List<KeyValueScanner> getScanners(
-    boolean cacheBlocks,
-    boolean isGet,
-    boolean usePread,
-    boolean isCompaction,
-    ScanQueryMatcher matcher,
-    byte[] startRow,
-    byte[] stopRow,
-    long readPt
-  ) throws IOException;
+  default List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean isGet, boolean usePread,
+      boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, byte[] stopRow, long readPt)
+      throws IOException {
+    return getScanners(cacheBlocks, usePread, isCompaction, matcher, startRow, true, stopRow, false,
+      readPt);
+  }
+
+  /**
+   * Get all scanners with no filtering based on TTL (that happens further down the line).
+   * @param cacheBlocks cache the blocks or not
+   * @param usePread true to use pread, false if not
+   * @param isCompaction true if the scanner is created for compaction
+   * @param matcher the scan query matcher
+   * @param startRow the start row
+   * @param includeStartRow true to include start row, false if not
+   * @param stopRow the stop row
+   * @param includeStopRow true to include stop row, false if not
+   * @param readPt the read point of the current scan
+   * @return all scanners for this store
+   */
+  List<KeyValueScanner> getScanners(boolean cacheBlocks, boolean usePread, boolean isCompaction,
+      ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow, byte[] stopRow,
+      boolean includeStopRow, long readPt) throws IOException;
 
   /**
    * Create scanners on the given files and if needed on the memstore with no filtering based on TTL
    * (that happens further down the line).
    * @param files the list of files on which the scanners has to be created
    * @param cacheBlocks cache the blocks or not
-   * @param isGet true if it is get, false if not
    * @param usePread true to use pread, false if not
    * @param isCompaction true if the scanner is created for compaction
    * @param matcher the scan query matcher
@@ -119,9 +129,34 @@ public interface Store extends HeapSize, StoreConfigInformation, PropagatingConf
    * @param includeMemstoreScanner true if memstore has to be included
    * @return scanners on the given files and on the memstore if specified
    */
-   List<KeyValueScanner> getScanners(List<StoreFile> files, boolean cacheBlocks, boolean isGet,
-          boolean usePread, boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow,
-          byte[] stopRow, long readPt, boolean includeMemstoreScanner) throws IOException;
+  default List<KeyValueScanner> getScanners(List<StoreFile> files, boolean cacheBlocks,
+      boolean isGet, boolean usePread, boolean isCompaction, ScanQueryMatcher matcher,
+      byte[] startRow, byte[] stopRow, long readPt, boolean includeMemstoreScanner)
+      throws IOException {
+    return getScanners(files, cacheBlocks, usePread, isCompaction, matcher, startRow, true, stopRow,
+      false, readPt, includeMemstoreScanner);
+  }
+
+  /**
+   * Create scanners on the given files and if needed on the memstore with no filtering based on TTL
+   * (that happens further down the line).
+   * @param files the list of files on which the scanners have to be created
+   * @param cacheBlocks cache the blocks or not
+   * @param usePread true to use pread, false if not
+   * @param isCompaction true if the scanner is created for compaction
+   * @param matcher the scan query matcher
+   * @param startRow the start row
+   * @param includeStartRow true to include start row, false if not
+   * @param stopRow the stop row
+   * @param includeStopRow true to include stop row, false if not
+   * @param readPt the read point of the current scan
+   * @param includeMemstoreScanner true if memstore has to be included
+   * @return scanners on the given files and on the memstore if specified
+   */
+  List<KeyValueScanner> getScanners(List<StoreFile> files, boolean cacheBlocks, boolean usePread,
+      boolean isCompaction, ScanQueryMatcher matcher, byte[] startRow, boolean includeStartRow,
+      byte[] stopRow, boolean includeStopRow, long readPt, boolean includeMemstoreScanner)
+      throws IOException;
 
   ScanInfo getScanInfo();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
index cb19267..933849c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileManager.java
@@ -105,14 +105,12 @@ public interface StoreFileManager {
 
   /**
    * Gets the store files to scan for a Scan or Get request.
-   * @param isGet Whether it's a get.
    * @param startRow Start row of the request.
    * @param stopRow Stop row of the request.
    * @return The list of files that are to be read for this request.
    */
-  Collection<StoreFile> getFilesForScanOrGet(
-    boolean isGet, byte[] startRow, byte[] stopRow
-  );
+  Collection<StoreFile> getFilesForScan(byte[] startRow, boolean includeStartRow, byte[] stopRow,
+      boolean includeStopRow);
 
   /**
    * Gets initial, full list of candidate store files to check for row-key-before.

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
index df1ddf2..1b3c9f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StripeStoreFileManager.java
@@ -289,8 +289,8 @@ public class StripeStoreFileManager
   }
 
   @Override
-  public Collection<StoreFile> getFilesForScanOrGet(
-      boolean isGet, byte[] startRow, byte[] stopRow) {
+  public Collection<StoreFile> getFilesForScan(byte[] startRow, boolean includeStartRow,
+      byte[] stopRow, boolean includeStopRow) {
     if (state.stripeFiles.isEmpty()) {
       return state.level0Files; // There's just L0.
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java
index d3224dc..95df581 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/CompactionScanQueryMatcher.java
@@ -17,10 +17,11 @@
  */
 package org.apache.hadoop.hbase.regionserver.querymatcher;
 
+import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
+
 import java.io.IOException;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.filter.Filter;
@@ -45,7 +46,7 @@ public abstract class CompactionScanQueryMatcher extends ScanQueryMatcher {
 
   protected CompactionScanQueryMatcher(ScanInfo scanInfo, DeleteTracker deletes,
       long readPointToUse, long oldestUnexpiredTS, long now) {
-    super(HConstants.EMPTY_START_ROW, scanInfo,
+    super(createStartKeyFromRow(EMPTY_START_ROW, scanInfo), scanInfo,
         new ScanWildcardColumnTracker(scanInfo.getMinVersions(), scanInfo.getMaxVersions(),
             oldestUnexpiredTS),
         oldestUnexpiredTS, now);

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java
index ac6aa03..0ec3444 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java
@@ -116,7 +116,8 @@ public class LegacyScanQueryMatcher extends ScanQueryMatcher {
   private LegacyScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns,
       boolean hasNullColumn, DeleteTracker deletes, ScanType scanType, long readPointToUse,
       long earliestPutTs, long oldestUnexpiredTS, long now) {
-    super(scan.getStartRow(), scanInfo, columns, oldestUnexpiredTS, now);
+    super(createStartKeyFromRow(scan.getStartRow(), scanInfo), scanInfo, columns, oldestUnexpiredTS,
+        now);
     TimeRange timeRange = scan.getColumnFamilyTimeRange().get(scanInfo.getFamily());
     if (timeRange == null) {
       this.tr = scan.getTimeRange();

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java
index 894bbec..8f5059f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/NormalUserScanQueryMatcher.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.regionserver.ScanInfo;
  * Query matcher for normal user scan.
  */
 @InterfaceAudience.Private
-public class NormalUserScanQueryMatcher extends UserScanQueryMatcher {
+public abstract class NormalUserScanQueryMatcher extends UserScanQueryMatcher {
 
   /** Keeps track of deletes */
   private final DeleteTracker deletes;
@@ -91,17 +91,45 @@ public class NormalUserScanQueryMatcher extends UserScanQueryMatcher {
       RegionCoprocessorHost regionCoprocessorHost) throws IOException {
     DeleteTracker deletes = instantiateDeleteTracker(regionCoprocessorHost);
     if (scan.isReversed()) {
-      return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes,
-          oldestUnexpiredTS, now) {
+      if (scan.includeStopRow()) {
+        return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes,
+            oldestUnexpiredTS, now) {
 
-        @Override
-        protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
-          return cmpToStopRow > 0;
-        }
-      };
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow >= 0;
+          }
+        };
+      } else {
+        return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes,
+            oldestUnexpiredTS, now) {
+
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow > 0;
+          }
+        };
+      }
     } else {
-      return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes,
-          oldestUnexpiredTS, now);
+      if (scan.includeStopRow()) {
+        return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes,
+            oldestUnexpiredTS, now) {
+
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow <= 0;
+          }
+        };
+      } else {
+        return new NormalUserScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, deletes,
+            oldestUnexpiredTS, now) {
+
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow < 0;
+          }
+        };
+      }
     }
   }
 }
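The four anonymous subclasses created above differ only in how they compare the current row against the stop row. A compact restatement of that decision, written as a single hypothetical helper for readability (not part of the patch):

    // Hypothetical helper, for illustration only. cmpToStopRow is
    // comparator.compareRows(currentCell, stopRow).
    static boolean moreRowsMayExistAfter(boolean reversed, boolean includeStopRow,
        int cmpToStopRow) {
      if (reversed) {
        // scanning towards smaller rows: keep going while still above the stop row
        return includeStopRow ? cmpToStopRow >= 0 : cmpToStopRow > 0;
      } else {
        // scanning towards larger rows: keep going while still below the stop row
        return includeStopRow ? cmpToStopRow <= 0 : cmpToStopRow < 0;
      }
    }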

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java
index 84484ed..b1f20e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/RawScanQueryMatcher.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hbase.regionserver.ScanInfo;
  * Query matcher for raw scan.
  */
 @InterfaceAudience.Private
-public class RawScanQueryMatcher extends UserScanQueryMatcher {
+public abstract class RawScanQueryMatcher extends UserScanQueryMatcher {
 
   protected RawScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns,
       boolean hasNullColumn, long oldestUnexpiredTS, long now) {
@@ -63,17 +63,45 @@ public class RawScanQueryMatcher extends UserScanQueryMatcher {
   public static RawScanQueryMatcher create(Scan scan, ScanInfo scanInfo, ColumnTracker columns,
       boolean hasNullColumn, long oldestUnexpiredTS, long now) {
     if (scan.isReversed()) {
-      return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
-          now) {
+      if (scan.includeStopRow()) {
+        return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
+            now) {
 
-        @Override
-        protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
-          return cmpToStopRow > 0;
-        }
-      };
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow >= 0;
+          }
+        };
+      } else {
+        return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
+            now) {
+
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow > 0;
+          }
+        };
+      }
     } else {
-      return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
-          now);
+      if (scan.includeStopRow()) {
+        return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
+            now) {
+
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow <= 0;
+          }
+        };
+      } else {
+        return new RawScanQueryMatcher(scan, scanInfo, columns, hasNullColumn, oldestUnexpiredTS,
+            now) {
+
+          @Override
+          protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
+            return cmpToStopRow < 0;
+          }
+        };
+      }
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
index 82aae6c..48563e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/ScanQueryMatcher.java
@@ -127,10 +127,10 @@ public abstract class ScanQueryMatcher implements ShipperListener {
 
   protected boolean stickyNextRow;
 
-  protected ScanQueryMatcher(byte[] startRow, ScanInfo scanInfo, ColumnTracker columns,
+  protected ScanQueryMatcher(Cell startKey, ScanInfo scanInfo, ColumnTracker columns,
       long oldestUnexpiredTS, long now) {
     this.rowComparator = scanInfo.getComparator();
-    this.startKey = CellUtil.createFirstDeleteFamilyCellOnRow(startRow, scanInfo.getFamily());
+    this.startKey = startKey;
     this.oldestUnexpiredTS = oldestUnexpiredTS;
     this.now = now;
     this.columns = columns;
@@ -345,6 +345,10 @@ public abstract class ScanQueryMatcher implements ShipperListener {
     }
   }
 
+  protected static Cell createStartKeyFromRow(byte[] startRow, ScanInfo scanInfo) {
+    return CellUtil.createFirstDeleteFamilyCellOnRow(startRow, scanInfo.getFamily());
+  }
+
   protected static DeleteTracker instantiateDeleteTracker(RegionCoprocessorHost host)
       throws IOException {
     DeleteTracker tracker = new ScanDeleteTracker();

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
index db85d0f..fcda4a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.NavigableSet;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
@@ -50,9 +51,17 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher {
 
   protected final TimeRange tr;
 
+  private static Cell createStartKey(Scan scan, ScanInfo scanInfo) {
+    if (scan.includeStartRow()) {
+      return createStartKeyFromRow(scan.getStartRow(), scanInfo);
+    } else {
+      return CellUtil.createLastOnRow(scan.getStartRow());
+    }
+  }
+
   protected UserScanQueryMatcher(Scan scan, ScanInfo scanInfo, ColumnTracker columns,
       boolean hasNullColumn, long oldestUnexpiredTS, long now) {
-    super(scan.getStartRow(), scanInfo, columns, oldestUnexpiredTS, now);
+    super(createStartKey(scan, scanInfo), scanInfo, columns, oldestUnexpiredTS, now);
     this.hasNullColumn = hasNullColumn;
     this.filter = scan.getFilter();
     this.stopRow = scan.getStopRow();
@@ -163,9 +172,7 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher {
 
   protected abstract boolean isGet();
 
-  protected boolean moreRowsMayExistsAfter(int cmpToStopRow) {
-    return cmpToStopRow < 0;
-  }
+  protected abstract boolean moreRowsMayExistsAfter(int cmpToStopRow);
 
   @Override
   public boolean moreRowsMayExistAfter(Cell cell) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
index 5614d8e..a1d926d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/AbstractTestAsyncTableScan.java
@@ -120,7 +120,7 @@ public abstract class AbstractTestAsyncTableScan {
   public void testScanNoStopKey() throws Exception {
     int start = 345;
     List<Result> results =
-        doScan(createScan().setStartRow(Bytes.toBytes(String.format("%03d", start))));
+        doScan(createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))));
     assertEquals(COUNT - start, results.size());
     IntStream.range(0, COUNT - start).forEach(i -> assertResultEquals(results.get(i), start + i));
   }
@@ -129,44 +129,66 @@ public abstract class AbstractTestAsyncTableScan {
   public void testReverseScanNoStopKey() throws Exception {
     int start = 765;
     List<Result> results = doScan(
-      createScan().setStartRow(Bytes.toBytes(String.format("%03d", start))).setReversed(true));
+      createScan().withStartRow(Bytes.toBytes(String.format("%03d", start))).setReversed(true));
     assertEquals(start + 1, results.size());
     IntStream.range(0, start + 1).forEach(i -> assertResultEquals(results.get(i), start - i));
   }
 
-  private void testScan(int start, int stop) throws Exception {
-    List<Result> results =
-        doScan(createScan().setStartRow(Bytes.toBytes(String.format("%03d", start)))
-            .setStopRow(Bytes.toBytes(String.format("%03d", stop))));
-    assertEquals(stop - start, results.size());
-    IntStream.range(0, stop - start).forEach(i -> assertResultEquals(results.get(i), start + i));
+  private void testScan(int start, boolean startInclusive, int stop, boolean stopInclusive)
+      throws Exception {
+    List<Result> results = doScan(
+      createScan().withStartRow(Bytes.toBytes(String.format("%03d", start)), startInclusive)
+          .withStopRow(Bytes.toBytes(String.format("%03d", stop)), stopInclusive));
+    int actualStart = startInclusive ? start : start + 1;
+    int actualStop = stopInclusive ? stop + 1 : stop;
+    assertEquals(actualStop - actualStart, results.size());
+    IntStream.range(0, actualStop - actualStart)
+        .forEach(i -> assertResultEquals(results.get(i), actualStart + i));
   }
 
-  private void testReversedScan(int start, int stop) throws Exception {
-    List<Result> results =
-        doScan(createScan().setStartRow(Bytes.toBytes(String.format("%03d", start)))
-            .setStopRow(Bytes.toBytes(String.format("%03d", stop))).setReversed(true));
-    assertEquals(start - stop, results.size());
-    IntStream.range(0, start - stop).forEach(i -> assertResultEquals(results.get(i), start - i));
+  private void testReversedScan(int start, boolean startInclusive, int stop, boolean stopInclusive)
+      throws Exception {
+    List<Result> results = doScan(createScan()
+        .withStartRow(Bytes.toBytes(String.format("%03d", start)), startInclusive)
+        .withStopRow(Bytes.toBytes(String.format("%03d", stop)), stopInclusive).setReversed(true));
+    int actualStart = startInclusive ? start : start - 1;
+    int actualStop = stopInclusive ? stop - 1 : stop;
+    assertEquals(actualStart - actualStop, results.size());
+    IntStream.range(0, actualStart - actualStop)
+        .forEach(i -> assertResultEquals(results.get(i), actualStart - i));
   }
 
   @Test
   public void testScanWithStartKeyAndStopKey() throws Exception {
-    testScan(345, 567);
+    testScan(1, true, 998, false); // from first region to last region
+    testScan(123, true, 345, true);
+    testScan(234, true, 456, false);
+    testScan(345, false, 567, true);
+    testScan(456, false, 678, false);
   }
 
   @Test
   public void testReversedScanWithStartKeyAndStopKey() throws Exception {
-    testReversedScan(765, 543);
+    testReversedScan(998, true, 1, false); // from last region to first region
+    testReversedScan(543, true, 321, true);
+    testReversedScan(654, true, 432, false);
+    testReversedScan(765, false, 543, true);
+    testReversedScan(876, false, 654, false);
   }
 
   @Test
   public void testScanAtRegionBoundary() throws Exception {
-    testScan(222, 333);
+    testScan(222, true, 333, true);
+    testScan(333, true, 444, false);
+    testScan(444, false, 555, true);
+    testScan(555, false, 666, false);
   }
 
   @Test
   public void testReversedScanAtRegionBoundary() throws Exception {
-    testScan(222, 333);
+    testReversedScan(333, true, 222, true);
+    testReversedScan(444, true, 333, false);
+    testReversedScan(555, false, 444, true);
+    testReversedScan(666, false, 555, false);
   }
 }
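The expected-count arithmetic in testScan() maps boundary inclusivity onto half-open index ranges. A worked sketch under the same assumptions as the test (one row per key, keys formatted with %03d):

    // Illustrative only, mirroring the arithmetic in testScan() above.
    static int expectedRows(int start, boolean startInclusive, int stop, boolean stopInclusive) {
      int actualStart = startInclusive ? start : start + 1;
      int actualStop = stopInclusive ? stop + 1 : stop;
      return actualStop - actualStart;
    }
    // expectedRows(345, false, 567, true) == 222  (rows 346..567 inclusive)
    // expectedRows(123, true,  345, true) == 223  (rows 123..345 inclusive)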

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java
index 1267d5f..270e3e1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Queue;
+import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -92,13 +93,17 @@ public class TestRawAsyncTableScan extends AbstractTestAsyncTableScan {
     }
   }
 
-  @Parameter
+  @Parameter(0)
+  public String scanType;
+
+  @Parameter(1)
   public Supplier<Scan> scanCreater;
 
-  @Parameters
+  @Parameters(name = "{index}: type={0}")
   public static List<Object[]> params() {
-    return Arrays.asList(new Supplier<?>[] { TestRawAsyncTableScan::createNormalScan },
-      new Supplier<?>[] { TestRawAsyncTableScan::createBatchScan });
+    Supplier<Scan> normal = TestRawAsyncTableScan::createNormalScan;
+    Supplier<Scan> batch = TestRawAsyncTableScan::createBatchScan;
+    return Arrays.asList(new Object[] { "normal", normal }, new Object[] { "batch", batch });
   }
 
   private static Scan createNormalScan() {
@@ -117,7 +122,10 @@ public class TestRawAsyncTableScan extends AbstractTestAsyncTableScan {
   @Override
   protected List<Result> doScan(Scan scan) throws Exception {
     SimpleRawScanResultConsumer scanConsumer = new SimpleRawScanResultConsumer();
-    ASYNC_CONN.getRawTable(TABLE_NAME).scan(scan, scanConsumer);
+    RawAsyncTable table = ASYNC_CONN.getRawTable(TABLE_NAME);
+    table.setScanTimeout(1, TimeUnit.HOURS);
+    table.setReadRpcTimeout(1, TimeUnit.HOURS);
+    table.scan(scan, scanConsumer);
     List<Result> results = new ArrayList<>();
     for (Result result; (result = scanConsumer.take()) != null;) {
       results.add(result);

http://git-wip-us.apache.org/repos/asf/hbase/blob/05b1d918/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java
index e9d34ed..c533257 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStripeStoreFileManager.java
@@ -89,15 +89,15 @@ public class TestStripeStoreFileManager {
     MockStoreFile sf = createFile();
     manager.insertNewFiles(al(sf));
     assertEquals(1, manager.getStorefileCount());
-    Collection<StoreFile> filesForGet = manager.getFilesForScanOrGet(true, KEY_A, KEY_A);
+    Collection<StoreFile> filesForGet = manager.getFilesForScan(KEY_A, true, KEY_A, true);
     assertEquals(1, filesForGet.size());
     assertTrue(filesForGet.contains(sf));
 
     // Add some stripes and make sure we get this file for every stripe.
     manager.addCompactionResults(al(), al(createFile(OPEN_KEY, KEY_B),
         createFile(KEY_B, OPEN_KEY)));
-    assertTrue(manager.getFilesForScanOrGet(true, KEY_A, KEY_A).contains(sf));
-    assertTrue(manager.getFilesForScanOrGet(true, KEY_C, KEY_C).contains(sf));
+    assertTrue(manager.getFilesForScan(KEY_A, true, KEY_A, true).contains(sf));
+    assertTrue(manager.getFilesForScan(KEY_C, true, KEY_C, true).contains(sf));
   }
 
   @Test
@@ -290,10 +290,9 @@ public class TestStripeStoreFileManager {
     verifyGetAndScanScenario(manager, keyAfter(KEY_B), keyAfter(KEY_C), sf0, sfC, sfD);
   }
 
-  private void verifyGetAndScanScenario(StripeStoreFileManager manager,
-      byte[] start, byte[] end, StoreFile... results) throws Exception {
-    verifyGetOrScanScenario(manager, true, start, end, results);
-    verifyGetOrScanScenario(manager, false, start, end, results);
+  private void verifyGetAndScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
+      StoreFile... results) throws Exception {
+    verifyGetOrScanScenario(manager, start, end, results);
   }
 
   @Test
@@ -548,16 +547,16 @@ public class TestStripeStoreFileManager {
     verifyAllFiles(manager, allFiles); // must have the same files.
   }
 
-  private void verifyGetOrScanScenario(StripeStoreFileManager manager, boolean isGet,
-      byte[] start, byte[] end, StoreFile... results) throws Exception {
-    verifyGetOrScanScenario(manager, isGet, start, end, Arrays.asList(results));
+  private void verifyGetOrScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
+      StoreFile... results) throws Exception {
+    verifyGetOrScanScenario(manager, start, end, Arrays.asList(results));
   }
 
-  private void verifyGetOrScanScenario(StripeStoreFileManager manager, boolean isGet,
-      byte[] start, byte[] end, Collection<StoreFile> results) throws Exception {
+  private void verifyGetOrScanScenario(StripeStoreFileManager manager, byte[] start, byte[] end,
+      Collection<StoreFile> results) throws Exception {
     start = start != null ? start : HConstants.EMPTY_START_ROW;
     end = end != null ? end : HConstants.EMPTY_END_ROW;
-    Collection<StoreFile> sfs = manager.getFilesForScanOrGet(isGet, start, end);
+    Collection<StoreFile> sfs = manager.getFilesForScan(start, true, end, false);
     assertEquals(results.size(), sfs.size());
     for (StoreFile result : results) {
       assertTrue(sfs.contains(result));
@@ -566,7 +565,7 @@ public class TestStripeStoreFileManager {
 
   private void verifyAllFiles(
       StripeStoreFileManager manager, Collection<StoreFile> results) throws Exception {
-    verifyGetOrScanScenario(manager, false, null, null, results);
+    verifyGetOrScanScenario(manager, null, null, results);
   }
 
   // TODO: replace with Mockito?


[17/50] [abbrv] hbase git commit: HBASE-17335 enable/disable replication peer requests should be routed through master

Posted by sy...@apache.org.
HBASE-17335 enable/disable replication peer requests should be routed through master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3f2bec0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3f2bec0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3f2bec0

Branch: refs/heads/hbase-12439
Commit: b3f2bec099994d78d1866751f88d7526d9169995
Parents: 45da294
Author: Guanghao Zhang <zg...@apache.org>
Authored: Thu Dec 22 20:47:36 2016 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Fri Dec 23 09:27:12 2016 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/Admin.java   |   16 +
 .../hbase/client/ConnectionImplementation.java  |   16 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   24 +
 .../client/replication/ReplicationAdmin.java    |    8 +-
 .../hbase/shaded/protobuf/RequestConverter.java |   16 +
 .../shaded/protobuf/generated/MasterProtos.java |  171 +-
 .../protobuf/generated/ReplicationProtos.java   | 1902 +++++++++++++++++-
 .../src/main/protobuf/Master.proto              |    8 +
 .../src/main/protobuf/Replication.proto         |   14 +
 .../hbase/coprocessor/MasterObserver.java       |   40 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   24 +
 .../hbase/master/MasterCoprocessorHost.java     |   40 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   26 +
 .../hadoop/hbase/master/MasterServices.java     |   12 +
 .../master/replication/ReplicationManager.java  |    8 +
 .../hbase/security/access/AccessController.java |   12 +
 .../hbase/master/MockNoopMasterServices.java    |    8 +
 .../security/access/TestAccessController.java   |   30 +
 .../asciidoc/_chapters/appendix_acl_matrix.adoc |    2 +
 19 files changed, 2366 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index d284fc8..e7ea4d9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1842,4 +1842,20 @@ public interface Admin extends Abortable, Closeable {
    */
   default void removeReplicationPeer(final String peerId) throws IOException {
   }
+
+  /**
+   * Restart the replication stream to the specified peer
+   * @param peerId a short name that identifies the peer
+   * @throws IOException
+   */
+  default void enableReplicationPeer(final String peerId) throws IOException {
+  }
+
+  /**
+   * Stop the replication stream to the specified peer
+   * @param peerId a short name that identifies the peer
+   * @throws IOException
+   */
+  default void disableReplicationPeer(final String peerId) throws IOException {
+  }
 }
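With these defaults in place a client can drive peer state through the Master-routed Admin API added above. A minimal usage sketch (the Configuration, connection setup and the peer id "1" are illustrative assumptions; error handling is omitted):

    // Illustrative sketch only; conf is an existing HBase Configuration and "1" a made-up peer id.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.disableReplicationPeer("1");  // stop shipping edits to the peer
      admin.enableReplicationPeer("1");   // restart the replication stream
    }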

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 4e31f2c..ff939aa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -90,6 +90,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormali
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -1653,6 +1657,18 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
           RemoveReplicationPeerRequest request) throws ServiceException {
         return stub.removeReplicationPeer(controller, request);
       }
+
+      @Override
+      public EnableReplicationPeerResponse enableReplicationPeer(RpcController controller,
+          EnableReplicationPeerRequest request) throws ServiceException {
+        return stub.enableReplicationPeer(controller, request);
+      }
+
+      @Override
+      public DisableReplicationPeerResponse disableReplicationPeer(RpcController controller,
+          DisableReplicationPeerRequest request) throws ServiceException {
+        return stub.disableReplicationPeer(controller, request);
+      }
     };
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 19831c1..61f7435 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -3770,4 +3770,28 @@ public class HBaseAdmin implements Admin {
       }
     });
   }
+
+  @Override
+  public void enableReplicationPeer(final String peerId) throws IOException {
+    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+      @Override
+      protected Void rpcCall() throws Exception {
+        master.enableReplicationPeer(getRpcController(),
+          RequestConverter.buildEnableReplicationPeerRequest(peerId));
+        return null;
+      }
+    });
+  }
+
+  @Override
+  public void disableReplicationPeer(final String peerId) throws IOException {
+    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+      @Override
+      protected Void rpcCall() throws Exception {
+        master.disableReplicationPeer(getRpcController(),
+          RequestConverter.buildDisableReplicationPeerRequest(peerId));
+        return null;
+      }
+    });
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index e6b9b0d..12bdb81 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -228,16 +228,16 @@ public class ReplicationAdmin implements Closeable {
    * Restart the replication stream to the specified peer.
    * @param id a short name that identifies the cluster
    */
-  public void enablePeer(String id) throws ReplicationException {
-    this.replicationPeers.enablePeer(id);
+  public void enablePeer(String id) throws IOException {
+    this.admin.enableReplicationPeer(id);
   }
 
   /**
    * Stop the replication stream to the specified peer.
    * @param id a short name that identifies the cluster
    */
-  public void disablePeer(String id) throws ReplicationException {
-    this.replicationPeers.disablePeer(id);
+  public void disablePeer(String id) throws IOException {
+    this.admin.disableReplicationPeer(id);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index cd4712a..446cd89 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -114,6 +114,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -1580,4 +1582,18 @@ public final class RequestConverter {
     builder.setPeerId(peerId);
     return builder.build();
   }
+
+  public static ReplicationProtos.EnableReplicationPeerRequest buildEnableReplicationPeerRequest(
+      String peerId) {
+    EnableReplicationPeerRequest.Builder builder = EnableReplicationPeerRequest.newBuilder();
+    builder.setPeerId(peerId);
+    return builder.build();
+  }
+
+  public static ReplicationProtos.DisableReplicationPeerRequest buildDisableReplicationPeerRequest(
+      String peerId) {
+    DisableReplicationPeerRequest.Builder builder = DisableReplicationPeerRequest.newBuilder();
+    builder.setPeerId(peerId);
+    return builder.build();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b3f2bec0/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index da5de63..2af3982 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -66368,6 +66368,30 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done);
 
+      /**
+       * <pre>
+       ** Enable a replication peer 
+       * </pre>
+       *
+       * <code>rpc EnableReplicationPeer(.hbase.pb.EnableReplicationPeerRequest) returns (.hbase.pb.EnableReplicationPeerResponse);</code>
+       */
+      public abstract void enableReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse> done);
+
+      /**
+       * <pre>
+       ** Disable a replication peer 
+       * </pre>
+       *
+       * <code>rpc DisableReplicationPeer(.hbase.pb.DisableReplicationPeerRequest) returns (.hbase.pb.DisableReplicationPeerResponse);</code>
+       */
+      public abstract void disableReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse> done);
+
     }
 
     public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService(
@@ -66853,6 +66877,22 @@ public final class MasterProtos {
           impl.removeReplicationPeer(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void enableReplicationPeer(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse> done) {
+          impl.enableReplicationPeer(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void disableReplicationPeer(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse> done) {
+          impl.disableReplicationPeer(controller, request, done);
+        }
+
       };
     }
 
@@ -66995,6 +67035,10 @@ public final class MasterProtos {
               return impl.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request);
             case 59:
               return impl.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request);
+            case 60:
+              return impl.enableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)request);
+            case 61:
+              return impl.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -67129,6 +67173,10 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
             case 59:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
+            case 60:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
+            case 61:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -67263,6 +67311,10 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
             case 59:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
+            case 60:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
+            case 61:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -68020,6 +68072,30 @@ public final class MasterProtos {
         org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done);
 
+    /**
+     * <pre>
+     ** Enable a replication peer 
+     * </pre>
+     *
+     * <code>rpc EnableReplicationPeer(.hbase.pb.EnableReplicationPeerRequest) returns (.hbase.pb.EnableReplicationPeerResponse);</code>
+     */
+    public abstract void enableReplicationPeer(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse> done);
+
+    /**
+     * <pre>
+     ** Disable a replication peer 
+     * </pre>
+     *
+     * <code>rpc DisableReplicationPeer(.hbase.pb.DisableReplicationPeerRequest) returns (.hbase.pb.DisableReplicationPeerResponse);</code>
+     */
+    public abstract void disableReplicationPeer(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse> done);
+
     public static final
         org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -68342,6 +68418,16 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse>specializeCallback(
               done));
           return;
+        case 60:
+          this.enableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse>specializeCallback(
+              done));
+          return;
+        case 61:
+          this.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse>specializeCallback(
+              done));
+          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -68476,6 +68562,10 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
         case 59:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
+        case 60:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
+        case 61:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -68610,6 +68700,10 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
         case 59:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
+        case 60:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
+        case 61:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -69530,6 +69624,36 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.class,
             org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance()));
       }
+
+      public  void enableReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(60),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.class,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance()));
+      }
+
+      public  void disableReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(61),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.class,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance()));
+      }
     }
 
     public static BlockingInterface newBlockingStub(
@@ -69837,6 +69961,16 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse enableReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse disableReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
     }
 
     private static final class BlockingStub implements BlockingInterface {
@@ -70565,6 +70699,30 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance());
       }
 
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse enableReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(60),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse disableReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(61),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance());
+      }
+
     }
 
     // @@protoc_insertion_point(class_scope:hbase.pb.MasterService)
@@ -71346,7 +71504,7 @@ public final class MasterProtos {
       "ENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022",
       "\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION" +
       "\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwitchT" +
-      "ype\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\374*\n\rMasterServ" +
+      "ype\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\323,\n\rMasterServ" +
       "ice\022e\n\024GetSchemaAlterStatus\022%.hbase.pb.G" +
       "etSchemaAlterStatusRequest\032&.hbase.pb.Ge" +
       "tSchemaAlterStatusResponse\022b\n\023GetTableDe" +
@@ -71484,9 +71642,14 @@ public final class MasterProtos {
       "eplicationPeerResponse\022h\n\025RemoveReplicat" +
       "ionPeer\022&.hbase.pb.RemoveReplicationPeer" +
       "Request\032\'.hbase.pb.RemoveReplicationPeer",
-      "ResponseBI\n1org.apache.hadoop.hbase.shad" +
-      "ed.protobuf.generatedB\014MasterProtosH\001\210\001\001" +
-      "\240\001\001"
+      "Response\022h\n\025EnableReplicationPeer\022&.hbas" +
+      "e.pb.EnableReplicationPeerRequest\032\'.hbas" +
+      "e.pb.EnableReplicationPeerResponse\022k\n\026Di" +
+      "sableReplicationPeer\022\'.hbase.pb.DisableR" +
+      "eplicationPeerRequest\032(.hbase.pb.Disable" +
+      "ReplicationPeerResponseBI\n1org.apache.ha" +
+      "doop.hbase.shaded.protobuf.generatedB\014Ma" +
+      "sterProtosH\001\210\001\001\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.    InternalDescriptorAssigner() {
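
For reference, the two new peer RPCs wired into the blocking stub above dispatch by their
method index in the MasterService descriptor (60 for EnableReplicationPeer, 61 for
DisableReplicationPeer). A minimal sketch of driving them through the generated blocking
interface follows; it is not part of this commit, and the setPeerId() accessor plus the
controller and stub wiring are assumptions made for illustration.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;

public class PeerStateSketch {
  // Toggle a replication peer through the blocking interface. setPeerId() is assumed from
  // the peer_id field carried by the other peer requests; building the stub and the
  // controller is left to the caller.
  static void togglePeer(MasterProtos.MasterService.BlockingInterface master,
      RpcController controller, String peerId, boolean enable) throws ServiceException {
    if (enable) {
      master.enableReplicationPeer(controller,
        ReplicationProtos.EnableReplicationPeerRequest.newBuilder().setPeerId(peerId).build());
    } else {
      master.disableReplicationPeer(controller,
        ReplicationProtos.DisableReplicationPeerRequest.newBuilder().setPeerId(peerId).build());
    }
  }
}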


[13/50] [abbrv] hbase git commit: HBASE-17334 Add locate row before/after support for AsyncRegionLocator

Posted by sy...@apache.org.
HBASE-17334 Add locate row before/after support for AsyncRegionLocator


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/09bb4287
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/09bb4287
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/09bb4287

Branch: refs/heads/hbase-12439
Commit: 09bb4287631563df934cfe88b16fa6dc03e490e6
Parents: 6678186
Author: zhangduo <zh...@apache.org>
Authored: Thu Dec 22 14:22:41 2016 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Thu Dec 22 20:39:01 2016 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/client/AsyncClientScanner.java |  14 +-
 .../client/AsyncMultiGetRpcRetryingCaller.java  |   3 +-
 .../hbase/client/AsyncNonMetaRegionLocator.java | 183 ++++++++-----------
 .../hadoop/hbase/client/AsyncRegionLocator.java |  23 +--
 .../client/AsyncRpcRetryingCallerFactory.java   |  10 +-
 .../AsyncScanSingleRegionRpcRetryingCaller.java |   9 +-
 .../AsyncSingleRequestRpcRetryingCaller.java    |  46 ++---
 .../client/AsyncSmallScanRpcRetryingCaller.java |  17 +-
 .../client/AsyncTableRegionLocatorImpl.java     |   2 +-
 .../hadoop/hbase/client/RegionLocateType.java   |  33 ++++
 .../client/TestAsyncNonMetaRegionLocator.java   | 101 ++++++----
 ...syncNonMetaRegionLocatorConcurrenyLimit.java |  13 +-
 .../client/TestAsyncRegionLocatorTimeout.java   |   8 +-
 ...TestAsyncSingleRequestRpcRetryingCaller.java |  10 +-
 14 files changed, 236 insertions(+), 236 deletions(-)
----------------------------------------------------------------------
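
This commit replaces the boolean locateToPreviousRegion flag with a three-valued
RegionLocateType (BEFORE, CURRENT, AFTER). The self-contained sketch below is a toy model of
the lookup semantics, using String keys in place of byte[] rows; it mirrors the
floorEntry/lowerEntry cache logic in the AsyncNonMetaRegionLocator changes further down, but
the region layout, class name and helper behaviour are illustrative assumptions, not HBase API.

import java.util.NavigableMap;
import java.util.TreeMap;

public class LocateTypeSketch {
  enum LocateType { BEFORE, CURRENT, AFTER }

  // region start key -> end key; "" stands for the empty start/stop row
  static final NavigableMap<String, String> REGIONS = new TreeMap<>();
  static {
    REGIONS.put("", "b");   // first region  [ , b)
    REGIONS.put("b", "d");  // middle region [b, d)
    REGIONS.put("d", "");   // last region   [d, )
  }

  // Resolve a row to the start key of the region it maps to, assuming contiguous regions.
  static String locate(String row, LocateType type) {
    switch (type) {
      case BEFORE:
        // region holding the greatest row strictly before 'row'; the empty stop row maps to
        // the last region, as in locateRowBeforeInCache
        return row.isEmpty() ? REGIONS.lastKey() : REGIONS.lowerKey(row);
      case AFTER:
        // build the closest row after 'row', then fall back to a CURRENT lookup
        return locate(row + "\0", LocateType.CURRENT);
      default: // CURRENT
        // region whose start key is the greatest key <= row, as in locateRowInCache
        return REGIONS.floorKey(row);
    }
  }

  public static void main(String[] args) {
    System.out.println(locate("b", LocateType.CURRENT)); // "b": the region [b, d)
    System.out.println(locate("b", LocateType.BEFORE));  // "":  the region [ , b)
    System.out.println(locate("d", LocateType.AFTER));   // "d": the region [d, )
  }
}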


http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
index 74c20de..dfffd39 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
@@ -118,23 +118,22 @@ class AsyncClientScanner {
         .setScan(scan).consumer(consumer).resultCache(resultCache)
         .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
         .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).start()
-        .whenComplete((locateToPreviousRegion, error) -> {
+        .whenComplete((locateType, error) -> {
           if (error != null) {
             consumer.onError(error);
             return;
           }
-          if (locateToPreviousRegion == null) {
+          if (locateType == null) {
             consumer.onComplete();
           } else {
-            openScanner(locateToPreviousRegion.booleanValue());
+            openScanner(locateType);
           }
         });
   }
 
-  private void openScanner(boolean locateToPreviousRegion) {
+  private void openScanner(RegionLocateType locateType) {
     conn.callerFactory.<OpenScannerResponse> single().table(tableName).row(scan.getStartRow())
-        .locateToPreviousRegion(locateToPreviousRegion)
-        .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
+        .locateType(locateType).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
         .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).action(this::callOpenScanner).call()
         .whenComplete((resp, error) -> {
           if (error != null) {
@@ -146,6 +145,7 @@ class AsyncClientScanner {
   }
 
   public void start() {
-    openScanner(scan.isReversed() && isEmptyStartRow(scan.getStartRow()));
+    openScanner(scan.isReversed() && isEmptyStartRow(scan.getStartRow()) ? RegionLocateType.BEFORE
+        : RegionLocateType.CURRENT);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMultiGetRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMultiGetRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMultiGetRpcRetryingCaller.java
index 8a9b9a8..e1208c2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMultiGetRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMultiGetRpcRetryingCaller.java
@@ -374,7 +374,8 @@ class AsyncMultiGetRpcRetryingCaller {
         new ConcurrentHashMap<>();
     ConcurrentLinkedQueue<Get> locateFailed = new ConcurrentLinkedQueue<>();
     CompletableFuture.allOf(gets.map(get -> conn.getLocator()
-        .getRegionLocation(tableName, get.getRow(), locateTimeoutNs).whenComplete((loc, error) -> {
+        .getRegionLocation(tableName, get.getRow(), RegionLocateType.CURRENT, locateTimeoutNs)
+        .whenComplete((loc, error) -> {
           if (error != null) {
             error = translateException(error);
             if (error instanceof DoNotRetryIOException) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index c22d210..ae79b65 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.hbase.HConstants.ZEROES;
 import static org.apache.hadoop.hbase.HRegionInfo.createRegionName;
 import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
 import static org.apache.hadoop.hbase.client.AsyncRegionLocator.updateCachedLoation;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.createClosestRowAfter;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
 import static org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR;
 import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
@@ -77,16 +78,16 @@ class AsyncNonMetaRegionLocator {
 
     public final byte[] row;
 
-    public final boolean locateToPrevious;
+    public final RegionLocateType locateType;
 
-    public LocateRequest(byte[] row, boolean locateToPrevious) {
+    public LocateRequest(byte[] row, RegionLocateType locateType) {
       this.row = row;
-      this.locateToPrevious = locateToPrevious;
+      this.locateType = locateType;
     }
 
     @Override
     public int hashCode() {
-      return Bytes.hashCode(row) ^ Boolean.hashCode(locateToPrevious);
+      return Bytes.hashCode(row) ^ locateType.hashCode();
     }
 
     @Override
@@ -95,7 +96,7 @@ class AsyncNonMetaRegionLocator {
         return false;
       }
       LocateRequest that = (LocateRequest) obj;
-      return locateToPrevious == that.locateToPrevious && Bytes.equals(row, that.row);
+      return locateType.equals(that.locateType) && Bytes.equals(row, that.row);
     }
   }
 
@@ -192,8 +193,14 @@ class AsyncNonMetaRegionLocator {
       return true;
     }
     boolean completed;
-    if (req.locateToPrevious) {
-      completed = Bytes.equals(loc.getRegionInfo().getEndKey(), req.row);
+    if (req.locateType.equals(RegionLocateType.BEFORE)) {
+      // for locating the row before the current row, the common case is to find the previous
+      // region in a reverse scan, so we check the endKey first. In general, the condition should
+      // be startKey < req.row and endKey >= req.row. Here we split it into endKey == req.row ||
+      // (endKey > req.row && startKey < req.row); the two forms are equivalent since startKey < endKey.
+      int c = Bytes.compareTo(loc.getRegionInfo().getEndKey(), req.row);
+      completed =
+          c == 0 || (c > 0 && Bytes.compareTo(loc.getRegionInfo().getStartKey(), req.row) < 0);
     } else {
       completed = loc.getRegionInfo().containsRow(req.row);
     }
@@ -206,11 +213,11 @@ class AsyncNonMetaRegionLocator {
   }
 
   private void complete(TableName tableName, LocateRequest req, HRegionLocation loc,
-      Throwable error, String rowNameInErrorMsg) {
+      Throwable error) {
     if (error != null) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Failed to locate region in '" + tableName + "', " + rowNameInErrorMsg + "='"
-            + Bytes.toStringBinary(req.row) + "'",
+        LOG.debug("Failed to locate region in '" + tableName + "', row='"
+            + Bytes.toStringBinary(req.row) + "', locateType=" + req.locateType,
           error);
       }
     }
@@ -254,87 +261,67 @@ class AsyncNonMetaRegionLocator {
       }
     }
     if (toSend != null) {
-      if (toSend.locateToPrevious) {
-        locatePreviousInMeta(tableName, toSend);
-      } else {
-        locateInMeta(tableName, toSend);
-      }
+      locateInMeta(tableName, toSend);
     }
   }
 
   private void onScanComplete(TableName tableName, LocateRequest req, List<Result> results,
-      Throwable error, String rowNameInErrorMsg) {
+      Throwable error) {
     if (error != null) {
-      complete(tableName, req, null, error, rowNameInErrorMsg);
+      complete(tableName, req, null, error);
       return;
     }
     if (results.isEmpty()) {
-      complete(tableName, req, null, new TableNotFoundException(tableName), rowNameInErrorMsg);
+      complete(tableName, req, null, new TableNotFoundException(tableName));
       return;
     }
     RegionLocations locs = MetaTableAccessor.getRegionLocations(results.get(0));
     if (LOG.isDebugEnabled()) {
-      LOG.debug("The fetched location of '" + tableName + "', " + rowNameInErrorMsg + "='"
-          + Bytes.toStringBinary(req.row) + "' is " + locs);
+      LOG.debug("The fetched location of '" + tableName + "', row='" + Bytes.toStringBinary(req.row)
+          + "', locateType=" + req.locateType + " is " + locs);
     }
     if (locs == null || locs.getDefaultRegionLocation() == null) {
       complete(tableName, req, null,
-        new IOException(String.format("No location found for '%s', %s='%s'", tableName,
-          rowNameInErrorMsg, Bytes.toStringBinary(req.row))),
-        rowNameInErrorMsg);
+        new IOException(String.format("No location found for '%s', row='%s', locateType=%s",
+          tableName, Bytes.toStringBinary(req.row), req.locateType)));
       return;
     }
     HRegionLocation loc = locs.getDefaultRegionLocation();
     HRegionInfo info = loc.getRegionInfo();
     if (info == null) {
       complete(tableName, req, null,
-        new IOException(String.format("HRegionInfo is null for '%s', %s='%s'", tableName,
-          rowNameInErrorMsg, Bytes.toStringBinary(req.row))),
-        rowNameInErrorMsg);
+        new IOException(String.format("HRegionInfo is null for '%s', row='%s', locateType=%s",
+          tableName, Bytes.toStringBinary(req.row), req.locateType)));
       return;
     }
     if (!info.getTable().equals(tableName)) {
-      complete(tableName, req, null,
-        new TableNotFoundException(
-            "Table '" + tableName + "' was not found, got: '" + info.getTable() + "'"),
-        rowNameInErrorMsg);
+      complete(tableName, req, null, new TableNotFoundException(
+          "Table '" + tableName + "' was not found, got: '" + info.getTable() + "'"));
       return;
     }
     if (info.isSplit()) {
       complete(tableName, req, null,
         new RegionOfflineException(
             "the only available region for the required row is a split parent,"
-                + " the daughters should be online soon: '" + info.getRegionNameAsString() + "'"),
-        rowNameInErrorMsg);
+                + " the daughters should be online soon: '" + info.getRegionNameAsString() + "'"));
       return;
     }
     if (info.isOffline()) {
-      complete(tableName, req, null,
-        new RegionOfflineException("the region is offline, could"
-            + " be caused by a disable table call: '" + info.getRegionNameAsString() + "'"),
-        rowNameInErrorMsg);
+      complete(tableName, req, null, new RegionOfflineException("the region is offline, could"
+          + " be caused by a disable table call: '" + info.getRegionNameAsString() + "'"));
       return;
     }
     if (loc.getServerName() == null) {
       complete(tableName, req, null,
         new NoServerForRegionException(
-            String.format("No server address listed for region '%s', %s='%s'",
-              info.getRegionNameAsString(), rowNameInErrorMsg, Bytes.toStringBinary(req.row))),
-        rowNameInErrorMsg);
-      return;
-    }
-    if (req.locateToPrevious && !Bytes.equals(info.getEndKey(), req.row)) {
-      complete(tableName, req, null,
-        new DoNotRetryIOException("The end key of '" + info.getRegionNameAsString() + "' is '"
-            + Bytes.toStringBinary(info.getEndKey()) + "', expected '"
-            + Bytes.toStringBinary(req.row) + "'"),
-        rowNameInErrorMsg);
+            String.format("No server address listed for region '%s', row='%s', locateType=%s",
+              info.getRegionNameAsString(), Bytes.toStringBinary(req.row), req.locateType)));
       return;
     }
-    complete(tableName, req, loc, null, rowNameInErrorMsg);
+    complete(tableName, req, loc, null);
   }
 
-  private HRegionLocation locateInCache(TableCache tableCache, TableName tableName, byte[] row) {
+  private HRegionLocation locateRowInCache(TableCache tableCache, TableName tableName, byte[] row) {
     Map.Entry<byte[], HRegionLocation> entry = tableCache.cache.floorEntry(row);
     if (entry == null) {
       return null;
@@ -344,7 +331,7 @@ class AsyncNonMetaRegionLocator {
     if (isEmptyStopRow(endKey) || Bytes.compareTo(row, endKey) < 0) {
       if (LOG.isTraceEnabled()) {
         LOG.trace("Found " + loc + " in cache for '" + tableName + "', row='"
-            + Bytes.toStringBinary(row) + "'");
+            + Bytes.toStringBinary(row) + "', locateType=" + RegionLocateType.CURRENT);
       }
       return loc;
     } else {
@@ -352,22 +339,19 @@ class AsyncNonMetaRegionLocator {
     }
   }
 
-  private HRegionLocation locatePreviousInCache(TableCache tableCache, TableName tableName,
-      byte[] startRowOfCurrentRegion) {
-    Map.Entry<byte[], HRegionLocation> entry;
-    if (isEmptyStopRow(startRowOfCurrentRegion)) {
-      entry = tableCache.cache.lastEntry();
-    } else {
-      entry = tableCache.cache.lowerEntry(startRowOfCurrentRegion);
-    }
+  private HRegionLocation locateRowBeforeInCache(TableCache tableCache, TableName tableName,
+      byte[] row) {
+    Map.Entry<byte[], HRegionLocation> entry =
+        isEmptyStopRow(row) ? tableCache.cache.lastEntry() : tableCache.cache.lowerEntry(row);
     if (entry == null) {
       return null;
     }
     HRegionLocation loc = entry.getValue();
-    if (Bytes.equals(loc.getRegionInfo().getEndKey(), startRowOfCurrentRegion)) {
+    if (isEmptyStopRow(loc.getRegionInfo().getEndKey())
+        || Bytes.compareTo(loc.getRegionInfo().getEndKey(), row) >= 0) {
       if (LOG.isTraceEnabled()) {
-        LOG.trace("Found " + loc + " in cache for '" + tableName + "', startRowOfCurrentRegion='"
-            + Bytes.toStringBinary(startRowOfCurrentRegion) + "'");
+        LOG.trace("Found " + loc + " in cache for '" + tableName + "', row='"
+            + Bytes.toStringBinary(row) + "', locateType=" + RegionLocateType.BEFORE);
       }
       return loc;
     } else {
@@ -377,46 +361,41 @@ class AsyncNonMetaRegionLocator {
 
   private void locateInMeta(TableName tableName, LocateRequest req) {
     if (LOG.isTraceEnabled()) {
-      LOG.trace(
-        "Try locate '" + tableName + "', row='" + Bytes.toStringBinary(req.row) + "' in meta");
-    }
-    byte[] metaKey = createRegionName(tableName, req.row, NINES, false);
-    conn.getRawTable(META_TABLE_NAME)
-        .smallScan(new Scan(metaKey).setReversed(true).setSmall(true).addFamily(CATALOG_FAMILY), 1)
-        .whenComplete((results, error) -> onScanComplete(tableName, req, results, error, "row"));
-  }
-
-  private void locatePreviousInMeta(TableName tableName, LocateRequest req) {
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Try locate '" + tableName + "', startRowOfCurrentRegion='"
-          + Bytes.toStringBinary(req.row) + "' in meta");
+      LOG.trace("Try locate '" + tableName + "', row='" + Bytes.toStringBinary(req.row)
+          + "', locateType=" + req.locateType + " in meta");
     }
     byte[] metaKey;
-    if (isEmptyStopRow(req.row)) {
-      byte[] binaryTableName = tableName.getName();
-      metaKey = Arrays.copyOf(binaryTableName, binaryTableName.length + 1);
+    if (req.locateType.equals(RegionLocateType.BEFORE)) {
+      if (isEmptyStopRow(req.row)) {
+        byte[] binaryTableName = tableName.getName();
+        metaKey = Arrays.copyOf(binaryTableName, binaryTableName.length + 1);
+      } else {
+        metaKey = createRegionName(tableName, req.row, ZEROES, false);
+      }
     } else {
-      metaKey = createRegionName(tableName, req.row, ZEROES, false);
+      metaKey = createRegionName(tableName, req.row, NINES, false);
     }
     conn.getRawTable(META_TABLE_NAME)
         .smallScan(new Scan(metaKey).setReversed(true).setSmall(true).addFamily(CATALOG_FAMILY), 1)
-        .whenComplete((results, error) -> onScanComplete(tableName, req, results, error,
-          "startRowOfCurrentRegion"));
+        .whenComplete((results, error) -> onScanComplete(tableName, req, results, error));
   }
 
   private HRegionLocation locateInCache(TableCache tableCache, TableName tableName, byte[] row,
-      boolean locateToPrevious) {
-    return locateToPrevious ? locatePreviousInCache(tableCache, tableName, row)
-        : locateInCache(tableCache, tableName, row);
+      RegionLocateType locateType) {
+    return locateType.equals(RegionLocateType.BEFORE)
+        ? locateRowBeforeInCache(tableCache, tableName, row)
+        : locateRowInCache(tableCache, tableName, row);
   }
 
   // locateToPrevious is true means we will use the start key of a region to locate the region
   // placed before it. Used for reverse scan. See the comment of
   // AsyncRegionLocator.getPreviousRegionLocation.
-  private CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
-      boolean locateToPrevious) {
+  private CompletableFuture<HRegionLocation> getRegionLocationInternal(TableName tableName,
+      byte[] row, RegionLocateType locateType) {
+    // AFTER should be converted to CURRENT before calling this method
+    assert !locateType.equals(RegionLocateType.AFTER);
     TableCache tableCache = getTableCache(tableName);
-    HRegionLocation loc = locateInCache(tableCache, tableName, row, locateToPrevious);
+    HRegionLocation loc = locateInCache(tableCache, tableName, row, locateType);
     if (loc != null) {
       return CompletableFuture.completedFuture(loc);
     }
@@ -425,11 +404,11 @@ class AsyncNonMetaRegionLocator {
     boolean sendRequest = false;
     synchronized (tableCache) {
       // check again
-      loc = locateInCache(tableCache, tableName, row, locateToPrevious);
+      loc = locateInCache(tableCache, tableName, row, locateType);
       if (loc != null) {
         return CompletableFuture.completedFuture(loc);
       }
-      req = new LocateRequest(row, locateToPrevious);
+      req = new LocateRequest(row, locateType);
       future = tableCache.allRequests.get(req);
       if (future == null) {
         future = new CompletableFuture<>();
@@ -441,25 +420,23 @@ class AsyncNonMetaRegionLocator {
       }
     }
     if (sendRequest) {
-      if (locateToPrevious) {
-        locatePreviousInMeta(tableName, req);
-      } else {
-        locateInMeta(tableName, req);
-      }
+      locateInMeta(tableName, req);
     }
     return future;
   }
 
-  CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row) {
-    return getRegionLocation(tableName, row, false);
-  }
-
-  // Used for reverse scan. See the comment of AsyncRegionLocator.getPreviousRegionLocation.
-  // TODO: need to deal with region merge where the startRowOfCurrentRegion will not be the endRow
-  // of a region.
-  CompletableFuture<HRegionLocation> getPreviousRegionLocation(TableName tableName,
-      byte[] startRowOfCurrentRegion) {
-    return getRegionLocation(tableName, startRowOfCurrentRegion, true);
+  CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
+      RegionLocateType locateType) {
+    if (locateType.equals(RegionLocateType.BEFORE)) {
+      return getRegionLocationInternal(tableName, row, locateType);
+    } else {
+      // since we know the exact row after the given one, we can just create the new row and use
+      // the same algorithm to locate it.
+      if (locateType.equals(RegionLocateType.AFTER)) {
+        row = createClosestRowAfter(row);
+      }
+      return getRegionLocationInternal(tableName, row, RegionLocateType.CURRENT);
+    }
   }
 
   void updateCachedLocation(HRegionLocation loc, Throwable exception) {
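
In the AFTER branch above the request is rewritten as a CURRENT lookup on
createClosestRowAfter(row), imported from ConnectionUtils. A minimal sketch of the assumed
behaviour of that helper: appending a single trailing 0x00 byte yields the smallest row that
sorts strictly after the input, so locating CURRENT on it is equivalent to locating AFTER on
the original row.

import java.util.Arrays;

public class ClosestRowAfterSketch {
  static byte[] createClosestRowAfter(byte[] row) {
    // Arrays.copyOf pads the extra slot with 0x00
    return Arrays.copyOf(row, row.length + 1);
  }

  public static void main(String[] args) {
    byte[] next = createClosestRowAfter(new byte[] { '1' });
    System.out.println(Arrays.toString(next)); // prints [49, 0]
  }
}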

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
index 1c3569a..7a45ae3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
@@ -79,33 +79,18 @@ class AsyncRegionLocator {
   }
 
   CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
-      long timeoutNs) {
+      RegionLocateType type, long timeoutNs) {
+    // the meta region cannot be split right now, so we always call the same method.
+    // Change it later if the meta table can have more than one region.
     CompletableFuture<HRegionLocation> future =
         tableName.equals(META_TABLE_NAME) ? metaRegionLocator.getRegionLocation()
-            : nonMetaRegionLocator.getRegionLocation(tableName, row);
+            : nonMetaRegionLocator.getRegionLocation(tableName, row, type);
     return withTimeout(future, timeoutNs,
       () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs)
           + "ms) waiting for region location for " + tableName + ", row='"
           + Bytes.toStringBinary(row) + "'");
   }
 
-  /**
-   * Locate the previous region using the current regions start key. Used for reverse scan as the
-   * end key is not included in a region so we need to treat it differently.
-   */
-  CompletableFuture<HRegionLocation> getPreviousRegionLocation(TableName tableName,
-      byte[] startRowOfCurrentRegion, long timeoutNs) {
-    // meta region can not be split right now so we call the same method as getRegionLocation.
-    // Change it later if the meta table can have more than one regions.
-    CompletableFuture<HRegionLocation> future =
-        tableName.equals(META_TABLE_NAME) ? metaRegionLocator.getRegionLocation()
-            : nonMetaRegionLocator.getPreviousRegionLocation(tableName, startRowOfCurrentRegion);
-    return withTimeout(future, timeoutNs,
-      () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs)
-          + "ms) waiting for region location for " + tableName + ", startRowOfCurrentRegion='"
-          + Bytes.toStringBinary(startRowOfCurrentRegion) + "'");
-  }
-
   static boolean canUpdate(HRegionLocation loc, HRegionLocation oldLoc) {
     // Do not need to update if no such location, or the location is newer, or the location is not
     // same with us

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
index f1a4247..d240fab 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
@@ -58,7 +58,7 @@ class AsyncRpcRetryingCallerFactory {
 
     private long rpcTimeoutNs = -1L;
 
-    private boolean locateToPreviousRegion;
+    private RegionLocateType locateType = RegionLocateType.CURRENT;
 
     public SingleRequestCallerBuilder<T> table(TableName tableName) {
       this.tableName = tableName;
@@ -86,15 +86,15 @@ class AsyncRpcRetryingCallerFactory {
       return this;
     }
 
-    public SingleRequestCallerBuilder<T> locateToPreviousRegion(boolean locateToPreviousRegion) {
-      this.locateToPreviousRegion = locateToPreviousRegion;
+    public SingleRequestCallerBuilder<T> locateType(RegionLocateType locateType) {
+      this.locateType = locateType;
       return this;
     }
 
     public AsyncSingleRequestRpcRetryingCaller<T> build() {
       return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn,
           checkNotNull(tableName, "tableName is null"), checkNotNull(row, "row is null"),
-          locateToPreviousRegion, checkNotNull(callable, "action is null"),
+          checkNotNull(locateType, "locateType is null"), checkNotNull(callable, "action is null"),
           conn.connConf.getPauseNs(), conn.connConf.getMaxRetries(), operationTimeoutNs,
           rpcTimeoutNs, conn.connConf.getStartLogErrorsCnt());
     }
@@ -246,7 +246,7 @@ class AsyncRpcRetryingCallerFactory {
     /**
      * Short cut for {@code build().start()}.
      */
-    public CompletableFuture<Boolean> start() {
+    public CompletableFuture<RegionLocateType> start() {
       return build().start();
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
index ca83a51..81c806f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
@@ -95,7 +95,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 
   private final Runnable completeWhenNoMoreResultsInRegion;
 
-  private final CompletableFuture<Boolean> future;
+  private final CompletableFuture<RegionLocateType> future;
 
   private final HBaseRpcController controller;
 
@@ -172,7 +172,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
 
   private void completeWithNextStartRow(byte[] nextStartRow) {
     scan.setStartRow(nextStartRow);
-    future.complete(scan.isReversed());
+    future.complete(scan.isReversed() ? RegionLocateType.BEFORE : RegionLocateType.CURRENT);
   }
 
   private byte[] createNextStartRowWhenError() {
@@ -193,7 +193,8 @@ class AsyncScanSingleRegionRpcRetryingCaller {
         includeNextStartRowWhenError ? nextStartRowWhenError : createNextStartRowWhenError.get());
     }
     future.complete(
-      scan.isReversed() && Bytes.equals(scan.getStartRow(), loc.getRegionInfo().getEndKey()));
+      scan.isReversed() && Bytes.equals(scan.getStartRow(), loc.getRegionInfo().getEndKey())
+          ? RegionLocateType.BEFORE : RegionLocateType.CURRENT);
   }
 
   private void onError(Throwable error) {
@@ -344,7 +345,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
   /**
    * @return return locate direction for next open scanner call, or null if we should stop.
    */
-  public CompletableFuture<Boolean> start() {
+  public CompletableFuture<RegionLocateType> start() {
     next();
     return future;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
index d6da131..0b4add1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
@@ -31,7 +31,6 @@ import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
-import java.util.function.Function;
 import java.util.function.Supplier;
 
 import org.apache.commons.logging.Log;
@@ -67,7 +66,7 @@ class AsyncSingleRequestRpcRetryingCaller<T> {
 
   private final byte[] row;
 
-  private final Function<Long, CompletableFuture<HRegionLocation>> locate;
+  private final RegionLocateType locateType;
 
   private final Callable<T> callable;
 
@@ -90,18 +89,14 @@ class AsyncSingleRequestRpcRetryingCaller<T> {
   private final long startNs;
 
   public AsyncSingleRequestRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn,
-      TableName tableName, byte[] row, boolean locateToPreviousRegion, Callable<T> callable,
+      TableName tableName, byte[] row, RegionLocateType locateType, Callable<T> callable,
       long pauseNs, int maxRetries, long operationTimeoutNs, long rpcTimeoutNs,
       int startLogErrorsCnt) {
     this.retryTimer = retryTimer;
     this.conn = conn;
     this.tableName = tableName;
     this.row = row;
-    if (locateToPreviousRegion) {
-      this.locate = this::locatePrevious;
-    } else {
-      this.locate = this::locate;
-    }
+    this.locateType = locateType;
     this.callable = callable;
     this.pauseNs = pauseNs;
     this.maxAttempts = retries2Attempts(maxRetries);
@@ -210,27 +205,20 @@ class AsyncSingleRequestRpcRetryingCaller<T> {
     } else {
       locateTimeoutNs = -1L;
     }
-    locate.apply(locateTimeoutNs).whenComplete((loc, error) -> {
-      if (error != null) {
-        onError(error,
-          () -> "Locate '" + Bytes.toStringBinary(row) + "' in " + tableName + " failed, tries = "
-              + tries + ", maxAttempts = " + maxAttempts + ", timeout = "
-              + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + " ms, time elapsed = "
-              + elapsedMs() + " ms",
-          err -> {
-          });
-        return;
-      }
-      call(loc);
-    });
-  }
-
-  private CompletableFuture<HRegionLocation> locate(long timeoutNs) {
-    return conn.getLocator().getRegionLocation(tableName, row, timeoutNs);
-  }
-
-  private CompletableFuture<HRegionLocation> locatePrevious(long timeoutNs) {
-    return conn.getLocator().getPreviousRegionLocation(tableName, row, timeoutNs);
+    conn.getLocator().getRegionLocation(tableName, row, locateType, locateTimeoutNs)
+        .whenComplete((loc, error) -> {
+          if (error != null) {
+            onError(error,
+              () -> "Locate '" + Bytes.toStringBinary(row) + "' in " + tableName
+                  + " failed, tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = "
+                  + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + " ms, time elapsed = "
+                  + elapsedMs() + " ms",
+              err -> {
+              });
+            return;
+          }
+          call(loc);
+        });
   }
 
   public CompletableFuture<T> call() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
index af639c0..c4c2074 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
@@ -144,7 +144,7 @@ class AsyncSmallScanRpcRetryingCaller {
         scan.setStartRow(
           createClosestNextRow.apply(resp.results[resp.results.length - 1].getRow()));
       }
-      scan(false);
+      scan(RegionLocateType.CURRENT);
       return;
     }
     if (!nextScan.apply(resp.currentRegion)) {
@@ -152,12 +152,11 @@ class AsyncSmallScanRpcRetryingCaller {
     }
   }
 
-  private void scan(boolean locateToPreviousRegion) {
+  private void scan(RegionLocateType locateType) {
     conn.callerFactory.<SmallScanResponse> single().table(tableName).row(scan.getStartRow())
         .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-        .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS)
-        .locateToPreviousRegion(locateToPreviousRegion).action(this::scan).call()
-        .whenComplete((resp, error) -> {
+        .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).locateType(locateType)
+        .action(this::scan).call().whenComplete((resp, error) -> {
           if (error != null) {
             future.completeExceptionally(error);
           } else {
@@ -172,11 +171,11 @@ class AsyncSmallScanRpcRetryingCaller {
   }
 
   private void firstScan() {
-    scan(false);
+    scan(RegionLocateType.CURRENT);
   }
 
   private void reversedFirstScan() {
-    scan(isEmptyStartRow(scan.getStartRow()));
+    scan(isEmptyStartRow(scan.getStartRow()) ? RegionLocateType.BEFORE : RegionLocateType.CURRENT);
   }
 
   private boolean nextScan(HRegionInfo region) {
@@ -190,7 +189,7 @@ class AsyncSmallScanRpcRetryingCaller {
       }
     }
     scan.setStartRow(region.getEndKey());
-    scan(false);
+    scan(RegionLocateType.CURRENT);
     return true;
   }
 
@@ -205,7 +204,7 @@ class AsyncSmallScanRpcRetryingCaller {
       }
     }
     scan.setStartRow(region.getStartKey());
-    scan(true);
+    scan(RegionLocateType.BEFORE);
     return true;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
index e1f40a7..1986962 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java
@@ -45,6 +45,6 @@ class AsyncTableRegionLocatorImpl implements AsyncTableRegionLocator {
 
   @Override
   public CompletableFuture<HRegionLocation> getRegionLocation(byte[] row, boolean reload) {
-    return locator.getRegionLocation(tableName, row, 0L);
+    return locator.getRegionLocation(tableName, row, RegionLocateType.CURRENT, -1L);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java
new file mode 100644
index 0000000..57f9498
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLocateType.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Indicate which row you want to locate.
+ * <ul>
+ * <li>{@link #BEFORE} locates the region which contains the row before the given row.</li>
+ * <li>{@link #CURRENT} locates the region which contains the given row.</li>
+ * <li>{@link #AFTER} locates the region which contains the row after the given row.</li>
+ * </ul>
+ */
+@InterfaceAudience.Private
+enum RegionLocateType {
+  BEFORE, CURRENT, AFTER
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
index f3aa26b..40fca72 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocator.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.util.Arrays;
@@ -102,15 +101,12 @@ public class TestAsyncNonMetaRegionLocator {
 
   @Test
   public void testNoTable() throws InterruptedException {
-    try {
-      LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW).get();
-    } catch (ExecutionException e) {
-      assertThat(e.getCause(), instanceOf(TableNotFoundException.class));
-    }
-    try {
-      LOCATOR.getPreviousRegionLocation(TABLE_NAME, EMPTY_END_ROW).get();
-    } catch (ExecutionException e) {
-      assertThat(e.getCause(), instanceOf(TableNotFoundException.class));
+    for (RegionLocateType locateType : RegionLocateType.values()) {
+      try {
+        LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, locateType).get();
+      } catch (ExecutionException e) {
+        assertThat(e.getCause(), instanceOf(TableNotFoundException.class));
+      }
     }
   }
 
@@ -118,15 +114,12 @@ public class TestAsyncNonMetaRegionLocator {
   public void testDisableTable() throws IOException, InterruptedException {
     createSingleRegionTable();
     TEST_UTIL.getAdmin().disableTable(TABLE_NAME);
-    try {
-      LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW).get();
-    } catch (ExecutionException e) {
-      assertThat(e.getCause(), instanceOf(TableNotFoundException.class));
-    }
-    try {
-      LOCATOR.getPreviousRegionLocation(TABLE_NAME, EMPTY_END_ROW).get();
-    } catch (ExecutionException e) {
-      assertThat(e.getCause(), instanceOf(TableNotFoundException.class));
+    for (RegionLocateType locateType : RegionLocateType.values()) {
+      try {
+        LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, locateType).get();
+      } catch (ExecutionException e) {
+        assertThat(e.getCause(), instanceOf(TableNotFoundException.class));
+      }
     }
   }
 
@@ -143,21 +136,15 @@ public class TestAsyncNonMetaRegionLocator {
   public void testSingleRegionTable() throws IOException, InterruptedException, ExecutionException {
     createSingleRegionTable();
     ServerName serverName = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME).getServerName();
-    assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName,
-      LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW).get());
-    assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName,
-      LOCATOR.getPreviousRegionLocation(TABLE_NAME, EMPTY_START_ROW).get());
+    for (RegionLocateType locateType : RegionLocateType.values()) {
+      assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName,
+        LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, locateType).get());
+    }
     byte[] randKey = new byte[ThreadLocalRandom.current().nextInt(128)];
     ThreadLocalRandom.current().nextBytes(randKey);
-    assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName,
-      LOCATOR.getRegionLocation(TABLE_NAME, randKey).get());
-    // Use a key which is not the endKey of a region will cause error
-    try {
+    for (RegionLocateType locateType : RegionLocateType.values()) {
       assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName,
-        LOCATOR.getPreviousRegionLocation(TABLE_NAME, new byte[] { 1 }).get());
-    } catch (ExecutionException e) {
-      assertThat(e.getCause(), instanceOf(IOException.class));
-      assertTrue(e.getCause().getMessage().contains("end key of"));
+        LOCATOR.getRegionLocation(TABLE_NAME, randKey, locateType).get());
     }
   }
 
@@ -194,7 +181,19 @@ public class TestAsyncNonMetaRegionLocator {
     IntStream.range(0, 2).forEach(n -> IntStream.range(0, startKeys.length).forEach(i -> {
       try {
         assertLocEquals(startKeys[i], i == startKeys.length - 1 ? EMPTY_END_ROW : startKeys[i + 1],
-          serverNames[i], LOCATOR.getRegionLocation(TABLE_NAME, startKeys[i]).get());
+          serverNames[i],
+          LOCATOR.getRegionLocation(TABLE_NAME, startKeys[i], RegionLocateType.CURRENT).get());
+      } catch (InterruptedException | ExecutionException e) {
+        throw new RuntimeException(e);
+      }
+    }));
+
+    LOCATOR.clearCache(TABLE_NAME);
+    IntStream.range(0, 2).forEach(n -> IntStream.range(0, startKeys.length).forEach(i -> {
+      try {
+        assertLocEquals(startKeys[i], i == startKeys.length - 1 ? EMPTY_END_ROW : startKeys[i + 1],
+          serverNames[i],
+          LOCATOR.getRegionLocation(TABLE_NAME, startKeys[i], RegionLocateType.AFTER).get());
       } catch (InterruptedException | ExecutionException e) {
         throw new RuntimeException(e);
       }
@@ -205,7 +204,7 @@ public class TestAsyncNonMetaRegionLocator {
       n -> IntStream.range(0, endKeys.length).map(i -> endKeys.length - 1 - i).forEach(i -> {
         try {
           assertLocEquals(i == 0 ? EMPTY_START_ROW : endKeys[i - 1], endKeys[i], serverNames[i],
-            LOCATOR.getPreviousRegionLocation(TABLE_NAME, endKeys[i]).get());
+            LOCATOR.getRegionLocation(TABLE_NAME, endKeys[i], RegionLocateType.BEFORE).get());
         } catch (InterruptedException | ExecutionException e) {
           throw new RuntimeException(e);
         }
@@ -216,7 +215,8 @@ public class TestAsyncNonMetaRegionLocator {
   public void testRegionMove() throws IOException, InterruptedException, ExecutionException {
     createSingleRegionTable();
     ServerName serverName = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME).getServerName();
-    HRegionLocation loc = LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW).get();
+    HRegionLocation loc =
+        LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT).get();
     assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, serverName, loc);
     ServerName newServerName = TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream()
         .map(t -> t.getRegionServer().getServerName()).filter(sn -> !sn.equals(serverName))
@@ -229,12 +229,39 @@ public class TestAsyncNonMetaRegionLocator {
       Thread.sleep(100);
     }
     // Should be same as it is in cache
-    assertSame(loc, LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW).get());
+    assertSame(loc,
+      LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT).get());
     LOCATOR.updateCachedLocation(loc, null);
     // null error will not trigger a cache cleanup
-    assertSame(loc, LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW).get());
+    assertSame(loc,
+      LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT).get());
     LOCATOR.updateCachedLocation(loc, new NotServingRegionException());
     assertLocEquals(EMPTY_START_ROW, EMPTY_END_ROW, newServerName,
-      LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW).get());
+      LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT).get());
+  }
+
+  // usually locating AFTER will return the same region as CURRENT, so we add a test where it
+  // returns a different result.
+  @Test
+  public void testLocateAfter() throws IOException, InterruptedException, ExecutionException {
+    byte[] row = Bytes.toBytes("1");
+    byte[] splitKey = Arrays.copyOf(row, 2);
+    TEST_UTIL.createTable(TABLE_NAME, FAMILY, new byte[][] { splitKey });
+    TEST_UTIL.waitTableAvailable(TABLE_NAME);
+    HRegionLocation currentLoc =
+        LOCATOR.getRegionLocation(TABLE_NAME, row, RegionLocateType.CURRENT).get();
+    ServerName currentServerName = TEST_UTIL.getRSForFirstRegionInTable(TABLE_NAME).getServerName();
+    assertLocEquals(EMPTY_START_ROW, splitKey, currentServerName, currentLoc);
+
+    HRegionLocation afterLoc =
+        LOCATOR.getRegionLocation(TABLE_NAME, row, RegionLocateType.AFTER).get();
+    ServerName afterServerName =
+        TEST_UTIL.getHBaseCluster().getRegionServerThreads().stream().map(t -> t.getRegionServer())
+            .filter(rs -> rs.getOnlineRegions(TABLE_NAME).stream()
+                .anyMatch(r -> Bytes.equals(splitKey, r.getRegionInfo().getStartKey())))
+            .findAny().get().getServerName();
+    assertLocEquals(splitKey, EMPTY_END_ROW, afterServerName, afterLoc);
+
+    assertSame(afterLoc, LOCATOR.getRegionLocation(TABLE_NAME, row, RegionLocateType.AFTER).get());
   }
 }
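
A hedged reading of why testLocateAfter above sees two different regions: the split key is
Arrays.copyOf(row, 2), which for the single byte row "1" is {0x31, 0x00}, exactly the closest
row after "1". A CURRENT lookup on "1" therefore resolves to the first region, while an AFTER
lookup resolves to the region starting at the split key.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class LocateAfterSketch {
  public static void main(String[] args) {
    byte[] row = "1".getBytes(StandardCharsets.UTF_8); // {0x31}
    byte[] splitKey = Arrays.copyOf(row, 2);           // {0x31, 0x00}, the closest row after "1"
    System.out.println(Arrays.toString(row) + " vs " + Arrays.toString(splitKey));
  }
}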

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
index e82703b..3918dc9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncNonMetaRegionLocatorConcurrenyLimit.java
@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import static java.util.stream.Collectors.toCollection;
 import static java.util.stream.Collectors.toList;
-import static org.apache.hadoop.hbase.HConstants.EMPTY_START_ROW;
 import static org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStartRow;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow;
@@ -28,7 +26,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutionException;
@@ -147,12 +144,10 @@ public class TestAsyncNonMetaRegionLocatorConcurrenyLimit {
 
   @Test
   public void test() throws InterruptedException, ExecutionException {
-    List<CompletableFuture<HRegionLocation>> futures = IntStream.range(0, 128)
-        .mapToObj(i -> Bytes.toBytes(String.format("%02x", i)))
-        .map(r -> LOCATOR.getRegionLocation(TABLE_NAME, r)).collect(toCollection(ArrayList::new));
-    futures.addAll(IntStream.range(129, 257)
-        .mapToObj(i -> i < 256 ? Bytes.toBytes(String.format("%02x", i)) : EMPTY_START_ROW)
-        .map(r -> LOCATOR.getPreviousRegionLocation(TABLE_NAME, r)).collect(toList()));
+    List<CompletableFuture<HRegionLocation>> futures =
+        IntStream.range(0, 256).mapToObj(i -> Bytes.toBytes(String.format("%02x", i)))
+            .map(r -> LOCATOR.getRegionLocation(TABLE_NAME, r, RegionLocateType.CURRENT))
+            .collect(toList());
     assertLocs(futures);
     assertTrue(MAX_CONCURRENCY.get() <= MAX_ALLOWED);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTimeout.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTimeout.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTimeout.java
index 40190cb..dfefcc7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTimeout.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTimeout.java
@@ -100,8 +100,8 @@ public class TestAsyncRegionLocatorTimeout {
     SLEEP_MS = 1000;
     long startNs = System.nanoTime();
     try {
-      LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, TimeUnit.MILLISECONDS.toNanos(500))
-          .get();
+      LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW, RegionLocateType.CURRENT,
+        TimeUnit.MILLISECONDS.toNanos(500)).get();
       fail();
     } catch (ExecutionException e) {
       e.printStackTrace();
@@ -113,8 +113,8 @@ public class TestAsyncRegionLocatorTimeout {
     // wait for the background task finish
     Thread.sleep(2000);
     // Now the location should be in cache, so we will not visit meta again.
-    HRegionLocation loc = LOCATOR
-        .getRegionLocation(TABLE_NAME, EMPTY_START_ROW, TimeUnit.MILLISECONDS.toNanos(500)).get();
+    HRegionLocation loc = LOCATOR.getRegionLocation(TABLE_NAME, EMPTY_START_ROW,
+      RegionLocateType.CURRENT, TimeUnit.MILLISECONDS.toNanos(500)).get();
     assertEquals(loc.getServerName(),
       TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName());
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/09bb4287/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
index f76e240..4a391e0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
@@ -154,7 +154,7 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
         new AsyncRegionLocator(asyncConn, AsyncConnectionImpl.RETRY_TIMER) {
           @Override
           CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
-              long timeoutNs) {
+              RegionLocateType locateType, long timeoutNs) {
             if (tableName.equals(TABLE_NAME)) {
               CompletableFuture<HRegionLocation> future = new CompletableFuture<>();
               if (count.getAndIncrement() == 0) {
@@ -165,17 +165,11 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
               }
               return future;
             } else {
-              return super.getRegionLocation(tableName, row, timeoutNs);
+              return super.getRegionLocation(tableName, row, locateType, timeoutNs);
             }
           }
 
           @Override
-          CompletableFuture<HRegionLocation> getPreviousRegionLocation(TableName tableName,
-              byte[] startRowOfCurrentRegion, long timeoutNs) {
-            return super.getPreviousRegionLocation(tableName, startRowOfCurrentRegion, timeoutNs);
-          }
-
-          @Override
           void updateCachedLocation(HRegionLocation loc, Throwable exception) {
           }
         };


[40/50] [abbrv] hbase git commit: HBASE-17385 Change usage documentation from bin/hbase to hbase in various tools

Posted by sy...@apache.org.
HBASE-17385 Change usage documentation from bin/hbase to hbase in various tools

Signed-off-by: Enis Soztutar <en...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7572e96e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7572e96e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7572e96e

Branch: refs/heads/hbase-12439
Commit: 7572e96e3abdc6636b53df3ca8738f467334a9ea
Parents: 001a26d
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Thu Dec 29 16:31:52 2016 +0100
Committer: Enis Soztutar <en...@apache.org>
Committed: Thu Dec 29 15:20:43 2016 -0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java  | 2 +-
 .../java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java     | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java   | 4 ++--
 .../java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java | 2 +-
 .../main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java   | 2 +-
 .../main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java   | 2 +-
 .../main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java   | 2 +-
 .../hadoop/hbase/mapreduce/replication/VerifyReplication.java    | 2 +-
 .../org/apache/hadoop/hbase/regionserver/CompactionTool.java     | 4 ++--
 .../hadoop/hbase/regionserver/HRegionServerCommandLine.java      | 2 +-
 .../apache/hadoop/hbase/replication/master/TableCFsUpdater.java  | 2 +-
 .../hbase/replication/regionserver/DumpReplicationQueues.java    | 2 +-
 .../main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/tool/Canary.java       | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/util/Merge.java        | 2 +-
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java | 4 ++--
 .../org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java    | 4 ++--
 .../main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java   | 4 ++--
 .../main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java  | 4 ++--
 19 files changed, 25 insertions(+), 25 deletions(-)
----------------------------------------------------------------------
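
The patch below rewrites every usage string to invoke the plain hbase launcher rather than bin/hbase. A minimal, runnable sketch of the convention the tools follow afterwards; the class name and option below are made up for illustration and are not HBase code:

import java.io.PrintStream;

public class UsageExample {

  // After the patch, HBase tools print usage as "hbase <ClassName> [options]".
  private static void printUsageAndExit(PrintStream err, int exitCode) {
    err.printf("Usage: hbase %s [options]%n", UsageExample.class.getName());
    err.println(" where [options] are:");
    err.println("  -h|-help   Show this help and exit.");
    System.exit(exitCode);
  }

  public static void main(String[] args) {
    printUsageAndExit(System.err, 0);
  }
}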


http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java
index fcc8b0c..b5484dd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZkAclReset.java
@@ -78,7 +78,7 @@ public class ZkAclReset extends Configured implements Tool {
   }
 
   private void printUsageAndExit() {
-    System.err.printf("Usage: bin/hbase %s [options]%n", getClass().getName());
+    System.err.printf("Usage: hbase %s [options]%n", getClass().getName());
     System.err.println(" where [options] are:");
     System.err.println("  -h|-help                Show this help and exit.");
     System.err.println("  -set-acls               Setup the hbase znode ACLs for a secure cluster");

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
index b5beaae..a790920 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
@@ -162,7 +162,7 @@ public abstract class AbstractHBaseTool implements Tool, Configurable {
   }
 
   protected void printUsage() {
-    printUsage("bin/hbase " + getClass().getName() + " <options>", "Options:", "");
+    printUsage("hbase " + getClass().getName() + " <options>", "Options:", "");
   }
 
   protected void printUsage(final String usageStr, final String usageHeader,

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
index 00fd834..7cec152 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
@@ -84,9 +84,9 @@ public class RESTServer implements Constants {
 
   private static void printUsageAndExit(Options options, int exitCode) {
     HelpFormatter formatter = new HelpFormatter();
-    formatter.printHelp("bin/hbase rest start", "", options,
+    formatter.printHelp("hbase rest start", "", options,
       "\nTo run the REST server as a daemon, execute " +
-      "bin/hbase-daemon.sh start|stop rest [--infoport <port>] [-p <port>] [-ro]\n", true);
+      "hbase-daemon.sh start|stop rest [--infoport <port>] [-p <port>] [-ro]\n", true);
     System.exit(exitCode);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
index 7dca16a..0d29159 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/PerformanceEvaluation.java
@@ -1406,7 +1406,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
     System.err.println("               running: 1 <= value <= 500");
     System.err.println("Examples:");
     System.err.println(" To run a single evaluation client:");
-    System.err.println(" $ bin/hbase " + this.getClass().getName()
+    System.err.println(" $ hbase " + this.getClass().getName()
         + " sequentialWrite 1");
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
index c1e8a82..819ef57 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java
@@ -211,7 +211,7 @@ public class CopyTable extends Configured implements Tool {
     System.err.println();
     System.err.println("Examples:");
     System.err.println(" To copy 'TestTable' to a cluster that uses replication for a 1 hour window:");
-    System.err.println(" $ bin/hbase " +
+    System.err.println(" $ hbase " +
         "org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 " +
         "--peer.adr=server1,server2,server3:2181:/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable ");
     System.err.println("For performance consider the following general option:\n"

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
index 43c72c4..674cb57 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java
@@ -624,7 +624,7 @@ public class HashTable extends Configured implements Tool {
     System.err.println();
     System.err.println("Examples:");
     System.err.println(" To hash 'TestTable' in 32kB batches for a 1 hour window into 50 files:");
-    System.err.println(" $ bin/hbase " +
+    System.err.println(" $ hbase " +
         "org.apache.hadoop.hbase.mapreduce.HashTable --batchsize=32000 --numhashfiles=50"
         + " --starttime=1265875194289 --endtime=1265878794289 --families=cf2,cf3"
         + " TestTable /hashes/testTable");

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index 2eb7a24..0e2842b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -699,7 +699,7 @@ public class SyncTable extends Configured implements Tool {
     System.err.println("Examples:");
     System.err.println(" For a dry run SyncTable of tableA from a remote source cluster");
     System.err.println(" to a local target cluster:");
-    System.err.println(" $ bin/hbase " +
+    System.err.println(" $ hbase " +
         "org.apache.hadoop.hbase.mapreduce.SyncTable --dryrun=true"
         + " --sourcezkcluster=zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase"
         + " hdfs://nn:9000/hashes/tableA tableA tableA");

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index 6801a61..ff703bc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -540,7 +540,7 @@ public class VerifyReplication extends Configured implements Tool {
     System.err.println();
     System.err.println("Examples:");
     System.err.println(" To verify the data replicated from TestTable for a 1 hour window with peer #5 ");
-    System.err.println(" $ bin/hbase " +
+    System.err.println(" $ hbase " +
         "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" +
         " --starttime=1265875194289 --endtime=1265878794289 5 TestTable ");
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index e35c686..a47228b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -460,10 +460,10 @@ public class CompactionTool extends Configured implements Tool {
     System.err.println();
     System.err.println("Examples:");
     System.err.println(" To compact the full 'TestTable' using MapReduce:");
-    System.err.println(" $ bin/hbase " + this.getClass().getName() + " -mapred hdfs:///hbase/data/default/TestTable");
+    System.err.println(" $ hbase " + this.getClass().getName() + " -mapred hdfs:///hbase/data/default/TestTable");
     System.err.println();
     System.err.println(" To compact column family 'x' of the table 'TestTable' region 'abc':");
-    System.err.println(" $ bin/hbase " + this.getClass().getName() + " hdfs:///hbase/data/default/TestTable/abc/x");
+    System.err.println(" $ hbase " + this.getClass().getName() + " hdfs:///hbase/data/default/TestTable/abc/x");
   }
 
   public static void main(String[] args) throws Exception {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
index c968bbe..348527d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
@@ -88,7 +88,7 @@ public class HRegionServerCommandLine extends ServerCommandLine {
     } else if ("stop".equals(cmd)) {
       System.err.println(
         "To shutdown the regionserver run " +
-        "bin/hbase-daemon.sh stop regionserver or send a kill signal to " +
+        "hbase-daemon.sh stop regionserver or send a kill signal to " +
         "the regionserver pid");
       return 1;
     } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java
index cf3705b..1494892 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java
@@ -121,7 +121,7 @@ public class TableCFsUpdater extends ReplicationStateZKBase {
   }
 
   private static void printUsageAndExit() {
-    System.err.printf("Usage: bin/hbase org.apache.hadoop.hbase.replication.master.TableCFsUpdater [options]");
+    System.err.printf("Usage: hbase org.apache.hadoop.hbase.replication.master.TableCFsUpdater [options]");
     System.err.println(" where [options] are:");
     System.err.println("  -h|-help    Show this help and exit.");
     System.err.println("  update      Copy table-cfs to replication peer config");

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
index 683f30c..4502141 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/DumpReplicationQueues.java
@@ -172,7 +172,7 @@ public class DumpReplicationQueues extends Configured implements Tool {
     if (message != null && message.length() > 0) {
       System.err.println(message);
     }
-    System.err.println("Usage: bin/hbase " + className + " \\");
+    System.err.println("Usage: hbase " + className + " \\");
     System.err.println("  <OPTIONS> [-D<property=value>]*");
     System.err.println();
     System.err.println("General Options:");

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
index 3922348..3fb445c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
@@ -541,7 +541,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
 
   @Override
   protected void printUsage() {
-    printUsage("bin/hbase snapshot info [options]", "Options:", "");
+    printUsage("hbase snapshot info [options]", "Options:", "");
     System.err.println("Examples:");
     System.err.println("  hbase snapshot info --snapshot MySnapshot --files");
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 6c4befc..3d77ca1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -776,7 +776,7 @@ public final class Canary implements Tool {
 
   private void printUsageAndExit() {
     System.err.printf(
-      "Usage: bin/hbase %s [opts] [table1 [table2]...] | [regionserver1 [regionserver2]..]%n",
+      "Usage: hbase %s [opts] [table1 [table2]...] | [regionserver1 [regionserver2]..]%n",
         getClass().getName());
     System.err.println(" where [opts] are:");
     System.err.println("   -help          Show this help and exit.");

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
index 3c81cfe..7b96660 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
@@ -247,7 +247,7 @@ public class Merge extends Configured implements Tool {
 
   private void usage() {
     System.err
-        .println("For hadoop 0.21+, Usage: bin/hbase org.apache.hadoop.hbase.util.Merge "
+        .println("For hadoop 0.21+, Usage: hbase org.apache.hadoop.hbase.util.Merge "
             + "[-Dfs.defaultFS=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 7d94a02..d1fb7f8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -1914,9 +1914,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
         + "(and HRegionServers) running. 1 <= value <= 500");
     System.err.println("Examples:");
     System.err.println(" To run a single client doing the default 1M sequentialWrites:");
-    System.err.println(" $ bin/hbase " + className + " sequentialWrite 1");
+    System.err.println(" $ hbase " + className + " sequentialWrite 1");
     System.err.println(" To run 10 clients doing increments over ten rows:");
-    System.err.println(" $ bin/hbase " + className + " --rows=10 --nomapred increment 10");
+    System.err.println(" $ hbase " + className + " --rows=10 --nomapred increment 10");
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
index 7300ee4..9bb3d7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
@@ -455,7 +455,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
   }
 
   private void printUsageAndExit() {
-    System.err.printf("Usage: bin/hbase %s [options]\n", getClass().getName());
+    System.err.printf("Usage: hbase %s [options]\n", getClass().getName());
     System.err.println(" where [options] are:");
     System.err.println("  -h|-help         Show this help and exit.");
     System.err.println("  -threads <N>     Number of threads writing on the WAL.");
@@ -483,7 +483,7 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
     System.err.println("");
     System.err.println(" To run 100 threads on hdfs with log rolling every 10k edits and " +
       "verification afterward do:");
-    System.err.println(" $ ./bin/hbase org.apache.hadoop.hbase.wal." +
+    System.err.println(" $ hbase org.apache.hadoop.hbase.wal." +
       "WALPerformanceEvaluation \\");
     System.err.println("    -conf ./core-site.xml -path hdfs://example.org:7000/tmp " +
       "-threads 100 -roll 10000 -verify");

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
index 254c4b7..6d754ce 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServer.java
@@ -78,8 +78,8 @@ public class ThriftServer {
       throws ExitCodeException {
     HelpFormatter formatter = new HelpFormatter();
     formatter.printHelp("Thrift", null, options,
-        "To start the Thrift server run 'bin/hbase-daemon.sh start thrift'\n" +
-        "To shutdown the thrift server run 'bin/hbase-daemon.sh stop " +
+        "To start the Thrift server run 'hbase-daemon.sh start thrift'\n" +
+        "To shutdown the thrift server run 'hbase-daemon.sh stop " +
         "thrift' or send a kill signal to the thrift server pid",
         true);
     throw new ExitCodeException(exitCode, "");

http://git-wip-us.apache.org/repos/asf/hbase/blob/7572e96e/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
index 5d35674..7b1f9fa 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
@@ -131,8 +131,8 @@ public class ThriftServer extends Configured implements Tool {
   private static void printUsage() {
     HelpFormatter formatter = new HelpFormatter();
     formatter.printHelp("Thrift", null, getOptions(),
-        "To start the Thrift server run 'bin/hbase-daemon.sh start thrift2'\n" +
-            "To shutdown the thrift server run 'bin/hbase-daemon.sh stop thrift2' or" +
+        "To start the Thrift server run 'hbase-daemon.sh start thrift2'\n" +
+            "To shutdown the thrift server run 'hbase-daemon.sh stop thrift2' or" +
             " send a kill signal to the thrift server pid",
         true);
   }


[30/50] [abbrv] hbase git commit: HBASE-17174 Addendum fix typo

Posted by sy...@apache.org.
HBASE-17174 Addendum fix typo

Signed-off-by: zhangduo <zh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5ffbd4a8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5ffbd4a8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5ffbd4a8

Branch: refs/heads/hbase-12439
Commit: 5ffbd4a87d73caee3ab27ae8d6a47cd36afc859f
Parents: e18e9a2
Author: ChiaPing Tsai <ch...@gmail.com>
Authored: Tue Dec 27 13:57:51 2016 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Tue Dec 27 13:57:51 2016 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/client/RequestController.java     | 2 +-
 .../org/apache/hadoop/hbase/client/SimpleRequestController.java    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/5ffbd4a8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java
index 7e9c968..46e730e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java
@@ -95,7 +95,7 @@ public interface RequestController {
   /**
    * @return The number of running task.
    */
-  long getNumberOfTsksInProgress();
+  long getNumberOfTasksInProgress();
 
   /**
    * Waits for the running tasks to complete.

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ffbd4a8/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java
index 473f264..6343af6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java
@@ -246,7 +246,7 @@ class SimpleRequestController implements RequestController {
   }
 
   @Override
-  public long getNumberOfTsksInProgress() {
+  public long getNumberOfTasksInProgress() {
     return tasksInProgress.get();
   }
 


[39/50] [abbrv] hbase git commit: HBASE-17374 ZKPermissionWatcher crashed when grant after region close (Liu Junhong)

Posted by sy...@apache.org.
HBASE-17374 ZKPermissionWatcher crashed when grant after region close (Liu Junhong)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/001a26d4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/001a26d4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/001a26d4

Branch: refs/heads/hbase-12439
Commit: 001a26d404ca39ab6dbb9efeb59c08f20938f112
Parents: 05b1d91
Author: tedyu <yu...@gmail.com>
Authored: Wed Dec 28 19:54:01 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Wed Dec 28 19:54:01 2016 -0800

----------------------------------------------------------------------
 .../security/access/ZKPermissionWatcher.java    | 23 ++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/001a26d4/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
index 308ef41..f21e877 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java
@@ -38,6 +38,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.atomic.AtomicReference;
 
 /**
@@ -120,7 +121,7 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable
   public void nodeCreated(String path) {
     waitUntilStarted();
     if (path.equals(aclZNode)) {
-      executor.submit(new Runnable() {
+      asyncProcessNodeUpdate(new Runnable() {
         @Override
         public void run() {
           try {
@@ -141,7 +142,7 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable
   public void nodeDeleted(final String path) {
     waitUntilStarted();
     if (aclZNode.equals(ZKUtil.getParent(path))) {
-      executor.submit(new Runnable() {
+      asyncProcessNodeUpdate(new Runnable() {
         @Override
         public void run() {
           String table = ZKUtil.getNodeName(path);
@@ -159,7 +160,7 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable
   public void nodeDataChanged(final String path) {
     waitUntilStarted();
     if (aclZNode.equals(ZKUtil.getParent(path))) {
-      executor.submit(new Runnable() {
+      asyncProcessNodeUpdate(new Runnable() {
         @Override
         public void run() {
           // update cache on an existing table node
@@ -198,7 +199,7 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable
         LOG.error("Error reading data from zookeeper for path "+path, ke);
         watcher.abort("ZooKeeper error get node children for path "+path, ke);
       }
-      executor.submit(new Runnable() {
+      asyncProcessNodeUpdate(new Runnable() {
         // allows subsequent nodeChildrenChanged event to preempt current processing of
         // nodeChildrenChanged event
         @Override
@@ -211,6 +212,20 @@ public class ZKPermissionWatcher extends ZooKeeperListener implements Closeable
     }
   }
 
+  private void asyncProcessNodeUpdate(Runnable runnable) {
+    if (!executor.isShutdown()) {
+      try {
+        executor.submit(runnable);
+      } catch (RejectedExecutionException e) {
+        if (executor.isShutdown()) {
+          LOG.warn("aclZNode changed after ZKPermissionWatcher was shutdown");
+        } else {
+          throw e;
+        }
+      }
+    }
+  }
+
   private void refreshNodes(List<ZKUtil.NodeAndData> nodes, AtomicReference ref) {
     for (ZKUtil.NodeAndData n : nodes) {
       if (ref != null && ref.get() != null) {
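
The asyncProcessNodeUpdate() guard added above exists because an ACL znode event can arrive after region close has already shut down the watcher's single-thread executor; submitting to a shut-down pool throws RejectedExecutionException and previously crashed the watcher. A small, self-contained sketch of the same guard pattern using only JDK classes; the method and runnable names are illustrative:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;

public class ShutdownSafeSubmit {

  // Submit only while the pool is up; tolerate the shutdown race instead of crashing.
  static void asyncProcessNodeUpdate(ExecutorService executor, Runnable runnable) {
    if (!executor.isShutdown()) {
      try {
        executor.submit(runnable);
      } catch (RejectedExecutionException e) {
        if (executor.isShutdown()) {
          System.out.println("znode changed after watcher shutdown; dropping update");
        } else {
          throw e;   // rejected for some other reason: still a real error
        }
      }
    } else {
      System.out.println("znode changed after watcher shutdown; dropping update");
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    Runnable aclRefresh = () -> System.out.println("refresh ACL cache");

    asyncProcessNodeUpdate(executor, aclRefresh);   // normal path: task is accepted
    executor.shutdown();                            // region close shuts the watcher down
    asyncProcessNodeUpdate(executor, aclRefresh);   // late znode event: dropped, no crash
    executor.awaitTermination(1, TimeUnit.SECONDS);
  }
}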


[06/50] [abbrv] hbase git commit: Revert "HBASE-17314 Limit total buffered size for all replication sources"

Posted by sy...@apache.org.
Revert "HBASE-17314 Limit total buffered size for all replication sources"

This reverts commit 3826e639672eea11d73da333e6c15f6b7c23a46c.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a1d2ff46
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a1d2ff46
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a1d2ff46

Branch: refs/heads/hbase-12439
Commit: a1d2ff4646743a9136bb1182c0512bce28e358b7
Parents: acd0218
Author: Michael Stack <st...@apache.org>
Authored: Wed Dec 21 11:17:28 2016 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Wed Dec 21 11:17:28 2016 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/HConstants.java     |   4 -
 .../hbase/regionserver/HRegionServer.java       |   3 +-
 .../regionserver/ReplicationSource.java         |  38 +---
 .../regionserver/ReplicationSourceManager.java  |   8 -
 .../replication/TestReplicationEndpoint.java    |   3 +-
 .../regionserver/TestGlobalThrottler.java       | 184 -------------------
 6 files changed, 10 insertions(+), 230 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a1d2ff46/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index dc96c2a..48d9778 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -932,10 +932,6 @@ public final class HConstants {
   public static final long
       REPLICATION_SERIALLY_WAITING_DEFAULT = 10000;
 
-  public static final String REPLICATION_SOURCE_TOTAL_BUFFER_KEY = "replication.total.buffer.quota";
-  public static final int REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT = 256 * 1024 * 1024;
-
-
   /**
    * Directory where the source cluster file system client configuration are placed which is used by
    * sink cluster to copy HFiles from source cluster file system

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1d2ff46/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 853d699..5bc0a66 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -2340,8 +2340,7 @@ public class HRegionServer extends HasThread implements
    * @return Return the object that implements the replication
    * source service.
    */
-  @VisibleForTesting
-  public ReplicationSourceService getReplicationSourceService() {
+  ReplicationSourceService getReplicationSourceService() {
     return replicationSourceHandler;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1d2ff46/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 3fb5f94..f777282 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -38,7 +38,6 @@ import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.PriorityBlockingQueue;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.lang.StringUtils;
@@ -151,9 +150,6 @@ public class ReplicationSource extends Thread
   private ConcurrentHashMap<String, ReplicationSourceWorkerThread> workerThreads =
       new ConcurrentHashMap<String, ReplicationSourceWorkerThread>();
 
-  private AtomicInteger totalBufferUsed;
-  private int totalBufferQuota;
-
   /**
    * Instantiation method used by region servers
    *
@@ -205,9 +201,7 @@ public class ReplicationSource extends Thread
     defaultBandwidth = this.conf.getLong("replication.source.per.peer.node.bandwidth", 0);
     currentBandwidth = getCurrentBandwidth();
     this.throttler = new ReplicationThrottler((double) currentBandwidth / 10.0);
-    this.totalBufferUsed = manager.getTotalBufferUsed();
-    this.totalBufferQuota = conf.getInt(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY,
-        HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_DFAULT);
+
     LOG.info("peerClusterZnode=" + peerClusterZnode + ", ReplicationSource : " + peerId
         + " inited, replicationQueueSizeCapacity=" + replicationQueueSizeCapacity
         + ", replicationQueueNbCapacity=" + replicationQueueNbCapacity + ", curerntBandwidth="
@@ -542,7 +536,7 @@ public class ReplicationSource extends Thread
     private boolean workerRunning = true;
     // Current number of hfiles that we need to replicate
     private long currentNbHFiles = 0;
-    List<WAL.Entry> entries;
+
     // Use guava cache to set ttl for each key
     private LoadingCache<String, Boolean> canSkipWaitingSet = CacheBuilder.newBuilder()
         .expireAfterAccess(1, TimeUnit.DAYS).build(
@@ -562,7 +556,6 @@ public class ReplicationSource extends Thread
       this.replicationQueueInfo = replicationQueueInfo;
       this.repLogReader = new ReplicationWALReaderManager(fs, conf);
       this.source = source;
-      this.entries = new ArrayList<>();
     }
 
     @Override
@@ -635,7 +628,8 @@ public class ReplicationSource extends Thread
         boolean gotIOE = false;
         currentNbOperations = 0;
         currentNbHFiles = 0;
-        entries.clear();
+        List<WAL.Entry> entries = new ArrayList<WAL.Entry>(1);
+
         Map<String, Long> lastPositionsForSerialScope = new HashMap<>();
         currentSize = 0;
         try {
@@ -727,7 +721,6 @@ public class ReplicationSource extends Thread
           continue;
         }
         shipEdits(currentWALisBeingWrittenTo, entries, lastPositionsForSerialScope);
-        releaseBufferQuota();
       }
       if (replicationQueueInfo.isQueueRecovered()) {
         // use synchronize to make sure one last thread will clean the queue
@@ -817,7 +810,7 @@ public class ReplicationSource extends Thread
             }
           }
         }
-        boolean totalBufferTooLarge = false;
+
         // don't replicate if the log entries have already been consumed by the cluster
         if (replicationEndpoint.canReplicateToSameCluster()
             || !entry.getKey().getClusterIds().contains(peerClusterId)) {
@@ -835,16 +828,15 @@ public class ReplicationSource extends Thread
             logKey.addClusterId(clusterId);
             currentNbOperations += countDistinctRowKeys(edit);
             entries.add(entry);
-            int delta = (int)entry.getEdit().heapSize() + calculateTotalSizeOfStoreFiles(edit);
-            currentSize += delta;
-            totalBufferTooLarge = acquireBufferQuota(delta);
+            currentSize += entry.getEdit().heapSize();
+            currentSize += calculateTotalSizeOfStoreFiles(edit);
           } else {
             metrics.incrLogEditsFiltered();
           }
         }
         // Stop if too many entries or too big
         // FIXME check the relationship between single wal group and overall
-        if (totalBufferTooLarge || currentSize >= replicationQueueSizeCapacity
+        if (currentSize >= replicationQueueSizeCapacity
             || entries.size() >= replicationQueueNbCapacity) {
           break;
         }
@@ -1325,19 +1317,5 @@ public class ReplicationSource extends Thread
     public void setWorkerRunning(boolean workerRunning) {
       this.workerRunning = workerRunning;
     }
-
-    /**
-     * @param size delta size for grown buffer
-     * @return true if we should clear buffer and push all
-     */
-    private boolean acquireBufferQuota(int size) {
-      return totalBufferUsed.addAndGet(size) >= totalBufferQuota;
-    }
-
-    private void releaseBufferQuota() {
-      totalBufferUsed.addAndGet(-currentSize);
-      currentSize = 0;
-      entries.clear();
-    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1d2ff46/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 2634a52..2c9fdcc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -42,7 +42,6 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -127,8 +126,6 @@ public class ReplicationSourceManager implements ReplicationListener {
   private Connection connection;
   private long replicationWaitTime;
 
-  private AtomicInteger totalBufferUsed = new AtomicInteger();
-
   /**
    * Creates a replication manager and sets the watch on all the other registered region servers
    * @param replicationQueues the interface for manipulating replication queues
@@ -438,11 +435,6 @@ public class ReplicationSourceManager implements ReplicationListener {
     }
   }
 
-  @VisibleForTesting
-  AtomicInteger getTotalBufferUsed() {
-    return totalBufferUsed;
-  }
-
   /**
    * Factory method to create a replication source
    * @param conf the configuration to use

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1d2ff46/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
index f9c467e..002b8c9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationEndpoint.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.replication;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -362,7 +361,7 @@ public class TestReplicationEndpoint extends TestReplicationBase {
     @Override
     public boolean replicate(ReplicateContext replicateContext) {
       replicateCount.incrementAndGet();
-      lastEntries = new ArrayList<>(replicateContext.entries);
+      lastEntries = replicateContext.entries;
       return true;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a1d2ff46/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
deleted file mode 100644
index a40d7ed..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.replication.regionserver;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HTestConst;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationSourceDummy;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.ReplicationTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({ ReplicationTests.class, LargeTests.class })
-public class TestGlobalThrottler {
-  private static final Log LOG = LogFactory.getLog(TestGlobalThrottler.class);
-  private static Configuration conf1;
-  private static Configuration conf2;
-
-  private static HBaseTestingUtility utility1;
-  private static HBaseTestingUtility utility2;
-
-  private static final byte[] famName = Bytes.toBytes("f");
-  private static final byte[] VALUE = Bytes.toBytes("v");
-  private static final byte[] ROW = Bytes.toBytes("r");
-  private static final byte[][] ROWS = HTestConst.makeNAscii(ROW, 100);
-
-  @BeforeClass
-  public static void setUpBeforeClass() throws Exception {
-    conf1 = HBaseConfiguration.create();
-    conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
-    conf1.setLong("replication.source.sleepforretries", 100);
-    // Each WAL is about 120 bytes
-    conf1.setInt(HConstants.REPLICATION_SOURCE_TOTAL_BUFFER_KEY, 200);
-    conf1.setLong("replication.source.per.peer.node.bandwidth", 100L);
-
-    utility1 = new HBaseTestingUtility(conf1);
-    utility1.startMiniZKCluster();
-    MiniZooKeeperCluster miniZK = utility1.getZkCluster();
-    new ZooKeeperWatcher(conf1, "cluster1", null, true);
-
-    conf2 = new Configuration(conf1);
-    conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
-
-    utility2 = new HBaseTestingUtility(conf2);
-    utility2.setZkCluster(miniZK);
-    new ZooKeeperWatcher(conf2, "cluster2", null, true);
-
-    ReplicationAdmin admin1 = new ReplicationAdmin(conf1);
-    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
-    rpc.setClusterKey(utility2.getClusterKey());
-    admin1.addPeer("peer1", rpc, null);
-    admin1.addPeer("peer2", rpc, null);
-    admin1.addPeer("peer3", rpc, null);
-
-    utility1.startMiniCluster(1, 1);
-    utility2.startMiniCluster(1, 1);
-  }
-
-  @AfterClass
-  public static void setDownAfterClass() throws Exception {
-    utility2.shutdownMiniCluster();
-    utility1.shutdownMiniCluster();
-  }
-
-
-  volatile private boolean testQuotaPass = false;
-  volatile private boolean testQuotaNonZero = false;
-  @Test
-  public void testQuota() throws IOException {
-    TableName tableName = TableName.valueOf("testQuota");
-    HTableDescriptor table = new HTableDescriptor(tableName);
-    HColumnDescriptor fam = new HColumnDescriptor(famName);
-    fam.setScope(HConstants.REPLICATION_SCOPE_SERIAL);
-    table.addFamily(fam);
-    utility1.getHBaseAdmin().createTable(table);
-    utility2.getHBaseAdmin().createTable(table);
-
-    Thread watcher = new Thread(()->{
-      Replication replication = (Replication)utility1.getMiniHBaseCluster()
-          .getRegionServer(0).getReplicationSourceService();
-      AtomicInteger bufferUsed = replication.getReplicationManager().getTotalBufferUsed();
-      testQuotaPass = true;
-      while (!Thread.interrupted()) {
-        int size = bufferUsed.get();
-        if (size > 0) {
-          testQuotaNonZero = true;
-        }
-        if (size > 600) {
-          // We read logs first then check throttler, so if the buffer quota limiter doesn't
-          // take effect, it will push many logs and exceed the quota.
-          testQuotaPass = false;
-        }
-        Threads.sleep(50);
-      }
-    });
-    watcher.start();
-
-    try(Table t1 = utility1.getConnection().getTable(tableName);
-        Table t2 = utility2.getConnection().getTable(tableName)) {
-      for (int i = 0; i < 50; i++) {
-        Put put = new Put(ROWS[i]);
-        put.addColumn(famName, VALUE, VALUE);
-        t1.put(put);
-      }
-      long start = EnvironmentEdgeManager.currentTime();
-      while (EnvironmentEdgeManager.currentTime() - start < 180000) {
-        Scan scan = new Scan();
-        scan.setCaching(50);
-        int count = 0;
-        try (ResultScanner results = t2.getScanner(scan)) {
-          for (Result result : results) {
-            count++;
-          }
-        }
-        if (count < 50) {
-          LOG.info("Waiting for all logs pushed to slave. Expected 50 , actual " + count);
-          Threads.sleep(200);
-          continue;
-        }
-        break;
-      }
-    }
-
-    watcher.interrupt();
-    Assert.assertTrue(testQuotaPass);
-    Assert.assertTrue(testQuotaNonZero);
-  }
-
-  private List<Integer> getRowNumbers(List<Cell> cells) {
-    List<Integer> listOfRowNumbers = new ArrayList<>();
-    for (Cell c : cells) {
-      listOfRowNumbers.add(Integer.parseInt(Bytes
-          .toString(c.getRowArray(), c.getRowOffset() + ROW.length,
-              c.getRowLength() - ROW.length)));
-    }
-    return listOfRowNumbers;
-  }
-}
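
For context on what the revert removes: the reverted change gave all replication sources on a region server one shared byte counter, so a source stops batching WAL edits once the cluster-wide quota is crossed and returns the bytes after shipping the batch. A minimal sketch of that accounting idea using only JDK classes; the class name, sizes and loop below are illustrative, not the HBase API:

import java.util.concurrent.atomic.AtomicInteger;

public class BufferQuotaSketch {
  // one counter shared by every replication source on the region server
  private static final AtomicInteger totalBufferUsed = new AtomicInteger();
  private static final int totalBufferQuota = 256 * 1024 * 1024;  // default quota in the reverted patch

  /** Adds delta to the shared counter; true means "stop batching and ship now". */
  static boolean acquireBufferQuota(int delta) {
    return totalBufferUsed.addAndGet(delta) >= totalBufferQuota;
  }

  /** Gives the batched bytes back once the batch has been shipped. */
  static void releaseBufferQuota(int batchedSize) {
    totalBufferUsed.addAndGet(-batchedSize);
  }

  public static void main(String[] args) {
    int batchedSize = 0;
    boolean quotaHit = false;
    for (int i = 0; i < 10 && !quotaHit; i++) {
      int editSize = 64 * 1024 * 1024;            // pretend every WAL edit batch is 64 MB
      quotaHit = acquireBufferQuota(editSize);    // true on the 4th edit (4 * 64 MB >= 256 MB)
      batchedSize += editSize;
    }
    System.out.println("shipping " + batchedSize + " bytes, quota hit=" + quotaHit);
    releaseBufferQuota(batchedSize);
  }
}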


[34/50] [abbrv] hbase git commit: HBASE-17090 Procedure v2 - fast wake if nothing else is running (Matteo Bertozzi)

Posted by sy...@apache.org.
HBASE-17090 Procedure v2 - fast wake if nothing else is running (Matteo Bertozzi)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/da97569e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/da97569e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/da97569e

Branch: refs/heads/hbase-12439
Commit: da97569eae662ad90fd3afd98ef148c94eee4ac1
Parents: 306ef83
Author: Michael Stack <st...@apache.org>
Authored: Tue Dec 27 16:19:32 2016 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Tue Dec 27 16:19:32 2016 -0800

----------------------------------------------------------------------
 .../hbase/procedure2/ProcedureExecutor.java     |  4 +-
 .../procedure2/store/NoopProcedureStore.java    |  5 +++
 .../hbase/procedure2/store/ProcedureStore.java  |  6 +++
 .../procedure2/store/wal/WALProcedureStore.java | 42 ++++++++++++++------
 .../wal/ProcedureWALPerformanceEvaluation.java  | 17 +++-----
 5 files changed, 48 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/da97569e/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 80c3804..c65f3fb 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -1536,7 +1536,7 @@ public class ProcedureExecutor<TEnvironment> {
         final Procedure procedure = scheduler.poll(keepAliveTime, TimeUnit.MILLISECONDS);
         if (procedure == null) continue;
 
-        activeExecutorCount.incrementAndGet();
+        store.setRunningProcedureCount(activeExecutorCount.incrementAndGet());
         executionStartTime.set(EnvironmentEdgeManager.currentTime());
         try {
           if (isTraceEnabled) {
@@ -1544,7 +1544,7 @@ public class ProcedureExecutor<TEnvironment> {
           }
           executeProcedure(procedure);
         } finally {
-          activeExecutorCount.decrementAndGet();
+          store.setRunningProcedureCount(activeExecutorCount.decrementAndGet());
           lastUpdate = EnvironmentEdgeManager.currentTime();
           executionStartTime.set(Long.MAX_VALUE);
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/da97569e/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java
index f248dc3..c03e326 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java
@@ -52,6 +52,11 @@ public class NoopProcedureStore extends ProcedureStoreBase {
   }
 
   @Override
+  public void setRunningProcedureCount(final int count) {
+    // no-op
+  }
+
+  @Override
   public void load(final ProcedureLoader loader) throws IOException {
     loader.setMaxProcId(0);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/da97569e/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
index e47ed63..032c8fc 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java
@@ -151,6 +151,12 @@ public interface ProcedureStore {
   int getNumThreads();
 
   /**
+   * Set the number of procedure running.
+   * This can be used, for example, by the store to know how long to wait before a sync.
+   */
+  void setRunningProcedureCount(int count);
+
+  /**
    * Acquire the lease for the procedure store.
    */
   void recoverLease() throws IOException;
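
The setRunningProcedureCount() hook added above lets the WAL store cap how many slots the sync thread waits to fill: with only one procedure running there is nothing to batch against, so the store syncs, and the waiting procedure wakes, without sitting out the full wait. A rough, self-contained sketch of that idea; this is a toy batcher, not the actual WALProcedureStore logic:

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class FastWakeBatcher {
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition slotCond = lock.newCondition();
  private final int maxSlots;
  private volatile int runningCount;
  private int slotIndex;

  FastWakeBatcher(int maxSlots) {
    this.maxSlots = maxSlots;
    this.runningCount = maxSlots;
  }

  /** Mirrors setRunningProcedureCount(): never wait for more slots than writers. */
  void setRunningCount(int count) {
    runningCount = count > 0 ? Math.min(count, maxSlots) : maxSlots;
  }

  void append() {
    lock.lock();
    try {
      slotIndex++;
      if (slotIndex >= Math.min(runningCount, maxSlots)) {
        slotCond.signal();   // batch is "full enough", wake the sync thread now
      }
    } finally {
      lock.unlock();
    }
  }

  /** Flushes the batch; waits at most waitMsec for it to fill, then returns its size. */
  int sync(long waitMsec) throws InterruptedException {
    lock.lock();
    try {
      int syncMaxSlot = Math.min(runningCount, maxSlots);
      if (slotIndex < syncMaxSlot) {
        slotCond.await(waitMsec, TimeUnit.MILLISECONDS);
      }
      int flushed = slotIndex;
      slotIndex = 0;
      return flushed;
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    FastWakeBatcher batcher = new FastWakeBatcher(16);
    batcher.setRunningCount(1);   // only one procedure is running
    batcher.append();             // its single write already fills the batch
    System.out.println("flushed " + batcher.sync(1000) + " slot(s) without waiting");
  }
}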

http://git-wip-us.apache.org/repos/asf/hbase/blob/da97569e/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 922b681..4465993 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -136,7 +136,9 @@ public class WALProcedureStore extends ProcedureStoreBase {
   private LinkedTransferQueue<ByteSlot> slotsCache = null;
   private Set<ProcedureWALFile> corruptedLogs = null;
   private FSDataOutputStream stream = null;
+  private int runningProcCount = 1;
   private long flushLogId = 0;
+  private int syncMaxSlot = 1;
   private int slotIndex = 0;
   private Thread syncThread;
   private ByteSlot[] slots;
@@ -198,6 +200,8 @@ public class WALProcedureStore extends ProcedureStoreBase {
 
     // Init buffer slots
     loading.set(true);
+    runningProcCount = numSlots;
+    syncMaxSlot = numSlots;
     slots = new ByteSlot[numSlots];
     slotsCache = new LinkedTransferQueue();
     while (slotsCache.size() < numSlots) {
@@ -288,6 +292,12 @@ public class WALProcedureStore extends ProcedureStoreBase {
     return slots == null ? 0 : slots.length;
   }
 
+  @Override
+  public void setRunningProcedureCount(final int count) {
+    LOG.debug("set running procedure count=" + count + " slots=" + slots.length);
+    this.runningProcCount = count > 0 ? Math.min(count, slots.length) : slots.length;
+  }
+
   public ProcedureStoreTracker getStoreTracker() {
     return storeTracker;
   }
@@ -623,7 +633,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
           throw new RuntimeException("sync aborted", syncException.get());
         } else if (inSync.get()) {
           syncCond.await();
-        } else if (slotIndex == slots.length) {
+        } else if (slotIndex >= syncMaxSlot) {
           slotCond.signal();
           syncCond.await();
         } else {
@@ -642,7 +652,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
       }
 
       // Notify that the slots are full
-      if (slotIndex == slots.length) {
+      if (slotIndex == syncMaxSlot) {
         waitCond.signal();
         slotCond.signal();
       }
@@ -725,8 +735,10 @@ public class WALProcedureStore extends ProcedureStoreBase {
             }
           }
           // Wait SYNC_WAIT_MSEC or the signal of "slots full" before flushing
+          syncMaxSlot = runningProcCount;
+          assert syncMaxSlot > 0 : "unexpected syncMaxSlot=" + syncMaxSlot;
           final long syncWaitSt = System.currentTimeMillis();
-          if (slotIndex != slots.length) {
+          if (slotIndex != syncMaxSlot) {
             slotCond.await(syncWaitMsec, TimeUnit.MILLISECONDS);
           }
 
@@ -734,7 +746,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
           final long syncWaitMs = currentTs - syncWaitSt;
           final float rollSec = getMillisFromLastRoll() / 1000.0f;
           final float syncedPerSec = totalSyncedToStore / rollSec;
-          if (LOG.isTraceEnabled() && (syncWaitMs > 10 || slotIndex < slots.length)) {
+          if (LOG.isTraceEnabled() && (syncWaitMs > 10 || slotIndex < syncMaxSlot)) {
             LOG.trace(String.format("Sync wait %s, slotIndex=%s , totalSynced=%s (%s/sec)",
                       StringUtils.humanTimeDiff(syncWaitMs), slotIndex,
                       StringUtils.humanSize(totalSyncedToStore),
@@ -813,29 +825,33 @@ public class WALProcedureStore extends ProcedureStoreBase {
     return totalSynced;
   }
 
-  protected long syncSlots(FSDataOutputStream stream, ByteSlot[] slots, int offset, int count)
-      throws IOException {
+  protected long syncSlots(final FSDataOutputStream stream, final ByteSlot[] slots,
+      final int offset, final int count) throws IOException {
     long totalSynced = 0;
     for (int i = 0; i < count; ++i) {
-      ByteSlot data = slots[offset + i];
+      final ByteSlot data = slots[offset + i];
       data.writeTo(stream);
       totalSynced += data.size();
     }
 
-    if (useHsync) {
-      stream.hsync();
-    } else {
-      stream.hflush();
-    }
+    syncStream(stream);
     sendPostSyncSignal();
 
     if (LOG.isTraceEnabled()) {
-      LOG.trace("Sync slots=" + count + '/' + slots.length +
+      LOG.trace("Sync slots=" + count + '/' + syncMaxSlot +
                 ", flushed=" + StringUtils.humanSize(totalSynced));
     }
     return totalSynced;
   }
 
+  protected void syncStream(final FSDataOutputStream stream) throws IOException {
+    if (useHsync) {
+      stream.hsync();
+    } else {
+      stream.hflush();
+    }
+  }
+
   private boolean rollWriterWithRetries() {
     for (int i = 0; i < rollRetries && isRunning(); ++i) {
       if (i > 0) Threads.sleepWithoutInterrupt(waitBeforeRoll * i);

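The net effect of the hunk above is that the sync thread no longer waits for the full slot array to fill when only a few procedures are active; it caps the wait at the number of running procedures. A simplified, self-contained model of that decision (assumed names, not the real WALProcedureStore code):

    // Simplified model: with N running procedures there can be at most N
    // concurrent writers, so waiting for more than N slots before syncing
    // only adds latency.
    final class SyncSlotModel {
      static int computeSyncMaxSlot(int runningProcCount, int totalSlots) {
        return runningProcCount > 0 ? Math.min(runningProcCount, totalSlots) : totalSlots;
      }

      static boolean readyToSync(int slotIndex, int syncMaxSlot, long waitedMs, long syncWaitMsec) {
        // Sync once every expected writer has filled a slot, or once the wait budget expires.
        return slotIndex >= syncMaxSlot || waitedMs >= syncWaitMsec;
      }
    }
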
http://git-wip-us.apache.org/repos/asf/hbase/blob/da97569e/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java
index 363574b..641ac8e 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java
@@ -149,7 +149,7 @@ public class ProcedureWALPerformanceEvaluation extends AbstractHBaseTool {
       // Start worker threads.
       long start = System.currentTimeMillis();
       for (int i = 0; i < numThreads; i++) {
-        futures[i] = executor.submit(this.new Worker(start));
+        futures[i] = executor.submit(new Worker(start));
       }
       boolean failure = false;
       try {
@@ -197,8 +197,8 @@ public class ProcedureWALPerformanceEvaluation extends AbstractHBaseTool {
    * If procedure store fails to roll log file (throws IOException), all threads quit, and at
    * least one returns value of {@link AbstractHBaseTool#EXIT_FAILURE}.
    */
-  class Worker implements Callable<Integer> {
-    final long start;
+  private final class Worker implements Callable<Integer> {
+    private final long start;
 
     public Worker(long start) {
       this.start = start;
@@ -243,7 +243,7 @@ public class ProcedureWALPerformanceEvaluation extends AbstractHBaseTool {
     }
   }
 
-  public class NoSyncWalProcedureStore extends WALProcedureStore {
+  private class NoSyncWalProcedureStore extends WALProcedureStore {
     public NoSyncWalProcedureStore(final Configuration conf, final FileSystem fs,
         final Path logDir) {
       super(conf, fs, logDir, new WALProcedureStore.LeaseRecovery() {
@@ -255,13 +255,8 @@ public class ProcedureWALPerformanceEvaluation extends AbstractHBaseTool {
     }
 
     @Override
-    protected long syncSlots(FSDataOutputStream stream, ByteSlot[] slots, int offset, int count)
-        throws IOException {
-      long totalSynced = 0;
-      for (int i = 0; i < count; ++i) {
-        totalSynced += slots[offset + i].size();
-      }
-      return totalSynced;
+    protected void syncStream(FSDataOutputStream stream) {
+      // no-op
     }
   }
 


[44/50] [abbrv] hbase git commit: HBASE-17336 get/update replication peer config requests should be routed through master

Posted by sy...@apache.org.
HBASE-17336 get/update replication peer config requests should be routed through master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0e486656
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0e486656
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0e486656

Branch: refs/heads/hbase-12439
Commit: 0e48665641b16cd9b250503696b926a568063654
Parents: 1c477b2
Author: Guanghao Zhang <zg...@apache.org>
Authored: Tue Dec 27 14:20:57 2016 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Fri Dec 30 10:12:47 2016 +0800

----------------------------------------------------------------------
 .../hbase/ReplicationPeerNotFoundException.java |   36 +
 .../org/apache/hadoop/hbase/client/Admin.java   |   20 +
 .../hbase/client/ConnectionImplementation.java  |   17 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   28 +
 .../client/replication/ReplicationAdmin.java    |   48 +-
 .../hbase/shaded/protobuf/RequestConverter.java |   18 +
 .../shaded/protobuf/generated/MasterProtos.java |  290 +-
 .../protobuf/generated/ReplicationProtos.java   | 2509 +++++++++++++++++-
 .../src/main/protobuf/Master.proto              |    8 +
 .../src/main/protobuf/Replication.proto         |   17 +
 .../hbase/coprocessor/MasterObserver.java       |   42 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   27 +
 .../hbase/master/MasterCoprocessorHost.java     |   42 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   33 +
 .../hadoop/hbase/master/MasterServices.java     |   16 +
 .../master/replication/ReplicationManager.java  |   23 +-
 .../hbase/security/access/AccessController.java |   13 +
 .../hbase/util/ServerRegionReplicaUtil.java     |   19 +-
 .../replication/TestReplicationAdmin.java       |    5 +-
 .../hbase/master/MockNoopMasterServices.java    |   11 +
 .../TestRegionReplicaReplicationEndpoint.java   |   35 +-
 .../security/access/TestAccessController.java   |   31 +
 .../src/main/ruby/hbase/replication_admin.rb    |    2 +-
 .../asciidoc/_chapters/appendix_acl_matrix.adoc |    2 +
 24 files changed, 3194 insertions(+), 98 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java
new file mode 100644
index 0000000..daf7dd5
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java
@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Thrown when a replication peer can not be found
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class ReplicationPeerNotFoundException extends DoNotRetryIOException {
+
+  private static final long serialVersionUID = 1L;
+
+  public ReplicationPeerNotFoundException(String peerId) {
+    super(peerId);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index fe3960f..2a1b782 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1860,6 +1860,26 @@ public interface Admin extends Abortable, Closeable {
   }
 
   /**
+   * Returns the configured ReplicationPeerConfig for the specified peer
+   * @param peerId a short name that identifies the peer
+   * @return ReplicationPeerConfig for the peer
+   * @throws IOException
+   */
+  default ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
+    return new ReplicationPeerConfig();
+  }
+
+  /**
+   * Update the peerConfig for the specified peer
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig new config for the peer
+   * @throws IOException
+   */
+  default void updateReplicationPeerConfig(final String peerId,
+      final ReplicationPeerConfig peerConfig) throws IOException {
+  }
+
+  /**
    * Mark a region server as draining to prevent additional regions from getting assigned to it.
    * @param servers List of region servers to drain.
    */

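A hedged usage sketch of the two new Admin methods; the connection setup is standard HBase client boilerplate, and the peer id and cluster key values are made up:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    public class ReplicationPeerConfigExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Read the peer config for peer "1" through the master.
          ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig("1");
          // Modify it locally, then push the update back through the master.
          peerConfig.setClusterKey("zk1,zk2,zk3:2181:/hbase");
          admin.updateReplicationPeerConfig("1", peerConfig);
        }
      }
    }
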
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index ceac3fb..ea11c25 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -100,8 +100,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Disab
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
@@ -1694,6 +1698,19 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
           throws ServiceException {
         return stub.removeDrainFromRegionServers(controller, request);
       }
+
+      @Override
+      public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
+          GetReplicationPeerConfigRequest request) throws ServiceException {
+        return stub.getReplicationPeerConfig(controller, request);
+      }
+
+      @Override
+      public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(
+          RpcController controller, UpdateReplicationPeerConfigRequest request)
+          throws ServiceException {
+        return stub.updateReplicationPeerConfig(controller, request);
+      }
     };
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index ec4a5c1..89d1b49 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.UnknownRegionException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -171,6 +172,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
@@ -3799,6 +3801,32 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
+  public ReplicationPeerConfig getReplicationPeerConfig(final String peerId) throws IOException {
+    return executeCallable(new MasterCallable<ReplicationPeerConfig>(getConnection(),
+        getRpcControllerFactory()) {
+      @Override
+      protected ReplicationPeerConfig rpcCall() throws Exception {
+        GetReplicationPeerConfigResponse response = master.getReplicationPeerConfig(
+          getRpcController(), RequestConverter.buildGetReplicationPeerConfigRequest(peerId));
+        return ReplicationSerDeHelper.convert(response.getPeerConfig());
+      }
+    });
+  }
+
+  @Override
+  public void updateReplicationPeerConfig(final String peerId,
+      final ReplicationPeerConfig peerConfig) throws IOException {
+    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+      @Override
+      protected Void rpcCall() throws Exception {
+        master.updateReplicationPeerConfig(getRpcController(),
+          RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig));
+        return null;
+      }
+    });
+  }
+
+  @Override
   public void drainRegionServers(List<ServerName> servers) throws IOException {
     final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>();
     for (ServerName server : servers) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 2d6c37b..d0859a4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -207,11 +207,8 @@ public class ReplicationAdmin implements Closeable {
     return ReplicationSerDeHelper.parseTableCFsFromConfig(tableCFsConfig);
   }
 
-  public void updatePeerConfig(String id, ReplicationPeerConfig peerConfig)
-      throws ReplicationException {
-    checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
-      peerConfig.getTableCFsMap());
-    this.replicationPeers.updatePeerConfig(id, peerConfig);
+  public void updatePeerConfig(String id, ReplicationPeerConfig peerConfig) throws IOException {
+    this.admin.updateReplicationPeerConfig(id, peerConfig);
   }
 
   /**
@@ -250,8 +247,8 @@ public class ReplicationAdmin implements Closeable {
     return this.replicationPeers.getAllPeerConfigs();
   }
 
-  public ReplicationPeerConfig getPeerConfig(String id) throws ReplicationException {
-    return this.replicationPeers.getReplicationPeerConfig(id);
+  public ReplicationPeerConfig getPeerConfig(String id) throws IOException {
+    return admin.getReplicationPeerConfig(id);
   }
 
   /**
@@ -261,8 +258,9 @@ public class ReplicationAdmin implements Closeable {
    * use {@link #getPeerConfig(String)} instead.
    * */
   @Deprecated
-  public String getPeerTableCFs(String id) throws ReplicationException {
-    return ReplicationSerDeHelper.convertToString(this.replicationPeers.getPeerTableCFsConfig(id));
+  public String getPeerTableCFs(String id) throws IOException {
+    ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig(id);
+    return ReplicationSerDeHelper.convertToString(peerConfig.getTableCFsMap());
   }
 
   /**
@@ -270,11 +268,13 @@ public class ReplicationAdmin implements Closeable {
   * @param id a short name that identifies the cluster
    * @param tableCfs table-cfs config str
    * @throws ReplicationException
+   * @throws IOException
    * @deprecated as release of 2.0.0, and it will be removed in 3.0.0,
    * use {@link #appendPeerTableCFs(String, Map)} instead.
    */
   @Deprecated
-  public void appendPeerTableCFs(String id, String tableCfs) throws ReplicationException {
+  public void appendPeerTableCFs(String id, String tableCfs) throws ReplicationException,
+      IOException {
     appendPeerTableCFs(id, ReplicationSerDeHelper.parseTableCFsFromConfig(tableCfs));
   }
 
@@ -283,13 +283,15 @@ public class ReplicationAdmin implements Closeable {
   * @param id a short name that identifies the cluster
    * @param tableCfs A map from tableName to column family names
    * @throws ReplicationException
+   * @throws IOException
    */
   public void appendPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
-      throws ReplicationException {
+      throws ReplicationException, IOException {
     if (tableCfs == null) {
       throw new ReplicationException("tableCfs is null");
     }
-    Map<TableName, List<String>> preTableCfs = this.replicationPeers.getPeerTableCFsConfig(id);
+    ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig(id);
+    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
     if (preTableCfs == null) {
       setPeerTableCFs(id, tableCfs);
       return;
@@ -314,7 +316,7 @@ public class ReplicationAdmin implements Closeable {
         }
       }
     }
-    setPeerTableCFs(id, preTableCfs);
+    updatePeerConfig(id, peerConfig);
   }
 
   /**
@@ -322,11 +324,13 @@ public class ReplicationAdmin implements Closeable {
    * @param id a short name that identifies the cluster
    * @param tableCf table-cfs config str
    * @throws ReplicationException
+   * @throws IOException
    * @deprecated as release of 2.0.0, and it will be removed in 3.0.0,
    * use {@link #removePeerTableCFs(String, Map)} instead.
    */
   @Deprecated
-  public void removePeerTableCFs(String id, String tableCf) throws ReplicationException {
+  public void removePeerTableCFs(String id, String tableCf) throws ReplicationException,
+      IOException {
     removePeerTableCFs(id, ReplicationSerDeHelper.parseTableCFsFromConfig(tableCf));
   }
 
@@ -335,13 +339,15 @@ public class ReplicationAdmin implements Closeable {
    * @param id a short name that identifies the cluster
    * @param tableCfs A map from tableName to column family names
    * @throws ReplicationException
+   * @throws IOException
    */
   public void removePeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
-      throws ReplicationException {
+      throws ReplicationException, IOException {
     if (tableCfs == null) {
       throw new ReplicationException("tableCfs is null");
     }
-    Map<TableName, List<String>> preTableCfs = this.replicationPeers.getPeerTableCFsConfig(id);
+    ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig(id);
+    Map<TableName, List<String>> preTableCfs = peerConfig.getTableCFsMap();
     if (preTableCfs == null) {
       throw new ReplicationException("Table-Cfs for peer" + id + " is null");
     }
@@ -372,7 +378,7 @@ public class ReplicationAdmin implements Closeable {
         throw new ReplicationException("No table: " + table + " in table-cfs config of peer: " + id);
       }
     }
-    setPeerTableCFs(id, preTableCfs);
+    updatePeerConfig(id, peerConfig);
   }
 
   /**
@@ -384,10 +390,10 @@ public class ReplicationAdmin implements Closeable {
    * families
    */
   public void setPeerTableCFs(String id, Map<TableName, ? extends Collection<String>> tableCfs)
-      throws ReplicationException {
-    checkNamespacesAndTableCfsConfigConflict(
-      this.replicationPeers.getReplicationPeerConfig(id).getNamespaces(), tableCfs);
-    this.replicationPeers.setPeerTableCFsConfig(id, tableCfs);
+      throws IOException {
+    ReplicationPeerConfig peerConfig = getPeerConfig(id);
+    peerConfig.setTableCFsMap(tableCfs);
+    updatePeerConfig(id, peerConfig);
   }
 
   /**

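With this change the table-CF helpers above become read-modify-write operations on the peer config held by the master rather than direct ZooKeeper updates. A hedged sketch of that flow from a client's point of view; the peer id, table, and column family names are made up:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

    final class PeerTableCfsExample {
      // Fetch the peer config from the master, adjust the table-CF map
      // locally, then write the whole config back through the master RPC.
      static void addColumnFamilyToPeer(Admin admin) throws IOException {
        ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig("1");
        Map<TableName, List<String>> tableCfs = peerConfig.getTableCFsMap();
        if (tableCfs == null) {
          tableCfs = new HashMap<>();
        }
        tableCfs.computeIfAbsent(TableName.valueOf("t1"), k -> new ArrayList<>()).add("cf1");
        peerConfig.setTableCFsMap(tableCfs);
        admin.updateReplicationPeerConfig("1", peerConfig);
      }
    }
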
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 424d578..4231a82 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -115,7 +115,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
@@ -1595,4 +1597,20 @@ public final class RequestConverter {
     builder.setPeerId(peerId);
     return builder.build();
   }
+
+  public static GetReplicationPeerConfigRequest buildGetReplicationPeerConfigRequest(
+      String peerId) {
+    GetReplicationPeerConfigRequest.Builder builder = GetReplicationPeerConfigRequest.newBuilder();
+    builder.setPeerId(peerId);
+    return builder.build();
+  }
+
+  public static UpdateReplicationPeerConfigRequest buildUpdateReplicationPeerConfigRequest(
+      String peerId, ReplicationPeerConfig peerConfig) {
+    UpdateReplicationPeerConfigRequest.Builder builder = UpdateReplicationPeerConfigRequest
+        .newBuilder();
+    builder.setPeerId(peerId);
+    builder.setPeerConfig(ReplicationSerDeHelper.convert(peerConfig));
+    return builder.build();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 1794a49..d56c534 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -65991,7 +65991,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName>
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> 
         getServerNameList();
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
@@ -66004,7 +66004,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
         getServerNameOrBuilderList();
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
@@ -66102,7 +66102,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
         getServerNameOrBuilderList() {
       return serverName_;
     }
@@ -66409,7 +66409,7 @@ public final class MasterProtos {
               serverNameBuilder_ = null;
               serverName_ = other.serverName_;
               bitField0_ = (bitField0_ & ~0x00000001);
-              serverNameBuilder_ =
+              serverNameBuilder_ = 
                 org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                    getServerNameFieldBuilder() : null;
             } else {
@@ -66645,7 +66645,7 @@ public final class MasterProtos {
       /**
        * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
        */
-      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
            getServerNameOrBuilderList() {
         if (serverNameBuilder_ != null) {
           return serverNameBuilder_.getMessageOrBuilderList();
@@ -66671,12 +66671,12 @@ public final class MasterProtos {
       /**
        * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
        */
-      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder>
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder> 
            getServerNameBuilderList() {
         return getServerNameFieldBuilder().getBuilderList();
       }
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
           getServerNameFieldBuilder() {
         if (serverNameBuilder_ == null) {
           serverNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
@@ -66745,7 +66745,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName>
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> 
         getServerNameList();
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
@@ -66758,7 +66758,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
         getServerNameOrBuilderList();
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
@@ -66856,7 +66856,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
         getServerNameOrBuilderList() {
       return serverName_;
     }
@@ -67163,7 +67163,7 @@ public final class MasterProtos {
               serverNameBuilder_ = null;
               serverName_ = other.serverName_;
               bitField0_ = (bitField0_ & ~0x00000001);
-              serverNameBuilder_ =
+              serverNameBuilder_ = 
                 org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                    getServerNameFieldBuilder() : null;
             } else {
@@ -67399,7 +67399,7 @@ public final class MasterProtos {
       /**
        * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
        */
-      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
            getServerNameOrBuilderList() {
         if (serverNameBuilder_ != null) {
           return serverNameBuilder_.getMessageOrBuilderList();
@@ -67425,12 +67425,12 @@ public final class MasterProtos {
       /**
        * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
        */
-      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder>
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder> 
            getServerNameBuilderList() {
         return getServerNameFieldBuilder().getBuilderList();
       }
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
           getServerNameFieldBuilder() {
         if (serverNameBuilder_ == null) {
           serverNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
@@ -67872,7 +67872,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName>
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName> 
         getServerNameList();
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
@@ -67885,7 +67885,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
         getServerNameOrBuilderList();
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
@@ -67983,7 +67983,7 @@ public final class MasterProtos {
     /**
      * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
      */
-    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
         getServerNameOrBuilderList() {
       return serverName_;
     }
@@ -68290,7 +68290,7 @@ public final class MasterProtos {
               serverNameBuilder_ = null;
               serverName_ = other.serverName_;
               bitField0_ = (bitField0_ & ~0x00000001);
-              serverNameBuilder_ =
+              serverNameBuilder_ = 
                 org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                    getServerNameFieldBuilder() : null;
             } else {
@@ -68526,7 +68526,7 @@ public final class MasterProtos {
       /**
        * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
        */
-      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
            getServerNameOrBuilderList() {
         if (serverNameBuilder_ != null) {
           return serverNameBuilder_.getMessageOrBuilderList();
@@ -68552,12 +68552,12 @@ public final class MasterProtos {
       /**
        * <code>repeated .hbase.pb.ServerName server_name = 1;</code>
        */
-      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder>
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder> 
            getServerNameBuilderList() {
         return getServerNameFieldBuilder().getBuilderList();
       }
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
           getServerNameFieldBuilder() {
         if (serverNameBuilder_ == null) {
           serverNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
@@ -69775,7 +69775,31 @@ public final class MasterProtos {
 
       /**
        * <pre>
-       ** Returns a list of ServerNames marked as draining.
+       ** Return peer config for a replication peer 
+       * </pre>
+       *
+       * <code>rpc GetReplicationPeerConfig(.hbase.pb.GetReplicationPeerConfigRequest) returns (.hbase.pb.GetReplicationPeerConfigResponse);</code>
+       */
+      public abstract void getReplicationPeerConfig(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse> done);
+
+      /**
+       * <pre>
+       ** Update peer config for a replication peer 
+       * </pre>
+       *
+       * <code>rpc UpdateReplicationPeerConfig(.hbase.pb.UpdateReplicationPeerConfigRequest) returns (.hbase.pb.UpdateReplicationPeerConfigResponse);</code>
+       */
+      public abstract void updateReplicationPeerConfig(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse> done);
+
+      /**
+       * <pre>
+       ** Returns a list of ServerNames marked as draining. 
        * </pre>
        *
        * <code>rpc listDrainingRegionServers(.hbase.pb.ListDrainingRegionServersRequest) returns (.hbase.pb.ListDrainingRegionServersResponse);</code>
@@ -69787,7 +69811,7 @@ public final class MasterProtos {
 
       /**
        * <pre>
-       ** Mark a list of ServerNames as draining.
+       ** Mark a list of ServerNames as draining. 
        * </pre>
        *
        * <code>rpc drainRegionServers(.hbase.pb.DrainRegionServersRequest) returns (.hbase.pb.DrainRegionServersResponse);</code>
@@ -69799,7 +69823,7 @@ public final class MasterProtos {
 
       /**
        * <pre>
-       ** Unmark a list of ServerNames marked as draining.
+       ** Unmark a list of ServerNames marked as draining. 
        * </pre>
        *
        * <code>rpc removeDrainFromRegionServers(.hbase.pb.RemoveDrainFromRegionServersRequest) returns (.hbase.pb.RemoveDrainFromRegionServersResponse);</code>
@@ -70311,6 +70335,22 @@ public final class MasterProtos {
         }
 
         @java.lang.Override
+        public  void getReplicationPeerConfig(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse> done) {
+          impl.getReplicationPeerConfig(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void updateReplicationPeerConfig(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse> done) {
+          impl.updateReplicationPeerConfig(controller, request, done);
+        }
+
+        @java.lang.Override
         public  void listDrainingRegionServers(
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
             org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request,
@@ -70481,10 +70521,14 @@ public final class MasterProtos {
             case 61:
               return impl.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request);
             case 62:
-              return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request);
+              return impl.getReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)request);
             case 63:
-              return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
+              return impl.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request);
             case 64:
+              return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request);
+            case 65:
+              return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
+            case 66:
               return impl.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -70625,10 +70669,14 @@ public final class MasterProtos {
             case 61:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
             case 62:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
             case 63:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
             case 64:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+            case 65:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+            case 66:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -70769,10 +70817,14 @@ public final class MasterProtos {
             case 61:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
             case 62:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
             case 63:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
             case 64:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+            case 65:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+            case 66:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -71557,7 +71609,31 @@ public final class MasterProtos {
 
     /**
      * <pre>
-     ** Returns a list of ServerNames marked as draining.
+     ** Return peer config for a replication peer 
+     * </pre>
+     *
+     * <code>rpc GetReplicationPeerConfig(.hbase.pb.GetReplicationPeerConfigRequest) returns (.hbase.pb.GetReplicationPeerConfigResponse);</code>
+     */
+    public abstract void getReplicationPeerConfig(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse> done);
+
+    /**
+     * <pre>
+     ** Update peer config for a replication peer 
+     * </pre>
+     *
+     * <code>rpc UpdateReplicationPeerConfig(.hbase.pb.UpdateReplicationPeerConfigRequest) returns (.hbase.pb.UpdateReplicationPeerConfigResponse);</code>
+     */
+    public abstract void updateReplicationPeerConfig(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse> done);
+
+    /**
+     * <pre>
+     ** Returns a list of ServerNames marked as draining. 
      * </pre>
      *
      * <code>rpc listDrainingRegionServers(.hbase.pb.ListDrainingRegionServersRequest) returns (.hbase.pb.ListDrainingRegionServersResponse);</code>
@@ -71569,7 +71645,7 @@ public final class MasterProtos {
 
     /**
      * <pre>
-     ** Mark a list of ServerNames as draining.
+     ** Mark a list of ServerNames as draining. 
      * </pre>
      *
      * <code>rpc drainRegionServers(.hbase.pb.DrainRegionServersRequest) returns (.hbase.pb.DrainRegionServersResponse);</code>
@@ -71581,7 +71657,7 @@ public final class MasterProtos {
 
     /**
      * <pre>
-     ** Unmark a list of ServerNames marked as draining.
+     ** Unmark a list of ServerNames marked as draining. 
      * </pre>
      *
      * <code>rpc removeDrainFromRegionServers(.hbase.pb.RemoveDrainFromRegionServersRequest) returns (.hbase.pb.RemoveDrainFromRegionServersResponse);</code>
@@ -71924,16 +72000,26 @@ public final class MasterProtos {
               done));
           return;
         case 62:
+          this.getReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse>specializeCallback(
+              done));
+          return;
+        case 63:
+          this.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse>specializeCallback(
+              done));
+          return;
+        case 64:
           this.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse>specializeCallback(
               done));
           return;
-        case 63:
+        case 65:
           this.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse>specializeCallback(
               done));
           return;
-        case 64:
+        case 66:
           this.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse>specializeCallback(
               done));
@@ -72077,10 +72163,14 @@ public final class MasterProtos {
         case 61:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
         case 62:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
         case 63:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
         case 64:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+        case 65:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+        case 66:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -72221,10 +72311,14 @@ public final class MasterProtos {
         case 61:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
         case 62:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
         case 63:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
         case 64:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+        case 65:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+        case 66:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -73177,12 +73271,42 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance()));
       }
 
+      public  void getReplicationPeerConfig(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(62),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.class,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance()));
+      }
+
+      public  void updateReplicationPeerConfig(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(63),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.class,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance()));
+      }
+
       public  void listDrainingRegionServers(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(62),
+          getDescriptor().getMethods().get(64),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance(),
@@ -73197,7 +73321,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(63),
+          getDescriptor().getMethods().get(65),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance(),
@@ -73212,7 +73336,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(64),
+          getDescriptor().getMethods().get(66),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance(),
@@ -73539,6 +73663,16 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse getReplicationPeerConfig(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
       public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse listDrainingRegionServers(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request)
@@ -74306,12 +74440,36 @@ public final class MasterProtos {
       }
 
 
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse getReplicationPeerConfig(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(62),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(63),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance());
+      }
+
+
       public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse listDrainingRegionServers(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(62),
+          getDescriptor().getMethods().get(64),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance());
@@ -74323,7 +74481,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(63),
+          getDescriptor().getMethods().get(65),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance());
@@ -74335,7 +74493,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(64),
+          getDescriptor().getMethods().get(66),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance());
@@ -74913,32 +75071,32 @@ public final class MasterProtos {
       internal_static_hbase_pb_SecurityCapabilitiesResponse_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_ListDrainingRegionServersRequest_descriptor;
-  private static final
+  private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_ListDrainingRegionServersRequest_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_ListDrainingRegionServersResponse_descriptor;
-  private static final
+  private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_ListDrainingRegionServersResponse_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_DrainRegionServersRequest_descriptor;
-  private static final
+  private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_DrainRegionServersRequest_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_DrainRegionServersResponse_descriptor;
-  private static final
+  private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_DrainRegionServersResponse_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_RemoveDrainFromRegionServersRequest_descriptor;
-  private static final
+  private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_RemoveDrainFromRegionServersRequest_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_RemoveDrainFromRegionServersResponse_descriptor;
-  private static final
+  private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_RemoveDrainFromRegionServersResponse_fieldAccessorTable;
 
@@ -75161,7 +75319,7 @@ public final class MasterProtos {
       "\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerN" +
       "ame\"&\n$RemoveDrainFromRegionServersRespo",
       "nse*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005ME" +
-      "RGE\020\0012\251/\n\rMasterService\022e\n\024GetSchemaAlte" +
+      "RGE\020\0012\2301\n\rMasterService\022e\n\024GetSchemaAlte" +
       "rStatus\022%.hbase.pb.GetSchemaAlterStatusR" +
       "equest\032&.hbase.pb.GetSchemaAlterStatusRe" +
       "sponse\022b\n\023GetTableDescriptors\022$.hbase.pb" +
@@ -75304,17 +75462,23 @@ public final class MasterProtos {
       "onPeerResponse\022k\n\026DisableReplicationPeer" +
       "\022\'.hbase.pb.DisableReplicationPeerReques" +
       "t\032(.hbase.pb.DisableReplicationPeerRespo" +
-      "nse\022t\n\031listDrainingRegionServers\022*.hbase" +
-      ".pb.ListDrainingRegionServersRequest\032+.h" +
-      "base.pb.ListDrainingRegionServersRespons" +
-      "e\022_\n\022drainRegionServers\022#.hbase.pb.Drain" +
-      "RegionServersRequest\032$.hbase.pb.DrainReg" +
-      "ionServersResponse\022}\n\034removeDrainFromReg",
-      "ionServers\022-.hbase.pb.RemoveDrainFromReg" +
-      "ionServersRequest\032..hbase.pb.RemoveDrain" +
-      "FromRegionServersResponseBI\n1org.apache." +
-      "hadoop.hbase.shaded.protobuf.generatedB\014" +
-      "MasterProtosH\001\210\001\001\240\001\001"
+      "nse\022q\n\030GetReplicationPeerConfig\022).hbase." +
+      "pb.GetReplicationPeerConfigRequest\032*.hba" +
+      "se.pb.GetReplicationPeerConfigResponse\022z" +
+      "\n\033UpdateReplicationPeerConfig\022,.hbase.pb" +
+      ".UpdateReplicationPeerConfigRequest\032-.hb" +
+      "ase.pb.UpdateReplicationPeerConfigRespon",
+      "se\022t\n\031listDrainingRegionServers\022*.hbase." +
+      "pb.ListDrainingRegionServersRequest\032+.hb" +
+      "ase.pb.ListDrainingRegionServersResponse" +
+      "\022_\n\022drainRegionServers\022#.hbase.pb.DrainR" +
+      "egionServersRequest\032$.hbase.pb.DrainRegi" +
+      "onServersResponse\022}\n\034removeDrainFromRegi" +
+      "onServers\022-.hbase.pb.RemoveDrainFromRegi" +
+      "onServersRequest\032..hbase.pb.RemoveDrainF" +
+      "romRegionServersResponseBI\n1org.apache.h" +
+      "adoop.hbase.shaded.protobuf.generatedB\014M",
+      "asterProtosH\001\210\001\001\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.    InternalDescriptorAssigner() {


[28/50] [abbrv] hbase git commit: HBASE-17081 [Recommit] Flush the entire CompactingMemStore content to disk (Anastasia)

Posted by sy...@apache.org.
HBASE-17081 [Recommit] Flush the entire CompactingMemStore content to disk
(Anastasia)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/463ffa79
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/463ffa79
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/463ffa79

Branch: refs/heads/hbase-12439
Commit: 463ffa792a23799d8cf2406321d1c8a3acacded1
Parents: 8fa5b0b
Author: Ramkrishna <ra...@intel.com>
Authored: Mon Dec 26 22:05:13 2016 +0530
Committer: Ramkrishna <ra...@intel.com>
Committed: Mon Dec 26 22:05:13 2016 +0530

----------------------------------------------------------------------
 .../hbase/regionserver/AbstractMemStore.java    |  35 +-
 .../hbase/regionserver/CompactingMemStore.java  |  83 +++--
 .../hbase/regionserver/CompactionPipeline.java  |  34 +-
 .../regionserver/CompositeImmutableSegment.java | 352 +++++++++++++++++++
 .../hbase/regionserver/DefaultMemStore.java     |  23 +-
 .../hadoop/hbase/regionserver/HRegion.java      |   5 +-
 .../hbase/regionserver/ImmutableSegment.java    |  23 +-
 .../hbase/regionserver/MemStoreCompactor.java   |   4 +-
 .../hadoop/hbase/regionserver/MemstoreSize.java |  25 +-
 .../hadoop/hbase/regionserver/Segment.java      |  21 +-
 .../hbase/regionserver/SegmentFactory.java      |  10 +
 .../regionserver/TestCompactingMemStore.java    |   8 +-
 .../hbase/regionserver/TestDefaultMemStore.java |  12 +-
 .../TestWalAndCompactingMemStoreFlush.java      | 238 +++++++------
 14 files changed, 698 insertions(+), 175 deletions(-)
----------------------------------------------------------------------
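The heart of this change, visible in the CompactingMemStore and CompactionPipeline hunks below, is that snapshot() may now drain the whole compaction pipeline into the snapshot instead of only its tail segment. As a rough, self-contained sketch of that decision (simplified stand-in types, not the actual HBase classes):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

/** Minimal stand-in for a memstore segment: just a size in bytes. */
final class MiniSegment {
  final long dataSize;
  MiniSegment(long dataSize) { this.dataSize = dataSize; }
}

/** Sketch of the snapshot decision: flush only the pipeline tail, or everything. */
public final class CompositeSnapshotSketch {
  private final Deque<MiniSegment> pipeline = new ArrayDeque<>(); // head = newest, tail = oldest
  private final List<MiniSegment> snapshot = new ArrayList<>();
  private boolean compositeSnapshot = true; // composite behaviour, as in the patch's default
  private MiniSegment active = new MiniSegment(0);

  void add(long bytes) { active = new MiniSegment(active.dataSize + bytes); }

  /** In-memory flush: the active segment becomes an immutable pipeline segment. */
  void inMemoryFlush() {
    pipeline.addFirst(active);
    active = new MiniSegment(0);
  }

  /** Flush-to-disk snapshot: either only the oldest segment or the whole pipeline. */
  void takeSnapshot() {
    inMemoryFlush(); // push whatever is still active into the pipeline first
    if (compositeSnapshot) {
      while (!pipeline.isEmpty()) {
        snapshot.add(pipeline.removeLast()); // drain everything, oldest first
      }
    } else {
      snapshot.add(pipeline.removeLast());   // old behaviour: tail segment only
    }
  }

  long snapshotSize() {
    return snapshot.stream().mapToLong(s -> s.dataSize).sum();
  }

  public static void main(String[] args) {
    CompositeSnapshotSketch m = new CompositeSnapshotSketch();
    m.add(10);
    m.inMemoryFlush();
    m.add(5);
    m.takeSnapshot();
    System.out.println(m.snapshotSize()); // 15: the entire memstore content ends up in the snapshot
  }
}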


http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index 225dd73..8564045 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -159,14 +159,12 @@ public abstract class AbstractMemStore implements MemStore {
   public String toString() {
     StringBuffer buf = new StringBuffer();
     int i = 1;
-    try {
-      for (Segment segment : getSegments()) {
-        buf.append("Segment (" + i + ") " + segment.toString() + "; ");
-        i++;
-      }
-    } catch (IOException e){
-      return e.toString();
+
+    for (Segment segment : getSegments()) {
+      buf.append("Segment (" + i + ") " + segment.toString() + "; ");
+      i++;
     }
+
     return buf.toString();
   }
 
@@ -232,6 +230,7 @@ public abstract class AbstractMemStore implements MemStore {
    * @return Next row or null if none found.  If one found, will be a new
    * KeyValue -- can be destroyed by subsequent calls to this method.
    */
+  @VisibleForTesting
   protected Cell getNextRow(final Cell key,
       final NavigableSet<Cell> set) {
     Cell result = null;
@@ -249,6 +248,26 @@ public abstract class AbstractMemStore implements MemStore {
     return result;
   }
 
+  /**
+   * @param cell Find the row that comes after this one.  If null, we return the
+   *             first.
+   * @return Next row or null if none found.
+   */
+  @VisibleForTesting
+  Cell getNextRow(final Cell cell) {
+    Cell lowest = null;
+    List<Segment> segments = getSegments();
+    for (Segment segment : segments) {
+      if (lowest == null) {
+        //TODO: we may want to move the getNextRow ability to the segment
+        lowest = getNextRow(cell, segment.getCellSet());
+      } else {
+        lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));
+      }
+    }
+    return lowest;
+  }
+
   private Cell maybeCloneWithAllocator(Cell cell) {
     return active.maybeCloneWithAllocator(cell);
   }
@@ -307,6 +326,6 @@ public abstract class AbstractMemStore implements MemStore {
   /**
    * @return an ordered list of segments from most recent to oldest in memstore
    */
-  protected abstract List<Segment> getSegments() throws IOException;
+  protected abstract List<Segment> getSegments();
 
 }
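The getNextRow(Cell) pulled up into AbstractMemStore above asks each segment for its next row and keeps the lowest candidate. A rough standalone sketch of the same idea, with plain sorted string sets standing in for per-segment cell sets:

import java.util.ArrayList;
import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;

/** Sketch: find the smallest row after a given key across several sorted segments. */
public final class NextRowSketch {

  /** Per-segment lookup: first entry when the key is null, otherwise the next higher entry. */
  static String nextRow(String key, NavigableSet<String> segment) {
    return key == null ? segment.first() : segment.higher(key);
  }

  /** Memstore-level lookup: take the lowest candidate over all segments. */
  static String nextRow(String key, List<NavigableSet<String>> segments) {
    String lowest = null;
    for (NavigableSet<String> segment : segments) {
      if (segment.isEmpty()) {
        continue;
      }
      String candidate = nextRow(key, segment);
      if (candidate != null && (lowest == null || candidate.compareTo(lowest) < 0)) {
        lowest = candidate;
      }
    }
    return lowest;
  }

  public static void main(String[] args) {
    List<NavigableSet<String>> segments = new ArrayList<>();
    segments.add(new TreeSet<>(List.of("row2", "row5")));
    segments.add(new TreeSet<>(List.of("row1", "row9")));
    System.out.println(nextRow("row1", segments)); // row2
  }
}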

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index f8192a2..1cd30dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -72,6 +72,7 @@ public class CompactingMemStore extends AbstractMemStore {
   private final AtomicBoolean inMemoryFlushInProgress = new AtomicBoolean(false);
   @VisibleForTesting
   private final AtomicBoolean allowCompaction = new AtomicBoolean(true);
+  private boolean compositeSnapshot = true;
 
   public static final long DEEP_OVERHEAD = AbstractMemStore.DEEP_OVERHEAD
       + 6 * ClassSize.REFERENCE // Store, RegionServicesForStores, CompactionPipeline,
@@ -160,7 +161,12 @@ public class CompactingMemStore extends AbstractMemStore {
       stopCompaction();
       pushActiveToPipeline(this.active);
       snapshotId = EnvironmentEdgeManager.currentTime();
-      pushTailToSnapshot();
+      // in both cases whatever is pushed to snapshot is cleared from the pipeline
+      if (compositeSnapshot) {
+        pushPipelineToSnapshot();
+      } else {
+        pushTailToSnapshot();
+      }
     }
     return new MemStoreSnapshot(snapshotId, this.snapshot);
   }
@@ -173,8 +179,13 @@ public class CompactingMemStore extends AbstractMemStore {
   public MemstoreSize getFlushableSize() {
     MemstoreSize snapshotSize = getSnapshotSize();
     if (snapshotSize.getDataSize() == 0) {
-      // if snapshot is empty the tail of the pipeline is flushed
-      snapshotSize = pipeline.getTailSize();
+      // if snapshot is empty the tail of the pipeline (or everything in the memstore) is flushed
+      if (compositeSnapshot) {
+        snapshotSize = pipeline.getPipelineSize();
+        snapshotSize.incMemstoreSize(this.active.keySize(), this.active.heapOverhead());
+      } else {
+        snapshotSize = pipeline.getTailSize();
+      }
     }
     return snapshotSize.getDataSize() > 0 ? snapshotSize
         : new MemstoreSize(this.active.keySize(), this.active.heapOverhead());
@@ -213,16 +224,28 @@ public class CompactingMemStore extends AbstractMemStore {
     }
   }
 
+  // the getSegments() method is used for tests only
+  @VisibleForTesting
   @Override
   public List<Segment> getSegments() {
     List<Segment> pipelineList = pipeline.getSegments();
     List<Segment> list = new ArrayList<Segment>(pipelineList.size() + 2);
     list.add(this.active);
     list.addAll(pipelineList);
-    list.add(this.snapshot);
+    list.addAll(this.snapshot.getAllSegments());
+
     return list;
   }
 
+  // the following methods allow manipulating the composite snapshot setting
+  public void setCompositeSnapshot(boolean useCompositeSnapshot) {
+    this.compositeSnapshot = useCompositeSnapshot;
+  }
+
+  public boolean isCompositeSnapshot() {
+    return this.compositeSnapshot;
+  }
+
   public boolean swapCompactedSegments(VersionedSegmentsList versionedList, ImmutableSegment result,
       boolean merge) {
     return pipeline.swap(versionedList, result, !merge);
@@ -262,18 +285,20 @@ public class CompactingMemStore extends AbstractMemStore {
    * Scanners are ordered from 0 (oldest) to newest in increasing order.
    */
   public List<KeyValueScanner> getScanners(long readPt) throws IOException {
-    List<Segment> pipelineList = pipeline.getSegments();
-    long order = pipelineList.size();
-    // The list of elements in pipeline + the active element + the snapshot segment
-    // TODO : This will change when the snapshot is made of more than one element
-    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(pipelineList.size() + 2);
-    list.add(this.active.getScanner(readPt, order + 1));
-    for (Segment item : pipelineList) {
-      list.add(item.getScanner(readPt, order));
-      order--;
-    }
-    list.add(this.snapshot.getScanner(readPt, order));
-    return Collections.<KeyValueScanner> singletonList(new MemStoreScanner(getComparator(), list));
+
+    int order = 1;                        // for active segment
+    order += pipeline.size();             // for all segments in the pipeline
+    order += snapshot.getNumOfSegments(); // for all segments in the snapshot
+    // TODO: check alternatives to using this order
+    // The list of elements in pipeline + the active element + the snapshot segments
+    // The order is the Segment ordinal
+    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(order);
+    list.add(this.active.getScanner(readPt, order));
+    order--;
+    list.addAll(pipeline.getScanners(readPt,order));
+    order -= pipeline.size();
+    list.addAll(snapshot.getScanners(readPt,order));
+    return Collections.<KeyValueScanner>singletonList(new MemStoreScanner(getComparator(), list));
   }
 
   /**
@@ -380,6 +405,14 @@ public class CompactingMemStore extends AbstractMemStore {
     }
   }
 
+  private void pushPipelineToSnapshot() {
+    List<ImmutableSegment> segments = pipeline.drain();
+    if (!segments.isEmpty()) {
+      this.snapshot =
+          SegmentFactory.instance().createCompositeImmutableSegment(getComparator(),segments);
+    }
+  }
+
   private RegionServicesForStores getRegionServices() {
     return regionServices;
   }
@@ -427,24 +460,6 @@ public class CompactingMemStore extends AbstractMemStore {
     compactor.initiateAction(compactionType);
   }
 
-  /**
-   * @param cell Find the row that comes after this one.  If null, we return the
-   *             first.
-   * @return Next row or null if none found.
-   */
-  Cell getNextRow(final Cell cell) {
-    Cell lowest = null;
-    List<Segment> segments = getSegments();
-    for (Segment segment : segments) {
-      if (lowest == null) {
-        lowest = getNextRow(cell, segment.getCellSet());
-      } else {
-        lowest = getLowest(lowest, getNextRow(cell, segment.getCellSet()));
-      }
-    }
-    return lowest;
-  }
-
   // debug method
   public void debug() {
     String msg = "active size=" + this.active.keySize();
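The reworked getScanners(long) above starts the scanner ordinal at the total number of segments (active + pipeline + snapshot) and hands out decreasing values, newest segment first. A small sketch of that ordinal assignment, with strings standing in for segments:

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/** Sketch: assign scanner ordinals when active, pipeline and snapshot segments are combined. */
public final class ScannerOrderSketch {

  static Map<String, Long> assignOrders(List<String> pipeline, List<String> snapshotSegments) {
    long order = 1 + pipeline.size() + snapshotSegments.size(); // one slot per segment
    Map<String, Long> orders = new LinkedHashMap<>();
    orders.put("active", order--);            // newest data, highest ordinal
    for (String s : pipeline) {
      orders.put(s, order--);                 // pipeline segments, newest to oldest
    }
    for (String s : snapshotSegments) {
      orders.put(s, order--);                 // snapshot segments come last
    }
    return orders;
  }

  public static void main(String[] args) {
    System.out.println(assignOrders(List.of("p1", "p2"), List.of("s1")));
    // {active=4, p1=3, p2=2, s1=1}
  }
}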

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index 6676170..2fd2a14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -77,6 +78,19 @@ public class CompactionPipeline {
     }
   }
 
+  public List<ImmutableSegment> drain() {
+    int drainSize = pipeline.size();
+    List<ImmutableSegment> result = new ArrayList<ImmutableSegment>(drainSize);
+    synchronized (pipeline){
+      version++;
+      for(int i=0; i<drainSize; i++) {
+        ImmutableSegment segment = this.pipeline.removeFirst();
+        result.add(i,segment);
+      }
+      return result;
+    }
+  }
+
   public VersionedSegmentsList getVersionedList() {
     synchronized (pipeline){
       LinkedList<ImmutableSegment> segmentList = new LinkedList<ImmutableSegment>(pipeline);
@@ -193,8 +207,7 @@ public class CompactionPipeline {
 
   public List<Segment> getSegments() {
     synchronized (pipeline){
-      List<Segment> res = new LinkedList<Segment>(pipeline);
-      return res;
+      return new LinkedList<Segment>(pipeline);
     }
   }
 
@@ -202,6 +215,18 @@ public class CompactionPipeline {
     return pipeline.size();
   }
 
+  public List<KeyValueScanner> getScanners(long readPoint, long order) {
+    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(this.pipeline.size());
+    for (Segment segment : this.pipeline) {
+      scanners.add(segment.getScanner(readPoint, order));
+      // The order is the Segment ordinal
+      order--;
+      assert order>=0; // order should never be negative so this is just a sanity check
+    }
+    return scanners;
+  }
+
+
   public long getMinSequenceId() {
     long minSequenceId = Long.MAX_VALUE;
     if (!isEmpty()) {
@@ -215,6 +240,11 @@ public class CompactionPipeline {
     return new MemstoreSize(pipeline.peekLast().keySize(), pipeline.peekLast().heapOverhead());
   }
 
+  public MemstoreSize getPipelineSize() {
+    if (isEmpty()) return MemstoreSize.EMPTY_SIZE;
+    return new MemstoreSize(getSegmentsKeySize(pipeline), getSegmentsHeapOverhead(pipeline));
+  }
+
   private void swapSuffix(List<ImmutableSegment> suffix, ImmutableSegment segment,
       boolean closeSegmentsInSuffix) {
     version++;
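The new drain() above removes every segment from the pipeline while holding the pipeline lock and bumps the version counter, so a concurrent swap based on a stale view of the pipeline is rejected. A simplified standalone sketch of the same locking pattern:

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;

/** Sketch: drain every segment of a pipeline atomically, bumping a version counter. */
public final class PipelineDrainSketch {
  private final LinkedList<String> pipeline = new LinkedList<>(); // first = newest
  private long version = 0;

  void push(String segment) {
    synchronized (pipeline) {
      pipeline.addFirst(segment);
    }
  }

  /** Removes all segments (newest first) under the pipeline lock, as one atomic step. */
  List<String> drain() {
    synchronized (pipeline) {
      version++; // invalidates any concurrently held versioned view of the pipeline
      List<String> drained = new ArrayList<>(pipeline);
      pipeline.clear();
      return drained;
    }
  }

  public static void main(String[] args) {
    PipelineDrainSketch p = new PipelineDrainSketch();
    p.push("seg-old");
    p.push("seg-new");
    System.out.println(p.drain()); // [seg-new, seg-old]
    System.out.println(p.drain()); // []
  }
}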

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
new file mode 100644
index 0000000..4fdd2d0
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
@@ -0,0 +1,352 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Scan;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.SortedSet;
+
+/**
+ * A CompositeImmutableSegment is created as a collection of ImmutableSegments and supports
+ * the interface of a single ImmutableSegment.
+ * The CompositeImmutableSegment is intended to be used only as a snapshot,
+ * so only the relevant parts of the interface are supported.
+ */
+@InterfaceAudience.Private
+public class CompositeImmutableSegment extends ImmutableSegment {
+
+  private final List<ImmutableSegment> segments;
+  private final CellComparator comparator;
+  // CompositeImmutableSegment is used for snapshots, and a snapshot should
+  // support the getTimeRangeTracker() interface.
+  // Thus we hold a constant TRT, built at construction time from the TRTs of the given segments.
+  private final TimeRangeTracker timeRangeTracker;
+  private long keySize = 0;
+
+  // This scanner needs to be remembered so it can be closed when the snapshot is cleared.
+  // Initially CollectionBackedScanner did not raise the scanner counters, so there was no
+  // need to close it. Now that MemStoreScanner is used instead, we need to decrease the
+  // scanner counters.
+  private KeyValueScanner flushingScanner = null;
+
+  public CompositeImmutableSegment(CellComparator comparator, List<ImmutableSegment> segments) {
+    super(comparator);
+    this.comparator = comparator;
+    this.segments = segments;
+    this.timeRangeTracker = new TimeRangeTracker();
+    for (ImmutableSegment s : segments) {
+      this.timeRangeTracker.includeTimestamp(s.getTimeRangeTracker().getMax());
+      this.timeRangeTracker.includeTimestamp(s.getTimeRangeTracker().getMin());
+      this.keySize += s.keySize();
+    }
+  }
+
+  @VisibleForTesting
+  public List<Segment> getAllSegments() {
+    return new LinkedList<Segment>(segments);
+  }
+
+  public long getNumOfSegments() {
+    return segments.size();
+  }
+
+  /**
+   * Builds a special scanner for the MemStoreSnapshot object that is different than the
+   * general segment scanner.
+   * @return a special scanner for the MemStoreSnapshot object
+   */
+  public KeyValueScanner getKeyValueScanner() {
+    KeyValueScanner scanner;
+    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(segments.size());
+    for (ImmutableSegment s : segments) {
+      list.add(s.getScanner(Long.MAX_VALUE));
+    }
+
+    try {
+      scanner = new MemStoreScanner(getComparator(), list);
+    } catch (IOException ie) {
+      throw new IllegalStateException(ie);
+    }
+
+    flushingScanner = scanner;
+    return scanner;
+  }
+
+  @Override
+  public List<KeyValueScanner> getScanners(long readPoint, long order) {
+    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(this.segments.size());
+    for (Segment segment : this.segments) {
+      scanners.add(segment.getScanner(readPoint, order));
+      // The order is the Segment ordinal
+      order--;
+      // order should never be negative so this is just a sanity check
+      order = (order<0) ? 0 : order;
+    }
+    return scanners;
+  }
+
+  /**
+   * @return whether the segment has any cells
+   */
+  public boolean isEmpty() {
+    for (ImmutableSegment s : segments) {
+      if (!s.isEmpty()) return false;
+    }
+    return true;
+  }
+
+  /**
+   * @return number of cells in segment
+   */
+  public int getCellsCount() {
+    int result = 0;
+    for (ImmutableSegment s : segments) {
+      result += s.getCellsCount();
+    }
+    return result;
+  }
+
+  /**
+   * @return the first cell in the segment that has equal or greater key than the given cell
+   */
+  public Cell getFirstAfter(Cell cell) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * Closing a segment before it is being discarded
+   */
+  public void close() {
+    if (flushingScanner != null) {
+      flushingScanner.close();
+      flushingScanner = null;
+    }
+    for (ImmutableSegment s : segments) {
+      s.close();
+    }
+  }
+
+  /**
+   * If the segment has a memory allocator the cell is being cloned to this space, and returned;
+   * otherwise the given cell is returned
+   * @return either the given cell or its clone
+   */
+  public Cell maybeCloneWithAllocator(Cell cell) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public boolean shouldSeek(Scan scan, long oldestUnexpiredTS){
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public long getMinTimestamp(){
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * Creates the scanner for the given read point
+   * @return a scanner for the given read point
+   */
+  public KeyValueScanner getScanner(long readPoint) {
+    KeyValueScanner resultScanner;
+    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(segments.size());
+    for (ImmutableSegment s : segments) {
+      list.add(s.getScanner(readPoint));
+    }
+
+    try {
+      resultScanner = new MemStoreScanner(getComparator(), list);
+    } catch (IOException ie) {
+      throw new IllegalStateException(ie);
+    }
+
+    return resultScanner;
+  }
+
+  /**
+   * Creates the scanner for the given read point, and a specific order in a list
+   * @return a scanner for the given read point
+   */
+  public KeyValueScanner getScanner(long readPoint, long order) {
+    KeyValueScanner resultScanner;
+    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(segments.size());
+    for (ImmutableSegment s : segments) {
+      list.add(s.getScanner(readPoint,order));
+    }
+
+    try {
+      resultScanner = new MemStoreScanner(getComparator(), list);
+    } catch (IOException ie) {
+      throw new IllegalStateException(ie);
+    }
+
+    return resultScanner;
+  }
+
+  public boolean isTagsPresent() {
+    for (ImmutableSegment s : segments) {
+      if (s.isTagsPresent()) return true;
+    }
+    return false;
+  }
+
+  public void incScannerCount() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public void decScannerCount() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * Setting the CellSet of the segment - used only for flat immutable segment for setting
+   * immutable CellSet after its creation in immutable segment constructor
+   * @return this object
+   */
+
+  protected CompositeImmutableSegment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * @return Sum of all cell's size.
+   */
+  public long keySize() {
+    return this.keySize;
+  }
+
+  /**
+   * @return The heap overhead of this segment.
+   */
+  public long heapOverhead() {
+    long result = 0;
+    for (ImmutableSegment s : segments) {
+      result += s.heapOverhead();
+    }
+    return result;
+  }
+
+  /**
+   * Updates the heap size counter of the segment by the given delta
+   */
+  protected void incSize(long delta, long heapOverhead) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  protected void incHeapOverheadSize(long delta) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public long getMinSequenceId() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public TimeRangeTracker getTimeRangeTracker() {
+    return this.timeRangeTracker;
+  }
+
+  //*** Methods for SegmentsScanner
+  public Cell last() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public Iterator<Cell> iterator() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public SortedSet<Cell> headSet(Cell firstKeyOnRow) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public int compare(Cell left, Cell right) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public int compareRows(Cell left, Cell right) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * @return a set of all cells in the segment
+   */
+  protected CellSet getCellSet() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * Returns the Cell comparator used by this segment
+   * @return the Cell comparator used by this segment
+   */
+  protected CellComparator getComparator() {
+    return comparator;
+  }
+
+  protected void internalAdd(Cell cell, boolean mslabUsed, MemstoreSize memstoreSize) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,
+      MemstoreSize memstoreSize) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  protected long heapOverheadChange(Cell cell, boolean succ) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * Returns a subset of the segment cell set, which starts with the given cell
+   * @param firstCell a cell in the segment
+   * @return a subset of the segment cell set, which starts with the given cell
+   */
+  protected SortedSet<Cell> tailSet(Cell firstCell) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  // Debug methods
+  /**
+   * Dumps all cells of the segment into the given log
+   */
+  void dump(Log log) {
+    for (ImmutableSegment s : segments) {
+      s.dump(log);
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb =
+        new StringBuilder("This is CompositeImmutableSegment and those are its segments:: ");
+    for (ImmutableSegment s : segments) {
+      sb.append(s.toString());
+    }
+    return sb.toString();
+  }
+}
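Most of CompositeImmutableSegment either delegates to its child segments or throws for operations a snapshot never needs; the key size and time range are aggregated once in the constructor. A tiny sketch of that aggregation side, with a stand-in child type rather than the real ImmutableSegment:

import java.util.List;

/** Minimal stand-in for an immutable segment: a data size plus a min/max timestamp. */
final class ChildSegment {
  final long keySize;
  final long minTs;
  final long maxTs;
  ChildSegment(long keySize, long minTs, long maxTs) {
    this.keySize = keySize;
    this.minTs = minTs;
    this.maxTs = maxTs;
  }
}

/** Sketch: a composite snapshot segment that only aggregates over its children. */
public final class CompositeSegmentSketch {
  private final List<ChildSegment> children;
  private final long keySize;   // sum over children, computed once
  private final long minTs;     // union of the children's time ranges
  private final long maxTs;

  CompositeSegmentSketch(List<ChildSegment> children) {
    long size = 0;
    long min = Long.MAX_VALUE;
    long max = Long.MIN_VALUE;
    for (ChildSegment c : children) {
      size += c.keySize;
      min = Math.min(min, c.minTs);
      max = Math.max(max, c.maxTs);
    }
    this.children = children;
    this.keySize = size;
    this.minTs = min;
    this.maxTs = max;
  }

  long keySize() { return keySize; }

  long minTimestamp() { return minTs; }

  long maxTimestamp() { return maxTs; }

  boolean isEmpty() {
    for (ChildSegment c : children) {
      if (c.keySize > 0) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    CompositeSegmentSketch snapshot = new CompositeSegmentSketch(
        List.of(new ChildSegment(100, 5, 9), new ChildSegment(40, 2, 7)));
    System.out.println(snapshot.keySize());                                       // 140
    System.out.println(snapshot.minTimestamp() + ".." + snapshot.maxTimestamp()); // 2..9
    System.out.println(snapshot.isEmpty());                                       // false
  }
}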

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index d4e6e12..76442e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -127,30 +128,20 @@ public class DefaultMemStore extends AbstractMemStore {
   public List<KeyValueScanner> getScanners(long readPt) throws IOException {
     List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(2);
     list.add(this.active.getScanner(readPt, 1));
-    list.add(this.snapshot.getScanner(readPt, 0));
-    return Collections.<KeyValueScanner> singletonList(
-      new MemStoreScanner(getComparator(), list));
+    list.addAll(this.snapshot.getScanners(readPt, 0));
+    return Collections.<KeyValueScanner> singletonList(new MemStoreScanner(getComparator(), list));
   }
 
+  // the getSegments() method is used for tests only
+  @VisibleForTesting
   @Override
-  protected List<Segment> getSegments() throws IOException {
+  protected List<Segment> getSegments() {
     List<Segment> list = new ArrayList<Segment>(2);
     list.add(this.active);
-    list.add(this.snapshot);
+    list.addAll(this.snapshot.getAllSegments());
     return list;
   }
 
-  /**
-   * @param cell Find the row that comes after this one.  If null, we return the
-   * first.
-   * @return Next row or null if none found.
-   */
-  Cell getNextRow(final Cell cell) {
-    return getLowest(
-        getNextRow(cell, this.active.getCellSet()),
-        getNextRow(cell, this.snapshot.getCellSet()));
-  }
-
   @Override public void updateLowestUnflushedSequenceIdInWAL(boolean onlyIfMoreRecent) {
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index e11a31c..b664a4a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.apache.hadoop.hbase.HConstants.REPLICATION_SCOPE_LOCAL;
 import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Optional;
 import com.google.common.base.Preconditions;
@@ -6483,8 +6484,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         final Configuration conf, final HTableDescriptor hTableDescriptor,
         final WAL wal, final boolean initialize)
   throws IOException {
-    LOG.info("creating HRegion " + info.getTable().getNameAsString()
-        + " HTD == " + hTableDescriptor + " RootDir = " + rootDir +
+    LOG.info("creating HRegion " + info.getTable().getNameAsString() + " HTD == " + hTableDescriptor
+        + " RootDir = " + rootDir +
         " Table name == " + info.getTable().getNameAsString());
     FileSystem fs = FileSystem.get(conf);
     Path tableDir = FSUtils.getTableDir(rootDir, info.getTable());

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
index 4cdb29d..547d332 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
@@ -30,6 +30,10 @@ import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.CollectionBackedScanner;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
 
 /**
  * ImmutableSegment is an abstract class that extends the API supported by a {@link Segment},
@@ -69,6 +73,14 @@ public class ImmutableSegment extends Segment {
 
   /////////////////////  CONSTRUCTORS  /////////////////////
   /**------------------------------------------------------------------------
+   * Empty C-tor to be used only for CompositeImmutableSegment
+   */
+  protected ImmutableSegment(CellComparator comparator) {
+    super(comparator);
+    this.timeRange = null;
+  }
+
+  /**------------------------------------------------------------------------
    * Copy C-tor to be used when new ImmutableSegment is being built from a Mutable one.
    * This C-tor should be used when active MutableSegment is pushed into the compaction
    * pipeline and becomes an ImmutableSegment.
@@ -142,6 +154,15 @@ public class ImmutableSegment extends Segment {
     return this.timeRange.getMin();
   }
 
+  public long getNumOfSegments() {
+    return 1;
+  }
+
+  public List<Segment> getAllSegments() {
+    List<Segment> res = new ArrayList<Segment>(Arrays.asList(this));
+    return res;
+  }
+
   /**------------------------------------------------------------------------
    * Change the CellSet of this ImmutableSegment from one based on ConcurrentSkipListMap to one
    * based on CellArrayMap.
@@ -232,7 +253,7 @@ public class ImmutableSegment extends Segment {
     Cell curCell;
     int idx = 0;
     // create this segment scanner with maximal possible read point, to go over all Cells
-    SegmentScanner segmentScanner = this.getScanner(Long.MAX_VALUE);
+    KeyValueScanner segmentScanner = this.getScanner(Long.MAX_VALUE);
 
     try {
       while ((curCell = segmentScanner.next()) != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index 84f88f0..29fd78a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -56,7 +56,7 @@ public class MemStoreCompactor {
 
   // The upper bound for the number of segments we store in the pipeline prior to merging.
   // This constant is subject to further experimentation.
-  private static final int THRESHOLD_PIPELINE_SEGMENTS = 1;
+  private static final int THRESHOLD_PIPELINE_SEGMENTS = 30; // stands here for infinity
 
   private static final Log LOG = LogFactory.getLog(MemStoreCompactor.class);
 
@@ -276,6 +276,8 @@ public class MemStoreCompactor {
     case NONE: action = Action.NOOP;
       break;
     case BASIC: action = Action.MERGE;
+      // if multiple segments appear in the pipeline, flush them to disk together later
+      compactingMemStore.setCompositeSnapshot(true);
       break;
     case EAGER: action = Action.COMPACT;
       break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
index 77cea51..fa7c342 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
@@ -25,19 +25,32 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 @InterfaceAudience.Private
 public class MemstoreSize {
 
-  static final MemstoreSize EMPTY_SIZE = new MemstoreSize();
-
   private long dataSize;
   private long heapOverhead;
+  final private boolean isEmpty;
+
+  static final MemstoreSize EMPTY_SIZE = new MemstoreSize(true);
 
   public MemstoreSize() {
     dataSize = 0;
     heapOverhead = 0;
+    isEmpty = false;
+  }
+
+  public MemstoreSize(boolean isEmpty) {
+    dataSize = 0;
+    heapOverhead = 0;
+    this.isEmpty = isEmpty;
+  }
+
+  public boolean isEmpty() {
+    return isEmpty;
   }
 
   public MemstoreSize(long dataSize, long heapOverhead) {
     this.dataSize = dataSize;
     this.heapOverhead = heapOverhead;
+    this.isEmpty = false;
   }
 
   public void incMemstoreSize(long dataSize, long heapOverhead) {
@@ -61,11 +74,13 @@ public class MemstoreSize {
   }
 
   public long getDataSize() {
-    return dataSize;
+
+    return isEmpty ? 0 : dataSize;
   }
 
   public long getHeapOverhead() {
-    return heapOverhead;
+
+    return isEmpty ? 0 : heapOverhead;
   }
 
   @Override
@@ -74,7 +89,7 @@ public class MemstoreSize {
       return false;
     }
     MemstoreSize other = (MemstoreSize) obj;
-    return this.dataSize == other.dataSize && this.heapOverhead == other.heapOverhead;
+    return getDataSize() == other.dataSize && getHeapOverhead() == other.heapOverhead;
   }
 
   @Override
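The MemstoreSize change above turns EMPTY_SIZE into a sentinel whose getters always report zero, so the shared constant cannot skew flush-size accounting even if its counters are touched. A minimal sketch of that masking idea (a stand-in class, not the HBase one):

/** Sketch: a mutable size holder plus a shared EMPTY sentinel that always reports zero. */
public final class MemstoreSizeSketch {
  /** Shared sentinel; its getters mask whatever the counters hold. */
  static final MemstoreSizeSketch EMPTY = new MemstoreSizeSketch(true);

  private long dataSize;
  private long heapOverhead;
  private final boolean empty;

  MemstoreSizeSketch() { this(false); }

  private MemstoreSizeSketch(boolean empty) { this.empty = empty; }

  void inc(long data, long overhead) {
    dataSize += data;
    heapOverhead += overhead;
  }

  // The masking happens in the getters, so EMPTY keeps reporting zero
  // even if something accidentally bumps its counters.
  long getDataSize() { return empty ? 0 : dataSize; }

  long getHeapOverhead() { return empty ? 0 : heapOverhead; }

  public static void main(String[] args) {
    MemstoreSizeSketch s = new MemstoreSizeSketch();
    s.inc(64, 16);
    System.out.println(s.getDataSize() + "/" + s.getHeapOverhead()); // 64/16
    System.out.println(EMPTY.getDataSize());                         // 0
  }
}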

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
index afdfe6f..8581517 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
@@ -18,7 +18,9 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 import java.util.SortedSet;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
@@ -64,6 +66,15 @@ public abstract class Segment {
   protected final TimeRangeTracker timeRangeTracker;
   protected volatile boolean tagsPresent;
 
+  // Empty constructor to be used when Segment is used as an interface
+  // and there is no need for a true Segment state
+  protected Segment(CellComparator comparator) {
+    this.comparator = comparator;
+    this.dataSize = new AtomicLong(0);
+    this.heapOverhead = new AtomicLong(0);
+    this.timeRangeTracker = new TimeRangeTracker();
+  }
+
   // This constructor is used to create empty Segments.
   protected Segment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB) {
     this.cellSet.set(cellSet);
@@ -91,7 +102,7 @@ public abstract class Segment {
    * Creates the scanner for the given read point
    * @return a scanner for the given read point
    */
-  public SegmentScanner getScanner(long readPoint) {
+  public KeyValueScanner getScanner(long readPoint) {
     return new SegmentScanner(this, readPoint);
   }
 
@@ -99,10 +110,16 @@ public abstract class Segment {
    * Creates the scanner for the given read point, and a specific order in a list
    * @return a scanner for the given read point
    */
-  public SegmentScanner getScanner(long readPoint, long order) {
+  public KeyValueScanner getScanner(long readPoint, long order) {
     return new SegmentScanner(this, readPoint, order);
   }
 
+  public List<KeyValueScanner> getScanners(long readPoint, long order) {
+    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(1);
+    scanners.add(getScanner(readPoint, order));
+    return scanners;
+  }
+
   /**
    * @return whether the segment has any cells
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
index 01e07ef..7e53026 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
@@ -47,6 +47,13 @@ public final class SegmentFactory {
     return new ImmutableSegment(comparator, iterator, MemStoreLAB.newInstance(conf));
   }
 
+  // create composite immutable segment from a list of segments
+  public CompositeImmutableSegment createCompositeImmutableSegment(
+      final CellComparator comparator, List<ImmutableSegment> segments) {
+    return new CompositeImmutableSegment(comparator, segments);
+
+  }
+
   // create new flat immutable segment from compacting old immutable segments
   public ImmutableSegment createImmutableSegmentByCompaction(final Configuration conf,
       final CellComparator comparator, MemStoreSegmentsIterator iterator, int numOfCells,
@@ -102,6 +109,9 @@ public final class SegmentFactory {
 
   private MemStoreLAB getMergedMemStoreLAB(Configuration conf, List<ImmutableSegment> segments) {
     List<MemStoreLAB> mslabs = new ArrayList<MemStoreLAB>();
+    if (!conf.getBoolean(MemStoreLAB.USEMSLAB_KEY, MemStoreLAB.USEMSLAB_DEFAULT)) {
+      return null;
+    }
     for (ImmutableSegment segment : segments) {
       mslabs.add(segment.getMemStoreLAB());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
index b0b63a9..0c1880c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactingMemStore.java
@@ -137,6 +137,7 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     this.memstore = new CompactingMemStore(HBaseConfiguration.create(),
         CellComparator.COMPARATOR, store, regionServicesForStores,
         HColumnDescriptor.MemoryCompaction.EAGER);
+
     this.memstore.add(kv1.clone(), null);
     // As compaction is starting in the background the repetition
     // of the k1 might be removed BUT the scanners created earlier
@@ -177,6 +178,9 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
     // Add more versions to make it a little more interesting.
     Thread.sleep(1);
     addRows(this.memstore);
+    ((CompactingMemStore)this.memstore).setCompositeSnapshot(true);
+
+
     Cell closestToEmpty = ((CompactingMemStore)this.memstore).getNextRow(KeyValue.LOWESTKEY);
     assertTrue(CellComparator.COMPARATOR.compareRows(closestToEmpty,
         new KeyValue(Bytes.toBytes(0), System.currentTimeMillis())) == 0);
@@ -277,7 +281,9 @@ public class TestCompactingMemStore extends TestDefaultMemStore {
 
     this.memstore.upsert(l, 2, null);// readpoint is 2
     MemstoreSize newSize = this.memstore.size();
-    assert (newSize.getDataSize() > oldSize.getDataSize());
+    assertTrue("\n<<< The old size is " + oldSize.getDataSize() + " and the new size is "
+        + newSize.getDataSize() + "\n",
+        newSize.getDataSize() > oldSize.getDataSize());
     //The kv1 should be removed.
     assert (memstore.getActive().getCellsCount() == 2);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 27ed295..93d28d5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -65,8 +65,6 @@ import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
 import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryMXBean;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -180,6 +178,10 @@ public class TestDefaultMemStore {
     // Now assert can count same number even if a snapshot mid-scan.
     s = new StoreScanner(scan, scanInfo, scanType, null, memstorescanners);
     count = 0;
+
+//    assertTrue("\n<<< The memstore scanners without snapshot are: \n" + memstorescanners
+//        + "\n",false);
+
     try {
       while (s.next(result)) {
         LOG.info(result);
@@ -207,8 +209,10 @@ public class TestDefaultMemStore {
     s = new StoreScanner(scan, scanInfo, scanType, null, memstorescanners);
     count = 0;
     int snapshotIndex = 5;
+
     try {
       while (s.next(result)) {
+
         LOG.info(result);
         // Assert the stuff is coming out in right order.
         assertTrue(CellUtil.matchingRow(result.get(0), Bytes.toBytes(count)));
@@ -216,6 +220,7 @@ public class TestDefaultMemStore {
         assertEquals("count=" + count + ", result=" + result, rowCount, result.size());
         count++;
         if (count == snapshotIndex) {
+
           MemStoreSnapshot snapshot = this.memstore.snapshot();
           this.memstore.clearSnapshot(snapshot.getId());
           // Added more rows into kvset.  But the scanner wont see these rows.
@@ -227,7 +232,8 @@ public class TestDefaultMemStore {
     } finally {
       s.close();
     }
-    assertEquals(rowCount, count);
+    assertEquals("\n<<< The row count is " + rowCount + " and the iteration count is " + count,
+        rowCount, count);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/463ffa79/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index 133c53b..332a125 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -22,13 +22,7 @@ import java.util.Arrays;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -38,6 +32,7 @@ import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.WAL;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -55,40 +50,48 @@ public class TestWalAndCompactingMemStoreFlush {
 
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private static final Path DIR = TEST_UTIL.getDataTestDir("TestHRegion");
-  public static final TableName TABLENAME = TableName.valueOf("TestWalAndCompactingMemStoreFlush",
-      "t1");
+  public static final TableName TABLENAME =
+      TableName.valueOf("TestWalAndCompactingMemStoreFlush", "t1");
 
-  public static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2"),
-      Bytes.toBytes("f3"), Bytes.toBytes("f4"), Bytes.toBytes("f5") };
+  public static final byte[][] FAMILIES =
+      { Bytes.toBytes("f1"), Bytes.toBytes("f2"), Bytes.toBytes("f3"), Bytes.toBytes("f4"), Bytes.toBytes("f5") };
 
   public static final byte[] FAMILY1 = FAMILIES[0];
   public static final byte[] FAMILY2 = FAMILIES[1];
   public static final byte[] FAMILY3 = FAMILIES[2];
 
   private HRegion initHRegion(String callingMethod, Configuration conf) throws IOException {
-    int i=0;
+    MemstoreSize memstrsize1 = MemstoreSize.EMPTY_SIZE;
+    assertEquals(memstrsize1.getDataSize(), 0);
+    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
+    int i = 0;
     HTableDescriptor htd = new HTableDescriptor(TABLENAME);
     for (byte[] family : FAMILIES) {
       HColumnDescriptor hcd = new HColumnDescriptor(family);
       // even column families are going to have compacted memstore
+
       if(i%2 == 0) {
         hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.valueOf(
             conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY)));
       } else {
         hcd.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
       }
+
       htd.addFamily(hcd);
       i++;
     }
-
+    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
     HRegionInfo info = new HRegionInfo(TABLENAME, null, null, false);
     Path path = new Path(DIR, callingMethod);
-    return HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
+    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
+    HRegion result = HBaseTestingUtility.createRegionAndWAL(info, path, conf, htd);
+    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
+    return result;
   }
 
   // A helper function to create puts.
   private Put createPut(int familyNum, int putNum) {
-    byte[] qf  = Bytes.toBytes("q" + familyNum);
+    byte[] qf = Bytes.toBytes("q" + familyNum);
     byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum);
     byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum);
     Put p = new Put(row);
@@ -98,7 +101,7 @@ public class TestWalAndCompactingMemStoreFlush {
 
   // A helper function to create double puts, so something can be compacted later.
   private Put createDoublePut(int familyNum, int putNum) {
-    byte[] qf  = Bytes.toBytes("q" + familyNum);
+    byte[] qf = Bytes.toBytes("q" + familyNum);
     byte[] row = Bytes.toBytes("row" + familyNum + "-" + putNum);
     byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum);
     Put p = new Put(row);
@@ -122,16 +125,21 @@ public class TestWalAndCompactingMemStoreFlush {
     byte[] val = Bytes.toBytes("val" + familyNum + "-" + putNum);
     assertNotNull(("Missing Put#" + putNum + " for CF# " + familyNum), r.getFamilyMap(family));
     assertNotNull(("Missing Put#" + putNum + " for CF# " + familyNum),
-      r.getFamilyMap(family).get(qf));
+        r.getFamilyMap(family).get(qf));
     assertTrue(("Incorrect value for Put#" + putNum + " for CF# " + familyNum),
-      Arrays.equals(r.getFamilyMap(family).get(qf), val));
+        Arrays.equals(r.getFamilyMap(family).get(qf), val));
   }
 
+  @Before public void setUp() throws Exception {
+    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
+  }
+
+  // test selective flush with data-compaction
   @Test(timeout = 180000)
   public void testSelectiveFlushWithEager() throws IOException {
-
     // Set up the configuration
     Configuration conf = HBaseConfiguration.create();
+
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300 * 1024);
     conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
         FlushNonSloppyStoresFirstPolicy.class.getName());
@@ -175,17 +183,14 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore();
 
     // Get the overall smallest LSN in the region's memstores.
-    long smallestSeqInRegionCurrentMemstorePhaseI = getWAL(region)
-        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseI =
+        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
     String s = "\n\n----------------------------------\n"
-        + "Upon initial insert and before any flush, size of CF1 is:"
-        + cf1MemstoreSizePhaseI + ", is CF1 compacted memstore?:"
-        + region.getStore(FAMILY1).isSloppyMemstore() + ". Size of CF2 is:"
-        + cf2MemstoreSizePhaseI + ", is CF2 compacted memstore?:"
-        + region.getStore(FAMILY2).isSloppyMemstore() + ". Size of CF3 is:"
-        + cf3MemstoreSizePhaseI + ", is CF3 compacted memstore?:"
-        + region.getStore(FAMILY3).isSloppyMemstore() + "\n";
+        + "Upon initial insert and before any flush, size of CF1 is:" + cf1MemstoreSizePhaseI + ", is CF1 compacted memstore?:"
+        + region.getStore(FAMILY1).isSloppyMemstore() + ". Size of CF2 is:" + cf2MemstoreSizePhaseI + ", is CF2 compacted memstore?:"
+        + region.getStore(FAMILY2).isSloppyMemstore() + ". Size of CF3 is:" + cf3MemstoreSizePhaseI
+        + ", is CF3 compacted memstore?:" + region.getStore(FAMILY3).isSloppyMemstore() + "\n";
 
     // The overall smallest LSN in the region's memstores should be the same as
     // the LSN of the smallest edit in CF1
@@ -200,12 +205,12 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // The total memstore size should be the same as the sum of the sizes of
     // memstores of CF1, CF2 and CF3.
-    String msg = "totalMemstoreSize="+totalMemstoreSize +
-        " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI +
-        " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI +
-        " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ;
-    assertEquals(msg, totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize()
-        + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
+    String msg = "totalMemstoreSize=" + totalMemstoreSize +
+        " cf1MemstoreSizePhaseI=" + cf1MemstoreSizePhaseI +
+        " cf2MemstoreSizePhaseI=" + cf2MemstoreSizePhaseI +
+        " cf3MemstoreSizePhaseI=" + cf3MemstoreSizePhaseI;
+    assertEquals(msg, totalMemstoreSize,
+        cf1MemstoreSizePhaseI.getDataSize() + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
 
     // Flush!!!!!!!!!!!!!!!!!!!!!!
     // We have big compacting memstore CF1 and two small memstores:
@@ -225,8 +230,8 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getSizeOfMemStore();
 
-    long smallestSeqInRegionCurrentMemstorePhaseII = getWAL(region)
-        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseII =
+        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     // Find the smallest LSNs for edits wrt to each CF.
     long smallestSeqCF1PhaseII = region.getOldestSeqIdOfStore(FAMILY1);
     long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2);
@@ -260,16 +265,20 @@ public class TestWalAndCompactingMemStoreFlush {
 
     s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseII
         + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseII + ", " +
-        "the smallest sequence in CF2:"
-        + smallestSeqCF2PhaseII +", the smallest sequence in CF3:" + smallestSeqCF3PhaseII + "\n";
+        "the smallest sequence in CF2:" + smallestSeqCF2PhaseII + ", the smallest sequence in CF3:"
+        + smallestSeqCF3PhaseII + "\n";
 
     // How much does the CF1 memstore occupy? Will be used later.
     MemstoreSize cf1MemstoreSizePhaseIII = region.getStore(FAMILY1).getSizeOfMemStore();
     long smallestSeqCF1PhaseIII = region.getOldestSeqIdOfStore(FAMILY1);
 
     s = s + "----After more puts into CF1 its size is:" + cf1MemstoreSizePhaseIII
-        + ", and its sequence is:" + smallestSeqCF1PhaseIII + " ----\n" ;
-
+        + ", and its sequence is:" + smallestSeqCF1PhaseIII + " ----\n"
+        + "The sizes of snapshots are cf1: " + region.getStore(FAMILY1).getFlushedCellsSize()
+        + ", cf2: " + region.getStore(FAMILY2).getFlushedCellsSize() + ", cf3: " + region
+        .getStore(FAMILY3).getFlushedCellsSize() + ", cf4: " + region.getStore(FAMILIES[4])
+        .getFlushedCellsSize() + "; the entire region size is: " + region.getMemstoreSize() + "\n";
+    ;
 
     // Flush!!!!!!!!!!!!!!!!!!!!!!
     // Flush again, CF1 is flushed to disk
@@ -282,21 +291,22 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getSizeOfMemStore();
 
-    long smallestSeqInRegionCurrentMemstorePhaseIV = getWAL(region)
-        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseIV =
+        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     long smallestSeqCF1PhaseIV = region.getOldestSeqIdOfStore(FAMILY1);
     long smallestSeqCF2PhaseIV = region.getOldestSeqIdOfStore(FAMILY2);
     long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3);
 
     s = s + "----After SECOND FLUSH, CF1 size is:" + cf1MemstoreSizePhaseIV + ", CF2 size is:"
-        + cf2MemstoreSizePhaseIV + " and CF3 size is:" + cf3MemstoreSizePhaseIV
-        + "\n";
+        + cf2MemstoreSizePhaseIV + " and CF3 size is:" + cf3MemstoreSizePhaseIV + "\n" + "The sizes of snapshots are cf1: " + region.getStore(FAMILY1).getFlushedCellsSize()
+        + ", cf2: " + region.getStore(FAMILY2).getFlushedCellsSize() + ", cf3: " + region
+        .getStore(FAMILY3).getFlushedCellsSize() + ", cf4: " + region.getStore(FAMILIES[4])
+        .getFlushedCellsSize() + "\n";
 
     s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIV
         + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIV + ", " +
-        "the smallest sequence in CF2:"
-        + smallestSeqCF2PhaseIV +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIV
-        + "\n";
+        "the smallest sequence in CF2:" + smallestSeqCF2PhaseIV + ", the smallest sequence in CF3:"
+        + smallestSeqCF3PhaseIV + "\n" + "the entire region size is: " + region.getMemstoreSize() + "\n";
 
     // CF1's pipeline component (inserted before first flush) should be flushed to disk
     // CF2 should be flushed to disk
@@ -321,13 +331,21 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getSizeOfMemStore();
     MemstoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getSizeOfMemStore();
-    long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region)
-        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseV =
+        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
-    assertEquals(MemstoreSize.EMPTY_SIZE , cf1MemstoreSizePhaseV);
+    assertEquals(MemstoreSize.EMPTY_SIZE, cf1MemstoreSizePhaseV);
     assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseV);
     assertEquals(MemstoreSize.EMPTY_SIZE, cf3MemstoreSizePhaseV);
 
+    s = s + "----AFTER THIRD FLUSH, the entire region size is:" + region.getMemstoreSize()
+        + " (empty memstore size is " + MemstoreSize.EMPTY_SIZE
+        + "), while the sizes of each memstore are as following \ncf1: " + cf1MemstoreSizePhaseV
+        + ", cf2: " + cf2MemstoreSizePhaseV + ", cf3: " + cf3MemstoreSizePhaseV + ", cf4: " + region
+        .getStore(FAMILIES[4]).getSizeOfMemStore() + "\n" + "The sizes of snapshots are cf1: " + region.getStore(FAMILY1).getFlushedCellsSize()
+        + ", cf2: " + region.getStore(FAMILY2).getFlushedCellsSize() + ", cf3: " + region.getStore(FAMILY3).getFlushedCellsSize()
+        + ", cf4: " + region.getStore(FAMILIES[4]).getFlushedCellsSize() + "\n";
+
     // What happens when we hit the memstore limit, but we are not able to find
     // any Column Family above the threshold?
     // In that case, we should flush all the CFs.
@@ -345,24 +363,22 @@ public class TestWalAndCompactingMemStoreFlush {
 
     region.flush(false);
 
-    s = s + "----AFTER THIRD AND FORTH FLUSH, The smallest sequence in region WAL is: "
+    s = s + "----AFTER FORTH FLUSH, The smallest sequence in region WAL is: "
         + smallestSeqInRegionCurrentMemstorePhaseV
         + ". After additional inserts and last flush, the entire region size is:" + region
-        .getMemstoreSize()
-        + "\n----------------------------------\n";
+        .getMemstoreSize() + "\n----------------------------------\n";
 
     // Since we won't find any CF above the threshold, and hence no specific
     // store to flush, we should flush all the memstores
     // Also compacted memstores are flushed to disk.
-    assertEquals(0, region.getMemstoreSize());
+    assertEquals(s, 0, region.getMemstoreSize());
     System.out.println(s);
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
   /*------------------------------------------------------------------------------*/
   /* Check the same as above but for index-compaction type of compacting memstore */
-  @Test(timeout = 180000)
-  public void testSelectiveFlushWithIndexCompaction() throws IOException {
+  @Test(timeout = 180000) public void testSelectiveFlushWithIndexCompaction() throws IOException {
 
     /*------------------------------------------------------------------------------*/
     /* SETUP */
@@ -379,7 +395,7 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // Initialize the region
     Region region = initHRegion("testSelectiveFlushWithIndexCompaction", conf);
-
+    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
     /*------------------------------------------------------------------------------*/
     /* PHASE I - insertions */
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
@@ -410,8 +426,8 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore();
     // Get the overall smallest LSN in the region's memstores.
-    long smallestSeqInRegionCurrentMemstorePhaseI = getWAL(region)
-        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseI =
+        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
 
     /*------------------------------------------------------------------------------*/
     /* PHASE I - validation */
@@ -427,8 +443,8 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // The total memstore size should be the same as the sum of the sizes of
     // memstores of CF1, CF2 and CF3.
-    assertEquals(totalMemstoreSizePhaseI, cf1MemstoreSizePhaseI.getDataSize()
-        + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
+    assertEquals(totalMemstoreSizePhaseI,
+        cf1MemstoreSizePhaseI.getDataSize() + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
 
     /*------------------------------------------------------------------------------*/
     /* PHASE I - Flush */
@@ -459,8 +475,8 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf1MemstoreSizePhaseII = region.getStore(FAMILY1).getSizeOfMemStore();
     MemstoreSize cf2MemstoreSizePhaseII = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseII = region.getStore(FAMILY3).getSizeOfMemStore();
-    long smallestSeqInRegionCurrentMemstorePhaseII = getWAL(region)
-        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseII =
+        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     // Find the smallest LSNs for edits wrt to each CF.
     long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3);
     long totalMemstoreSizePhaseII = region.getMemstoreSize();
@@ -468,13 +484,13 @@ public class TestWalAndCompactingMemStoreFlush {
     /*------------------------------------------------------------------------------*/
     /* PHASE II - validation */
     // CF1 was flushed to memory, should be flattened and take less space
-    assertEquals(cf1MemstoreSizePhaseII.getDataSize() , cf1MemstoreSizePhaseI.getDataSize());
+    assertEquals(cf1MemstoreSizePhaseII.getDataSize(), cf1MemstoreSizePhaseI.getDataSize());
     assertTrue(cf1MemstoreSizePhaseII.getHeapOverhead() < cf1MemstoreSizePhaseI.getHeapOverhead());
     // CF2 should become empty
     assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseII);
     // verify that CF3 was flushed to memory and was not compacted (this is an approximation check)
     // if compacted CF# should be at least twice less because its every key was duplicated
-    assertEquals(cf3MemstoreSizePhaseII.getDataSize() , cf3MemstoreSizePhaseI.getDataSize());
+    assertEquals(cf3MemstoreSizePhaseII.getDataSize(), cf3MemstoreSizePhaseI.getDataSize());
     assertTrue(
         cf3MemstoreSizePhaseI.getHeapOverhead() / 2 < cf3MemstoreSizePhaseII.getHeapOverhead());
 
@@ -484,8 +500,8 @@ public class TestWalAndCompactingMemStoreFlush {
     // The total memstore size should be the same as the sum of the sizes of
     // memstores of CF1, CF2 and CF3. Counting the empty active segments in CF1/2/3 and pipeline
     // items in CF1/2
-    assertEquals(totalMemstoreSizePhaseII, cf1MemstoreSizePhaseII.getDataSize()
-        + cf2MemstoreSizePhaseII.getDataSize() + cf3MemstoreSizePhaseII.getDataSize());
+    assertEquals(totalMemstoreSizePhaseII,
+        cf1MemstoreSizePhaseII.getDataSize() + cf2MemstoreSizePhaseII.getDataSize() + cf3MemstoreSizePhaseII.getDataSize());
 
     /*------------------------------------------------------------------------------*/
     /*------------------------------------------------------------------------------*/
@@ -513,8 +529,8 @@ public class TestWalAndCompactingMemStoreFlush {
     // The total memstore size should be the same as the sum of the sizes of
     // memstores of CF1, CF2 and CF3. Counting the empty active segments in CF1/2/3 and pipeline
     // items in CF1/2
-    assertEquals(totalMemstoreSizePhaseIII, cf1MemstoreSizePhaseIII.getDataSize()
-        + cf2MemstoreSizePhaseII.getDataSize() + cf3MemstoreSizePhaseII.getDataSize());
+    assertEquals(totalMemstoreSizePhaseIII,
+        cf1MemstoreSizePhaseIII.getDataSize() + cf2MemstoreSizePhaseII.getDataSize() + cf3MemstoreSizePhaseII.getDataSize());
 
     /*------------------------------------------------------------------------------*/
     /* PHASE III - Flush */
@@ -530,8 +546,8 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf1MemstoreSizePhaseIV = region.getStore(FAMILY1).getSizeOfMemStore();
     MemstoreSize cf2MemstoreSizePhaseIV = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseIV = region.getStore(FAMILY3).getSizeOfMemStore();
-    long smallestSeqInRegionCurrentMemstorePhaseIV = getWAL(region)
-        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseIV =
+        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     long smallestSeqCF3PhaseIV = region.getOldestSeqIdOfStore(FAMILY3);
 
     /*------------------------------------------------------------------------------*/
@@ -561,8 +577,8 @@ public class TestWalAndCompactingMemStoreFlush {
     MemstoreSize cf1MemstoreSizePhaseV = region.getStore(FAMILY1).getSizeOfMemStore();
     MemstoreSize cf2MemstoreSizePhaseV = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseV = region.getStore(FAMILY3).getSizeOfMemStore();
-    long smallestSeqInRegionCurrentMemstorePhaseV = getWAL(region)
-        .getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
+    long smallestSeqInRegionCurrentMemstorePhaseV =
+        getWAL(region).getEarliestMemstoreSeqNum(region.getRegionInfo().getEncodedNameAsBytes());
     long totalMemstoreSizePhaseV = region.getMemstoreSize();
 
     /*------------------------------------------------------------------------------*/
@@ -617,22 +633,30 @@ public class TestWalAndCompactingMemStoreFlush {
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
-  @Test(timeout = 180000)
-  public void testSelectiveFlushAndWALinDataCompaction() throws IOException {
+  // test WAL behavior together with selective flush under data compaction
+  @Test(timeout = 180000) public void testDCwithWAL() throws IOException {
+
+    MemstoreSize checkSize = MemstoreSize.EMPTY_SIZE;
+    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
     // Set up the configuration
     Configuration conf = HBaseConfiguration.create();
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300 * 1024);
-    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushNonSloppyStoresFirstPolicy.class
-        .getName());
-    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 *
-        1024);
+    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
+        FlushNonSloppyStoresFirstPolicy.class.getName());
+    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));
 
+    MemstoreSize memstrsize1 = MemstoreSize.EMPTY_SIZE;
+    assertEquals(MemstoreSize.EMPTY_SIZE.getDataSize(), 0);
     // Intialize the HRegion
     HRegion region = initHRegion("testSelectiveFlushAndWALinDataCompaction", conf);
+
+    MemstoreSize cf2MemstoreSizePhase0 = region.getStore(FAMILY2).getSizeOfMemStore();
+    MemstoreSize cf1MemstoreSizePhase0 = region.getStore(FAMILY1).getSizeOfMemStore();
+
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
     for (int i = 1; i <= 1200; i++) {
       region.put(createPut(1, i));
@@ -652,6 +676,7 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // Find the sizes of the memstores of each CF.
     MemstoreSize cf1MemstoreSizePhaseI = region.getStore(FAMILY1).getSizeOfMemStore();
+    //boolean oldCF2 = region.getStore(FAMILY2).isSloppyMemstore();
     MemstoreSize cf2MemstoreSizePhaseI = region.getStore(FAMILY2).getSizeOfMemStore();
     MemstoreSize cf3MemstoreSizePhaseI = region.getStore(FAMILY3).getSizeOfMemStore();
 
@@ -662,16 +687,20 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // The total memstore size should be the same as the sum of the sizes of
     // memstores of CF1, CF2 and CF3.
-    String msg = "totalMemstoreSize="+totalMemstoreSize +
-        " DefaultMemStore.DEEP_OVERHEAD="+DefaultMemStore.DEEP_OVERHEAD +
-        " cf1MemstoreSizePhaseI="+cf1MemstoreSizePhaseI +
-        " cf2MemstoreSizePhaseI="+cf2MemstoreSizePhaseI +
-        " cf3MemstoreSizePhaseI="+cf3MemstoreSizePhaseI ;
-    assertEquals(msg, totalMemstoreSize, cf1MemstoreSizePhaseI.getDataSize()
-        + cf2MemstoreSizePhaseI.getDataSize() + cf3MemstoreSizePhaseI.getDataSize());
+    String msg = "\n<<< totalMemstoreSize=" + totalMemstoreSize +
+        " DefaultMemStore.DEEP_OVERHEAD=" + DefaultMemStore.DEEP_OVERHEAD +
+        " cf1MemstoreSizePhaseI=" + cf1MemstoreSizePhaseI +
+        " cf2MemstoreSizePhaseI=" + cf2MemstoreSizePhaseI +
+        " cf3MemstoreSizePhaseI=" + cf3MemstoreSizePhaseI;
+    assertEquals(msg, totalMemstoreSize,
+        cf1MemstoreSizePhaseI.getDataSize() + cf2MemstoreSizePhaseI.getDataSize()
+            + cf3MemstoreSizePhaseI.getDataSize());
 
     // Flush!
     CompactingMemStore cms1 = (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore;
+    MemStore cms2 = ((HStore) region.getStore(FAMILY2)).memstore;
+    MemstoreSize memstrsize2 = cms2.getSnapshotSize();
+    MemstoreSize flshsize2 = cms2.getFlushableSize();
     CompactingMemStore cms3 = (CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore;
     cms1.flushInMemory();
     cms3.flushInMemory();
@@ -684,15 +713,22 @@ public class TestWalAndCompactingMemStoreFlush {
     long smallestSeqCF1PhaseII = region.getOldestSeqIdOfStore(FAMILY1);
     long smallestSeqCF2PhaseII = region.getOldestSeqIdOfStore(FAMILY2);
     long smallestSeqCF3PhaseII = region.getOldestSeqIdOfStore(FAMILY3);
+    MemstoreSize newSize = new MemstoreSize();
 
     // CF2 should have been cleared
-    assertEquals(MemstoreSize.EMPTY_SIZE, cf2MemstoreSizePhaseII);
-
-    String s = "\n\n----------------------------------\n"
-        + "Upon initial insert and flush, LSN of CF1 is:"
-        + smallestSeqCF1PhaseII + ". LSN of CF2 is:"
-        + smallestSeqCF2PhaseII + ". LSN of CF3 is:"
-        + smallestSeqCF3PhaseII + ", smallestSeqInRegionCurrentMemstore:"
+    assertEquals(
+        msg + "\n<<< CF2 is compacting " + ((HStore) region.getStore(FAMILY2)).memstore.isSloppy()
+            + ", snapshot and flushable size BEFORE flush " + memstrsize2 + "; " + flshsize2
+            + ", snapshot and flushable size AFTER flush " + cms2.getSnapshotSize() + "; " + cms2
+            .getFlushableSize() + "\n<<< cf2 size " + cms2.size() + "; the checked size "
+            + cf2MemstoreSizePhaseII + "; memstore empty size " + MemstoreSize.EMPTY_SIZE
+            + "; check size " + checkSize + "\n<<< first first first CF2 size "
+            + cf2MemstoreSizePhase0 + "; first first first CF1 size " + cf1MemstoreSizePhase0
+            + "; new new new size " + newSize + "\n", MemstoreSize.EMPTY_SIZE,
+        cf2MemstoreSizePhaseII);
+
+    String s = "\n\n----------------------------------\n" + "Upon initial insert and flush, LSN of CF1 is:"
+        + smallestSeqCF1PhaseII + ". LSN of CF2 is:" + smallestSeqCF2PhaseII + ". LSN of CF3 is:" + smallestSeqCF3PhaseII + ", smallestSeqInRegionCurrentMemstore:"
         + smallestSeqInRegionCurrentMemstorePhaseII + "\n";
 
     // Add same entries to compact them later
@@ -718,8 +754,8 @@ public class TestWalAndCompactingMemStoreFlush {
 
     s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIII
         + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIII + ", " +
-        "the smallest sequence in CF2:"
-        + smallestSeqCF2PhaseIII +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIII + "\n";
+        "the smallest sequence in CF2:" + smallestSeqCF2PhaseIII + ", the smallest sequence in CF3:"
+        + smallestSeqCF3PhaseIII + "\n";
 
     // Flush!
     cms1 = (CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore;
@@ -736,20 +772,22 @@ public class TestWalAndCompactingMemStoreFlush {
 
     s = s + "The smallest sequence in region WAL is: " + smallestSeqInRegionCurrentMemstorePhaseIV
         + ", the smallest sequence in CF1:" + smallestSeqCF1PhaseIV + ", " +
-        "the smallest sequence in CF2:"
-        + smallestSeqCF2PhaseIV +", the smallest sequence in CF3:" + smallestSeqCF3PhaseIV + "\n";
+        "the smallest sequence in CF2:" + smallestSeqCF2PhaseIV + ", the smallest sequence in CF3:"
+        + smallestSeqCF3PhaseIV + "\n";
 
     // now check that the LSN of the entire WAL, of CF1 and of CF3 has progressed due to compaction
-    assertTrue(s, smallestSeqInRegionCurrentMemstorePhaseIV >
-        smallestSeqInRegionCurrentMemstorePhaseIII);
+    assertTrue(s,
+        smallestSeqInRegionCurrentMemstorePhaseIV > smallestSeqInRegionCurrentMemstorePhaseIII);
     assertTrue(smallestSeqCF1PhaseIV > smallestSeqCF1PhaseIII);
     assertTrue(smallestSeqCF3PhaseIV > smallestSeqCF3PhaseIII);
 
     HBaseTestingUtility.closeRegionAndWAL(region);
   }
 
+  // test WAL behavior together with selective flush under index compaction
   @Test(timeout = 180000)
-  public void testSelectiveFlushAndWALinIndexCompaction() throws IOException {
+  public void tstICwithWAL() throws IOException {
+
     // Set up the configuration
     Configuration conf = HBaseConfiguration.create();
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 600 * 1024);

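The test above drives a table whose even-numbered column families use an in-memory compacting memstore while the others keep the default one. A minimal sketch of that per-family wiring, kept in the regionserver package like the test (CompactingMemStore is internal); the table name "example" and the family names are illustrative only:

package org.apache.hadoop.hbase.regionserver;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: family "f1" gets the EAGER (data-compacting) in-memory memstore,
// family "f2" keeps a non-compacting one, matching the even/odd split in initHRegion above.
final class CompactingMemStoreConfigSketch {
  static HTableDescriptor describe() {
    Configuration conf = HBaseConfiguration.create();
    conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
        String.valueOf(HColumnDescriptor.MemoryCompaction.EAGER));

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example"));
    HColumnDescriptor f1 = new HColumnDescriptor(Bytes.toBytes("f1"));
    f1.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.valueOf(
        conf.get(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY)));
    HColumnDescriptor f2 = new HColumnDescriptor(Bytes.toBytes("f2"));
    f2.setInMemoryCompaction(HColumnDescriptor.MemoryCompaction.NONE);
    htd.addFamily(f1);
    htd.addFamily(f2);
    return htd;
  }
}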

[22/50] [abbrv] hbase git commit: HBASE-17174 Refactor the AsyncProcess, BufferedMutatorImpl, and HTable

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java
new file mode 100644
index 0000000..b46e572
--- /dev/null
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java
@@ -0,0 +1,336 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.BrokenBarrierException;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.RequestController.ReturnCode;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ClientTests.class, SmallTests.class})
+public class TestSimpleRequestController {
+
+  private static final TableName DUMMY_TABLE
+          = TableName.valueOf("DUMMY_TABLE");
+  private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes();
+  private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes();
+  private static final byte[] DUMMY_BYTES_3 = "DUMMY_BYTES_3".getBytes();
+  private static final ServerName SN = ServerName.valueOf("s1:1,1");
+  private static final ServerName SN2 = ServerName.valueOf("s2:2,2");
+  private static final ServerName SN3 = ServerName.valueOf("s3:3,3");
+  private static final HRegionInfo HRI1
+          = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
+  private static final HRegionInfo HRI2
+          = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
+  private static final HRegionInfo HRI3
+          = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
+  private static final HRegionLocation LOC1 = new HRegionLocation(HRI1, SN);
+  private static final HRegionLocation LOC2 = new HRegionLocation(HRI2, SN);
+  private static final HRegionLocation LOC3 = new HRegionLocation(HRI3, SN2);
+
+  @Test
+  public void testIllegalRequestSize() {
+    testIllegalArgument(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, -1);
+  }
+
+  @Test
+  public void testIllegalRsTasks() {
+    testIllegalArgument(HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS, -1);
+  }
+
+  @Test
+  public void testIllegalRegionTasks() {
+    testIllegalArgument(HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS, -1);
+  }
+
+  @Test
+  public void testIllegalSubmittedSize() {
+    testIllegalArgument(SimpleRequestController.HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE, -1);
+  }
+
+  private void testIllegalArgument(String key, long value) {
+    Configuration conf = HBaseConfiguration.create();
+    conf.setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, -1);
+    try {
+      SimpleRequestController controller = new SimpleRequestController(conf);
+      fail("The " + key + " must be bigger than zero");
+    } catch (IllegalArgumentException e) {
+    }
+  }
+
+  private static Put createPut(long maxHeapSizePerRequest) {
+    return new Put(Bytes.toBytes("row")) {
+      @Override
+      public long heapSize() {
+        return maxHeapSizePerRequest;
+      }
+    };
+  }
+
+  @Test
+  public void testTaskCheckerHost() throws IOException {
+    final int maxTotalConcurrentTasks = 100;
+    final int maxConcurrentTasksPerServer = 2;
+    final int maxConcurrentTasksPerRegion = 1;
+    final AtomicLong tasksInProgress = new AtomicLong(0);
+    final Map<ServerName, AtomicInteger> taskCounterPerServer = new HashMap<>();
+    final Map<byte[], AtomicInteger> taskCounterPerRegion = new HashMap<>();
+    SimpleRequestController.TaskCountChecker countChecker = new SimpleRequestController.TaskCountChecker(
+            maxTotalConcurrentTasks,
+            maxConcurrentTasksPerServer,
+            maxConcurrentTasksPerRegion,
+            tasksInProgress, taskCounterPerServer, taskCounterPerRegion);
+    final long maxHeapSizePerRequest = 2 * 1024 * 1024;
+    // unlimited
+    SimpleRequestController.RequestSizeChecker sizeChecker = new SimpleRequestController.RequestSizeChecker(maxHeapSizePerRequest);
+    RequestController.Checker checker = SimpleRequestController.newChecker(Arrays.asList(countChecker, sizeChecker));
+    ReturnCode loc1Code = checker.canTakeRow(LOC1, createPut(maxHeapSizePerRequest));
+    assertEquals(ReturnCode.INCLUDE, loc1Code);
+
+    ReturnCode loc1Code_2 = checker.canTakeRow(LOC1, createPut(maxHeapSizePerRequest));
+    // rejected for size
+    assertNotEquals(ReturnCode.INCLUDE, loc1Code_2);
+
+    ReturnCode loc2Code = checker.canTakeRow(LOC2, createPut(maxHeapSizePerRequest));
+    // rejected for size
+    assertNotEquals(ReturnCode.INCLUDE, loc2Code);
+
+    // fill the task slots for LOC3.
+    taskCounterPerRegion.put(LOC3.getRegionInfo().getRegionName(), new AtomicInteger(100));
+    taskCounterPerServer.put(LOC3.getServerName(), new AtomicInteger(100));
+
+    ReturnCode loc3Code = checker.canTakeRow(LOC3, createPut(1L));
+    // rejected for count
+    assertNotEquals(ReturnCode.INCLUDE, loc3Code);
+
+    // release the task slots for LOC3.
+    taskCounterPerRegion.put(LOC3.getRegionInfo().getRegionName(), new AtomicInteger(0));
+    taskCounterPerServer.put(LOC3.getServerName(), new AtomicInteger(0));
+
+    ReturnCode loc3Code_2 = checker.canTakeRow(LOC3, createPut(1L));
+    assertEquals(ReturnCode.INCLUDE, loc3Code_2);
+  }
+
+  @Test
+  public void testRequestSizeCheckerr() throws IOException {
+    final long maxHeapSizePerRequest = 2 * 1024 * 1024;
+    SimpleRequestController.RequestSizeChecker checker
+            = new SimpleRequestController.RequestSizeChecker(maxHeapSizePerRequest);
+
+    // inner state is unchanged.
+    for (int i = 0; i != 10; ++i) {
+      ReturnCode code = checker.canTakeOperation(LOC1, maxHeapSizePerRequest);
+      assertEquals(ReturnCode.INCLUDE, code);
+      code = checker.canTakeOperation(LOC2, maxHeapSizePerRequest);
+      assertEquals(ReturnCode.INCLUDE, code);
+    }
+
+    // accept the data located on LOC1 region.
+    ReturnCode acceptCode = checker.canTakeOperation(LOC1, maxHeapSizePerRequest);
+    assertEquals(ReturnCode.INCLUDE, acceptCode);
+    checker.notifyFinal(acceptCode, LOC1, maxHeapSizePerRequest);
+
+    // the sn server reaches the limit.
+    for (int i = 0; i != 10; ++i) {
+      ReturnCode code = checker.canTakeOperation(LOC1, maxHeapSizePerRequest);
+      assertNotEquals(ReturnCode.INCLUDE, code);
+      code = checker.canTakeOperation(LOC2, maxHeapSizePerRequest);
+      assertNotEquals(ReturnCode.INCLUDE, code);
+    }
+
+    // the request to sn2 server should be accepted.
+    for (int i = 0; i != 10; ++i) {
+      ReturnCode code = checker.canTakeOperation(LOC3, maxHeapSizePerRequest);
+      assertEquals(ReturnCode.INCLUDE, code);
+    }
+
+    checker.reset();
+    for (int i = 0; i != 10; ++i) {
+      ReturnCode code = checker.canTakeOperation(LOC1, maxHeapSizePerRequest);
+      assertEquals(ReturnCode.INCLUDE, code);
+      code = checker.canTakeOperation(LOC2, maxHeapSizePerRequest);
+      assertEquals(ReturnCode.INCLUDE, code);
+    }
+  }
+
+  @Test
+  public void testSubmittedSizeChecker() {
+    final long maxHeapSizeSubmit = 2 * 1024 * 1024;
+    SimpleRequestController.SubmittedSizeChecker checker
+            = new SimpleRequestController.SubmittedSizeChecker(maxHeapSizeSubmit);
+
+    for (int i = 0; i != 10; ++i) {
+      ReturnCode include = checker.canTakeOperation(LOC1, 100000);
+      assertEquals(ReturnCode.INCLUDE, include);
+    }
+
+    for (int i = 0; i != 10; ++i) {
+      checker.notifyFinal(ReturnCode.INCLUDE, LOC1, maxHeapSizeSubmit);
+    }
+
+    for (int i = 0; i != 10; ++i) {
+      ReturnCode include = checker.canTakeOperation(LOC1, 100000);
+      assertEquals(ReturnCode.END, include);
+    }
+    for (int i = 0; i != 10; ++i) {
+      ReturnCode include = checker.canTakeOperation(LOC2, 100000);
+      assertEquals(ReturnCode.END, include);
+    }
+    checker.reset();
+    for (int i = 0; i != 10; ++i) {
+      ReturnCode include = checker.canTakeOperation(LOC1, 100000);
+      assertEquals(ReturnCode.INCLUDE, include);
+    }
+  }
+
+  @Test
+  public void testTaskCountChecker() throws InterruptedIOException {
+    long rowSize = 12345;
+    int maxTotalConcurrentTasks = 100;
+    int maxConcurrentTasksPerServer = 2;
+    int maxConcurrentTasksPerRegion = 1;
+    AtomicLong tasksInProgress = new AtomicLong(0);
+    Map<ServerName, AtomicInteger> taskCounterPerServer = new HashMap<>();
+    Map<byte[], AtomicInteger> taskCounterPerRegion = new HashMap<>();
+    SimpleRequestController.TaskCountChecker checker = new SimpleRequestController.TaskCountChecker(
+            maxTotalConcurrentTasks,
+            maxConcurrentTasksPerServer,
+            maxConcurrentTasksPerRegion,
+            tasksInProgress, taskCounterPerServer, taskCounterPerRegion);
+
+    // inner state is unchanged.
+    for (int i = 0; i != 10; ++i) {
+      ReturnCode code = checker.canTakeOperation(LOC1, rowSize);
+      assertEquals(ReturnCode.INCLUDE, code);
+    }
+    // add LOC1 region.
+    ReturnCode code = checker.canTakeOperation(LOC1, rowSize);
+    assertEquals(ReturnCode.INCLUDE, code);
+    checker.notifyFinal(code, LOC1, rowSize);
+
+    // fill the task slots for LOC1.
+    taskCounterPerRegion.put(LOC1.getRegionInfo().getRegionName(), new AtomicInteger(100));
+    taskCounterPerServer.put(LOC1.getServerName(), new AtomicInteger(100));
+
+    // the region was previously accepted, so it must be accepted now.
+    for (int i = 0; i != maxConcurrentTasksPerRegion * 5; ++i) {
+      ReturnCode includeCode = checker.canTakeOperation(LOC1, rowSize);
+      assertEquals(ReturnCode.INCLUDE, includeCode);
+      checker.notifyFinal(includeCode, LOC1, rowSize);
+    }
+
+    // fill the task slots for LOC3.
+    taskCounterPerRegion.put(LOC3.getRegionInfo().getRegionName(), new AtomicInteger(100));
+    taskCounterPerServer.put(LOC3.getServerName(), new AtomicInteger(100));
+
+    // no task slots.
+    for (int i = 0; i != maxConcurrentTasksPerRegion * 5; ++i) {
+      ReturnCode excludeCode = checker.canTakeOperation(LOC3, rowSize);
+      assertNotEquals(ReturnCode.INCLUDE, excludeCode);
+      checker.notifyFinal(excludeCode, LOC3, rowSize);
+    }
+
+    // release the tasks for LOC3.
+    taskCounterPerRegion.put(LOC3.getRegionInfo().getRegionName(), new AtomicInteger(0));
+    taskCounterPerServer.put(LOC3.getServerName(), new AtomicInteger(0));
+
+    // add LOC3 region.
+    ReturnCode code3 = checker.canTakeOperation(LOC3, rowSize);
+    assertEquals(ReturnCode.INCLUDE, code3);
+    checker.notifyFinal(code3, LOC3, rowSize);
+
+    // the region was previously accepted, so it must be accepted now.
+    for (int i = 0; i != maxConcurrentTasksPerRegion * 5; ++i) {
+      ReturnCode includeCode = checker.canTakeOperation(LOC3, rowSize);
+      assertEquals(ReturnCode.INCLUDE, includeCode);
+      checker.notifyFinal(includeCode, LOC3, rowSize);
+    }
+
+    checker.reset();
+    // the region was previously accepted,
+    // but the checker has been reset and the task slots for LOC1 are full.
+    // So it must be rejected now.
+    for (int i = 0; i != maxConcurrentTasksPerRegion * 5; ++i) {
+      ReturnCode includeCode = checker.canTakeOperation(LOC1, rowSize);
+      assertNotEquals(ReturnCode.INCLUDE, includeCode);
+      checker.notifyFinal(includeCode, LOC1, rowSize);
+    }
+  }
+
+  @Test
+  public void testWaitForMaximumCurrentTasks() throws Exception {
+    final AtomicInteger max = new AtomicInteger(0);
+    final CyclicBarrier barrier = new CyclicBarrier(2);
+    SimpleRequestController controller = new SimpleRequestController(HBaseConfiguration.create());
+    final AtomicLong tasks = controller.tasksInProgress;
+    Runnable runnable = () -> {
+      try {
+        barrier.await();
+        controller.waitForMaximumCurrentTasks(max.get(), 123, 1, null);
+      } catch (InterruptedIOException e) {
+        Assert.fail(e.getMessage());
+      } catch (InterruptedException e) {
+        // TODO Auto-generated catch block
+        e.printStackTrace();
+      } catch (BrokenBarrierException e) {
+        // TODO Auto-generated catch block
+        e.printStackTrace();
+      }
+    };
+    // First test that our runnable thread only exits when tasks is zero.
+    Thread t = new Thread(runnable);
+    t.start();
+    barrier.await();
+    t.join();
+    // Now assert we stay running if max == zero and tasks is > 0.
+    barrier.reset();
+    tasks.set(1000000);
+    t = new Thread(runnable);
+    t.start();
+    barrier.await();
+    while (tasks.get() > 0) {
+      assertTrue(t.isAlive());
+      tasks.set(tasks.get() - 1);
+    }
+    t.join();
+  }
+}

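Read together, the tests in this new file show how SimpleRequestController composes per-row checkers. A minimal sketch of that composition, shaped after testTaskCheckerHost above and kept in the org.apache.hadoop.hbase.client package (like the test) because the checker helpers are not public API; the limits and the ComposedCheckerSketch class are illustrative only:

package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.ServerName;

// Illustrative sketch: compose the task-count and request-size checkers and ask whether one
// Put aimed at `loc` may join the current batch.
final class ComposedCheckerSketch {
  static boolean mayInclude(HRegionLocation loc, Put put) throws IOException {
    Map<ServerName, AtomicInteger> tasksPerServer = new HashMap<>();
    Map<byte[], AtomicInteger> tasksPerRegion = new HashMap<>();
    SimpleRequestController.TaskCountChecker countChecker =
        new SimpleRequestController.TaskCountChecker(
            100,   // max total concurrent tasks
            2,     // max concurrent tasks per server
            1,     // max concurrent tasks per region
            new AtomicLong(0), tasksPerServer, tasksPerRegion);
    SimpleRequestController.RequestSizeChecker sizeChecker =
        new SimpleRequestController.RequestSizeChecker(2 * 1024 * 1024);
    RequestController.Checker checker =
        SimpleRequestController.newChecker(Arrays.asList(countChecker, sizeChecker));
    return checker.canTakeRow(loc, put) == RequestController.ReturnCode.INCLUDE;
  }
}
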
http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
index ee89609..e5ab3e8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/HConnectionTestingUtility.java
@@ -126,11 +126,8 @@ public class HConnectionTestingUtility {
     NonceGenerator ng = Mockito.mock(NonceGenerator.class);
     Mockito.when(c.getNonceGenerator()).thenReturn(ng);
     Mockito.when(c.getAsyncProcess()).thenReturn(
-      new AsyncProcess(c, conf, null, RpcRetryingCallerFactory.instantiate(conf), false,
-          RpcControllerFactory.instantiate(conf), conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-              HConstants.DEFAULT_HBASE_RPC_TIMEOUT), conf.getInt(
-                  HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-          HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT)));
+      new AsyncProcess(c, conf, RpcRetryingCallerFactory.instantiate(conf), false,
+          RpcControllerFactory.instantiate(conf)));
     Mockito.when(c.getNewRpcRetryingCallerFactory(conf)).thenReturn(
         RpcRetryingCallerFactory.instantiate(conf,
             RetryingCallerInterceptorFactory.NO_OP_INTERCEPTOR, null));

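The mock above also documents the slimmed-down AsyncProcess constructor: the thread pool and the operation/RPC timeouts are no longer constructor arguments and instead travel with each AsyncProcessTask. A minimal sketch of that construction, placed in the client package because AsyncProcess is not public API; the sketch class name is illustrative:

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;

// Illustrative sketch of the constructor shape used in the mock above.
final class AsyncProcessConstructionSketch {
  static AsyncProcess create(ClusterConnection connection, Configuration conf) {
    return new AsyncProcess(connection, conf,
        RpcRetryingCallerFactory.instantiate(conf),
        false,   // same boolean flag as in the mock above
        RpcControllerFactory.instantiate(conf));
  }
}
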
http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
index 53488ec..2c5e89d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestClientPushback.java
@@ -28,6 +28,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.client.AsyncProcessTask;
 import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
 import org.apache.hadoop.hbase.client.backoff.ExponentialClientBackoffPolicy;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
@@ -137,14 +138,20 @@ public class TestClientPushback {
     final CountDownLatch latch = new CountDownLatch(1);
     final AtomicLong endTime = new AtomicLong();
     long startTime = EnvironmentEdgeManager.currentTime();
-
-    ((HTable) table).mutator.ap.submit(null, tableName, ops, true, new Batch.Callback<Result>() {
-      @Override
-      public void update(byte[] region, byte[] row, Result result) {
+    BufferedMutatorImpl mutator = ((HTable) table).mutator;
+    Batch.Callback<Result> callback = (byte[] r, byte[] row, Result result) -> {
         endTime.set(EnvironmentEdgeManager.currentTime());
         latch.countDown();
-      }
-    }, true);
+    };
+    AsyncProcessTask<Result> task = AsyncProcessTask.newBuilder(callback)
+            .setPool(mutator.getPool())
+            .setTableName(tableName)
+            .setRowAccess(ops)
+            .setSubmittedRows(AsyncProcessTask.SubmittedRows.AT_LEAST_ONE)
+            .setOperationTimeout(conn.getConnectionConfiguration().getOperationTimeout())
+            .setRpcTimeout(60 * 1000)
+            .build();
+    mutator.getAsyncProcess().submit(task);
     // Currently the ExponentialClientBackoffPolicy under these test conditions
     // produces a backoffTime of 151 milliseconds. This is long enough so the
     // wait and related checks below are reasonable. Revisit if the backoff

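The rewritten submission above is the general pattern for callback-driven writes after this refactor: describe the work with an AsyncProcessTask and hand it to the mutator's AsyncProcess. A minimal sketch of that pattern, again in the client package since BufferedMutatorImpl and AsyncProcessTask are internal; the 60-second RPC timeout is just the value the test uses and the sketch class name is illustrative:

package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.coprocessor.Batch;

// Illustrative sketch of the callback-driven submit path shown in the diff above.
final class PushbackSubmitSketch {
  static void submit(BufferedMutatorImpl mutator, ClusterConnection conn,
      TableName tableName, List<Row> ops) throws IOException {
    Batch.Callback<Result> callback = (region, row, result) -> {
      // react to each per-row result here (the test records a timestamp and counts down a latch)
    };
    AsyncProcessTask<Result> task = AsyncProcessTask.newBuilder(callback)
        .setPool(mutator.getPool())
        .setTableName(tableName)
        .setRowAccess(ops)
        .setSubmittedRows(AsyncProcessTask.SubmittedRows.AT_LEAST_ONE)
        .setOperationTimeout(conn.getConnectionConfiguration().getOperationTimeout())
        .setRpcTimeout(60 * 1000)
        .build();
    mutator.getAsyncProcess().submit(task);
  }
}
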
http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
index 6d1e1f0..0f7f3d9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
@@ -563,9 +563,17 @@ public class TestReplicasClient {
       gets.add(g);
       Object[] results = new Object[2];
 
-      AsyncRequestFuture reqs = ap.submitAll(
-          HTable.getDefaultExecutor(HTU.getConfiguration()),
-          table.getName(), gets, null, results);
+      int operationTimeout = ((ClusterConnection) HTU.getConnection()).getConnectionConfiguration().getOperationTimeout();
+      int readTimeout = ((ClusterConnection) HTU.getConnection()).getConnectionConfiguration().getReadRpcTimeout();
+      AsyncProcessTask task = AsyncProcessTask.newBuilder()
+              .setPool(HTable.getDefaultExecutor(HTU.getConfiguration()))
+              .setTableName(table.getName())
+              .setRowAccess(gets)
+              .setResults(results)
+              .setOperationTimeout(operationTimeout)
+              .setRpcTimeout(readTimeout)
+              .build();
+      AsyncRequestFuture reqs = ap.submit(task);
       reqs.waitUntilDone();
       // verify we got the right results back
       for (Object r : results) {

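The same task-based API covers the blocking case used above for replica gets: no callback, results land in the supplied array, and waitUntilDone() blocks until every operation has been answered. A minimal sketch with illustrative names, using only the builder calls visible in the diff:

package org.apache.hadoop.hbase.client;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutorService;
import org.apache.hadoop.hbase.TableName;

// Illustrative sketch of the blocking submit path shown in the diff above.
final class BatchGetSketch {
  static Object[] batchGet(AsyncProcess ap, ExecutorService pool, ClusterConnection conn,
      TableName table, List<Get> gets) throws IOException {
    Object[] results = new Object[gets.size()];
    AsyncProcessTask task = AsyncProcessTask.newBuilder()
        .setPool(pool)
        .setTableName(table)
        .setRowAccess(gets)
        .setResults(results)
        .setOperationTimeout(conn.getConnectionConfiguration().getOperationTimeout())
        .setRpcTimeout(conn.getConnectionConfiguration().getReadRpcTimeout())
        .build();
    ap.submit(task).waitUntilDone();   // block until all gets (including replica calls) complete
    return results;
  }
}
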
http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
index be41e54..295f47a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestPerColumnFamilyFlush.java
@@ -501,7 +501,6 @@ public class TestPerColumnFamilyFlush {
           Thread.sleep(100);
         }
       }
-      table.close();
       assertEquals(maxLogs, getNumRolledLogFiles(desiredRegion));
       assertTrue(desiredRegion.getStore(FAMILY1).getMemStoreSize() > cfFlushSizeLowerBound);
       assertTrue(desiredRegion.getStore(FAMILY2).getMemStoreSize() < cfFlushSizeLowerBound);

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
index 68fffb1..380c252 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
@@ -171,22 +171,35 @@ public class TestTablePermissions {
     }
   }
 
+  /**
+   * AccessControlLists.addUserPermission may throw an exception before closing the table.
+   */
+  private void addUserPermission(Configuration conf, UserPermission userPerm, Table t) throws IOException {
+    try {
+      AccessControlLists.addUserPermission(conf, userPerm, t);
+    } finally {
+      t.close();
+    }
+  }
+
   @Test
   public void testBasicWrite() throws Exception {
     Configuration conf = UTIL.getConfiguration();
-    try (Connection connection = ConnectionFactory.createConnection(conf);
-        Table table = connection.getTable(AccessControlLists.ACL_TABLE_NAME)) {
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
       // add some permissions
-      AccessControlLists.addUserPermission(conf,
+      addUserPermission(conf,
           new UserPermission(Bytes.toBytes("george"), TEST_TABLE, null, (byte[])null,
-              UserPermission.Action.READ, UserPermission.Action.WRITE), table);
-      AccessControlLists.addUserPermission(conf,
+              UserPermission.Action.READ, UserPermission.Action.WRITE),
+              connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+      addUserPermission(conf,
           new UserPermission(Bytes.toBytes("hubert"), TEST_TABLE, null, (byte[])null,
-              UserPermission.Action.READ), table);
-      AccessControlLists.addUserPermission(conf,
+              UserPermission.Action.READ),
+          connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+      addUserPermission(conf,
           new UserPermission(Bytes.toBytes("humphrey"),
               TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER,
-              UserPermission.Action.READ), table);
+              UserPermission.Action.READ),
+          connection.getTable(AccessControlLists.ACL_TABLE_NAME));
     }
     // retrieve the same
     ListMultimap<String,TablePermission> perms =
@@ -274,23 +287,22 @@ public class TestTablePermissions {
   @Test
   public void testPersistence() throws Exception {
     Configuration conf = UTIL.getConfiguration();
-    try (Connection connection = ConnectionFactory.createConnection(conf);
-        Table table = connection.getTable(AccessControlLists.ACL_TABLE_NAME)) {
-      AccessControlLists.addUserPermission(conf,
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      addUserPermission(conf,
           new UserPermission(Bytes.toBytes("albert"), TEST_TABLE, null,
-              (byte[])null, TablePermission.Action.READ), table);
-      AccessControlLists.addUserPermission(conf,
+              (byte[])null, TablePermission.Action.READ), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+      addUserPermission(conf,
           new UserPermission(Bytes.toBytes("betty"), TEST_TABLE, null,
               (byte[])null, TablePermission.Action.READ,
-              TablePermission.Action.WRITE), table);
-      AccessControlLists.addUserPermission(conf,
+              TablePermission.Action.WRITE), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+      addUserPermission(conf,
           new UserPermission(Bytes.toBytes("clark"),
               TEST_TABLE, TEST_FAMILY,
-              TablePermission.Action.READ), table);
-      AccessControlLists.addUserPermission(conf,
+              TablePermission.Action.READ), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+      addUserPermission(conf,
           new UserPermission(Bytes.toBytes("dwight"),
               TEST_TABLE, TEST_FAMILY, TEST_QUALIFIER,
-              TablePermission.Action.WRITE), table);
+              TablePermission.Action.WRITE), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
     }
     // verify permissions survive changes in table metadata
     ListMultimap<String,TablePermission> preperms =
@@ -404,17 +416,17 @@ public class TestTablePermissions {
     Configuration conf = UTIL.getConfiguration();
 
     // add some permissions
-    try (Connection connection = ConnectionFactory.createConnection(conf);
-        Table table = connection.getTable(AccessControlLists.ACL_TABLE_NAME)) {
-      AccessControlLists.addUserPermission(conf,
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
+      addUserPermission(conf,
           new UserPermission(Bytes.toBytes("user1"),
-              Permission.Action.READ, Permission.Action.WRITE), table);
-      AccessControlLists.addUserPermission(conf,
+              Permission.Action.READ, Permission.Action.WRITE), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+      addUserPermission(conf,
           new UserPermission(Bytes.toBytes("user2"),
-              Permission.Action.CREATE), table);
-      AccessControlLists.addUserPermission(conf,
+              Permission.Action.CREATE), connection.getTable(AccessControlLists.ACL_TABLE_NAME));
+      addUserPermission(conf,
           new UserPermission(Bytes.toBytes("user3"),
-              Permission.Action.ADMIN, Permission.Action.READ, Permission.Action.CREATE), table);
+              Permission.Action.ADMIN, Permission.Action.READ, Permission.Action.CREATE),
+          connection.getTable(AccessControlLists.ACL_TABLE_NAME));
     }
     ListMultimap<String,TablePermission> perms = AccessControlLists.getTablePermissions(conf, null);
     List<TablePermission> user1Perms = perms.get("user1");
@@ -448,11 +460,11 @@ public class TestTablePermissions {
     // currently running user is the system user and should have global admin perms
     User currentUser = User.getCurrent();
     assertTrue(authManager.authorize(currentUser, Permission.Action.ADMIN));
-    try (Connection connection = ConnectionFactory.createConnection(conf);
-        Table table = connection.getTable(AccessControlLists.ACL_TABLE_NAME)) {
+    try (Connection connection = ConnectionFactory.createConnection(conf)) {
       for (int i=1; i<=50; i++) {
-        AccessControlLists.addUserPermission(conf, new UserPermission(Bytes.toBytes("testauth"+i),
-            Permission.Action.ADMIN, Permission.Action.READ, Permission.Action.WRITE), table);
+        addUserPermission(conf, new UserPermission(Bytes.toBytes("testauth"+i),
+            Permission.Action.ADMIN, Permission.Action.READ, Permission.Action.WRITE),
+            connection.getTable(AccessControlLists.ACL_TABLE_NAME));
         // make sure the system user still shows as authorized
         assertTrue("Failed current user auth check on iter "+i,
             authManager.authorize(currentUser, Permission.Action.ADMIN));
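
Note on the refactor above: each addUserPermission(...) call now obtains its own Table for the ACL table via connection.getTable(AccessControlLists.ACL_TABLE_NAME) instead of reusing a single try-with-resources handle. The body of the addUserPermission helper is not part of this hunk; the following is only a plausible sketch (an assumption, not the committed helper), delegating to the existing utility and releasing the per-call handle:

  private void addUserPermission(Configuration conf, UserPermission userPerm, Table aclTable)
      throws Exception {
    try {
      // same call the removed lines made directly
      AccessControlLists.addUserPermission(conf, userPerm, aclTable);
    } finally {
      aclTable.close(); // assumed: the helper owns the Table handle passed to it
    }
  }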


[11/50] [abbrv] hbase git commit: HBASE-17262 Refactor RpcServer so as to make it extendable and/or pluggable

Posted by sy...@apache.org.
HBASE-17262 Refactor RpcServer so as to make it extendable and/or pluggable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fc93de51
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fc93de51
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fc93de51

Branch: refs/heads/hbase-12439
Commit: fc93de51aff2c917a2b89694cf16ca37ccde6723
Parents: d787155
Author: binlijin <bi...@gmail.com>
Authored: Thu Dec 22 14:49:56 2016 +0800
Committer: binlijin <bi...@gmail.com>
Committed: Thu Dec 22 14:49:56 2016 +0800

----------------------------------------------------------------------
 .../hbase/ipc/IntegrationTestRpcClient.java     |   53 +-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java |    3 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 2127 ++----------------
 .../hadoop/hbase/ipc/RpcServerFactory.java      |   58 +
 .../hadoop/hbase/ipc/SimpleRpcServer.java       | 1997 ++++++++++++++++
 .../hbase/regionserver/RSRpcServices.java       |    3 +-
 .../hadoop/hbase/ipc/AbstractTestIPC.java       |   85 +-
 .../hadoop/hbase/ipc/TestProtoBufRpc.java       |    2 +-
 .../hbase/ipc/TestRpcHandlerException.java      |   19 +-
 .../hadoop/hbase/security/TestSecureIPC.java    |    3 +-
 .../security/token/TestTokenAuthentication.java |   11 +-
 11 files changed, 2283 insertions(+), 2078 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
index 7ce86bd..219a4e0 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
@@ -25,11 +25,6 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
-import com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
@@ -45,20 +40,20 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.codec.Codec;
+import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoRequestProto;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProtos.EchoResponseProto;
 import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto.BlockingInterface;
-import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
-import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import com.google.common.collect.Lists;
+
 @Category(IntegrationTests.class)
 public class IntegrationTestRpcClient {
 
@@ -72,26 +67,6 @@ public class IntegrationTestRpcClient {
     conf = HBaseConfiguration.create();
   }
 
-  static class TestRpcServer extends RpcServer {
-
-    TestRpcServer(Configuration conf) throws IOException {
-      this(new FifoRpcScheduler(conf, 1), conf);
-    }
-
-    TestRpcServer(RpcScheduler scheduler, Configuration conf) throws IOException {
-      super(null, "testRpcServer", Lists
-          .newArrayList(new BlockingServiceAndInterface(SERVICE, null)), new InetSocketAddress(
-          "localhost", 0), conf, scheduler);
-    }
-
-    @Override
-    public Pair<Message, CellScanner> call(BlockingService service, MethodDescriptor md,
-        Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status)
-        throws IOException {
-      return super.call(service, md, param, cellScanner, receiveTime, status);
-    }
-  }
-
   protected AbstractRpcClient<?> createRpcClient(Configuration conf, boolean isSyncClient) {
     return isSyncClient ? new BlockingRpcClient(conf) : new NettyRpcClient(conf) {
       @Override
@@ -116,8 +91,8 @@ public class IntegrationTestRpcClient {
   class Cluster {
     Random random = new Random();
     ReadWriteLock lock = new ReentrantReadWriteLock();
-    HashMap<InetSocketAddress, TestRpcServer> rpcServers = new HashMap<>();
-    List<TestRpcServer> serverList = new ArrayList<>();
+    HashMap<InetSocketAddress, RpcServer> rpcServers = new HashMap<>();
+    List<RpcServer> serverList = new ArrayList<>();
     int maxServers;
     int minServers;
 
@@ -126,14 +101,18 @@ public class IntegrationTestRpcClient {
       this.maxServers = maxServers;
     }
 
-    TestRpcServer startServer() throws IOException {
+    RpcServer startServer() throws IOException {
       lock.writeLock().lock();
       try {
         if (rpcServers.size() >= maxServers) {
           return null;
         }
 
-        TestRpcServer rpcServer = new TestRpcServer(conf);
+        RpcServer rpcServer = RpcServerFactory.createRpcServer(null,
+            "testRpcServer", Lists
+                .newArrayList(new BlockingServiceAndInterface(SERVICE, null)),
+            new InetSocketAddress("localhost", 0), conf, new FifoRpcScheduler(
+                conf, 1));
         rpcServer.start();
         InetSocketAddress address = rpcServer.getListenerAddress();
         if (address == null) {
@@ -150,7 +129,7 @@ public class IntegrationTestRpcClient {
 
     void stopRandomServer() throws Exception {
       lock.writeLock().lock();
-      TestRpcServer rpcServer = null;
+      RpcServer rpcServer = null;
       try {
         if (rpcServers.size() <= minServers) {
           return;
@@ -174,7 +153,7 @@ public class IntegrationTestRpcClient {
       }
     }
 
-    void stopServer(TestRpcServer rpcServer) throws InterruptedException {
+    void stopServer(RpcServer rpcServer) throws InterruptedException {
       InetSocketAddress address = rpcServer.getListenerAddress();
       LOG.info("Stopping server: " + address);
       rpcServer.stop();
@@ -185,7 +164,7 @@ public class IntegrationTestRpcClient {
     void stopRunning() throws InterruptedException {
       lock.writeLock().lock();
       try {
-        for (TestRpcServer rpcServer : serverList) {
+        for (RpcServer rpcServer : serverList) {
           stopServer(rpcServer);
         }
 
@@ -194,7 +173,7 @@ public class IntegrationTestRpcClient {
       }
     }
 
-    TestRpcServer getRandomServer() {
+    RpcServer getRandomServer() {
       lock.readLock().lock();
       try {
         int size = rpcServers.size();
@@ -278,7 +257,7 @@ public class IntegrationTestRpcClient {
         String message = isBigPayload ? BIG_PAYLOAD : id + numCalls;
         EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message).build();
         EchoResponseProto ret;
-        TestRpcServer server = cluster.getRandomServer();
+        RpcServer server = cluster.getRandomServer();
         try {
           sending.set(true);
           BlockingInterface stub = newBlockingStub(rpcClient, server.getListenerAddress());
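
The hunk above replaces the test-local TestRpcServer subclass with a call to the new RpcServerFactory, so the test no longer depends on a concrete RpcServer implementation (presumably SimpleRpcServer, the new class in this patch, is what the factory returns by default). A minimal sketch of the same creation pattern, mirroring the arguments shown above; the method name startDemoServer and the service parameter are illustrative only:

  import java.io.IOException;
  import java.net.InetSocketAddress;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
  import org.apache.hadoop.hbase.ipc.RpcServer;
  import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
  import org.apache.hadoop.hbase.ipc.RpcServerFactory;

  import com.google.common.collect.Lists;

  /** Starts an RpcServer on an ephemeral local port through the pluggable factory. */
  static RpcServer startDemoServer(Configuration conf, BlockingServiceAndInterface service)
      throws IOException {
    RpcServer rpcServer = RpcServerFactory.createRpcServer(
        null,                                   // no enclosing Server instance for a standalone server
        "demoRpcServer",
        Lists.newArrayList(service),
        new InetSocketAddress("localhost", 0),  // bind an ephemeral port
        conf,
        new FifoRpcScheduler(conf, 1));
    rpcServer.start();
    return rpcServer;  // callers connect via getListenerAddress() and shut down via stop()
  }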

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc93de51/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
index 5301a67..0aabc10 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
@@ -1,4 +1,3 @@
-package org.apache.hadoop.hbase.ipc;
 /**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
@@ -16,6 +15,8 @@ package org.apache.hadoop.hbase.ipc;
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.hadoop.hbase.ipc;
+
 import java.net.InetSocketAddress;
 import java.nio.channels.ClosedChannelException;
 


[48/50] [abbrv] hbase git commit: HBASE-17373: Fixing bug in moving segments from compaction pipeline to snapshot

Posted by sy...@apache.org.
HBASE-17373: Fixing bug in moving segments from compaction pipeline to snapshot

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/69ce5967
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/69ce5967
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/69ce5967

Branch: refs/heads/hbase-12439
Commit: 69ce5967fd3b8f33486239bcdd7e5e4a817691b9
Parents: c3d5f26
Author: eshcar <es...@yahoo-inc.com>
Authored: Tue Jan 3 15:28:10 2017 +0200
Committer: Michael Stack <st...@apache.org>
Committed: Tue Jan 3 19:13:52 2017 -0800

----------------------------------------------------------------------
 .../hbase/regionserver/CompactingMemStore.java  | 19 ++++--
 .../hbase/regionserver/CompactionPipeline.java  | 69 ++++++++++----------
 .../regionserver/VersionedSegmentsList.java     |  5 +-
 .../client/TestAsyncTableGetMultiThreaded.java  | 22 +++++--
 ...ableGetMultiThreadedWithBasicCompaction.java | 35 ++++++++++
 ...ableGetMultiThreadedWithEagerCompaction.java | 35 ++++++++++
 6 files changed, 137 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/69ce5967/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index f8192a2..e1289f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -213,8 +213,10 @@ public class CompactingMemStore extends AbstractMemStore {
     }
   }
 
+  // the getSegments() method is used for tests only
+  @VisibleForTesting
   @Override
-  public List<Segment> getSegments() {
+  protected List<Segment> getSegments() {
     List<Segment> pipelineList = pipeline.getSegments();
     List<Segment> list = new ArrayList<Segment>(pipelineList.size() + 2);
     list.add(this.active);
@@ -266,6 +268,7 @@ public class CompactingMemStore extends AbstractMemStore {
     long order = pipelineList.size();
     // The list of elements in pipeline + the active element + the snapshot segment
     // TODO : This will change when the snapshot is made of more than one element
+    // The order is the Segment ordinal
     List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(pipelineList.size() + 2);
     list.add(this.active.getScanner(readPt, order + 1));
     for (Segment item : pipelineList) {
@@ -374,10 +377,18 @@ public class CompactingMemStore extends AbstractMemStore {
   }
 
   private void pushTailToSnapshot() {
-    ImmutableSegment tail = pipeline.pullTail();
-    if (!tail.isEmpty()) {
-      this.snapshot = tail;
+    VersionedSegmentsList segments = pipeline.getVersionedTail();
+    pushToSnapshot(segments.getStoreSegments());
+    pipeline.swap(segments, null, false); // do not close segments as they are in snapshot now
+  }
+
+  private void pushToSnapshot(List<ImmutableSegment> segments) {
+    if(segments.isEmpty()) return;
+    if(segments.size() == 1 && !segments.get(0).isEmpty()) {
+      this.snapshot = segments.get(0);
+      return;
     }
+    // TODO else create composite snapshot
   }
 
   private RegionServicesForStores getRegionServices() {
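
Design note on the CompactingMemStore change above: the tail is published as the snapshot before it is swapped out of the pipeline, and the swap is done with closeSuffix=false because the snapshot still reads those cells (they are presumably released later, when the snapshot itself is cleared). This also matches the CompactionPipeline change below, where the memstore size counters are only adjusted when closeSuffix is true. A compact recap of the ordering, mirroring the code above (illustration only):

  VersionedSegmentsList tail = pipeline.getVersionedTail();  // 1. capture the tail plus the pipeline version
  pushToSnapshot(tail.getStoreSegments());                   // 2. publish: the snapshot now owns the data
  pipeline.swap(tail, null, false);                          // 3. drop the tail from the pipeline without
                                                             //    closing it, so snapshot scanners stay valid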

http://git-wip-us.apache.org/repos/asf/hbase/blob/69ce5967/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index 6676170..9d5df77 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -45,18 +46,14 @@ public class CompactionPipeline {
   public final static long FIXED_OVERHEAD = ClassSize
       .align(ClassSize.OBJECT + (2 * ClassSize.REFERENCE) + Bytes.SIZEOF_LONG);
   public final static long DEEP_OVERHEAD = FIXED_OVERHEAD + ClassSize.LINKEDLIST;
-  public final static long ENTRY_OVERHEAD = ClassSize.LINKEDLIST_ENTRY;
 
   private final RegionServicesForStores region;
   private LinkedList<ImmutableSegment> pipeline;
   private long version;
 
-  private static final ImmutableSegment EMPTY_MEM_STORE_SEGMENT = SegmentFactory.instance()
-      .createImmutableSegment((CellComparator) null);
-
   public CompactionPipeline(RegionServicesForStores region) {
     this.region = region;
-    this.pipeline = new LinkedList<ImmutableSegment>();
+    this.pipeline = new LinkedList<>();
     this.version = 0;
   }
 
@@ -68,31 +65,33 @@ public class CompactionPipeline {
     }
   }
 
-  public ImmutableSegment pullTail() {
+  public VersionedSegmentsList getVersionedList() {
     synchronized (pipeline){
-      if(pipeline.isEmpty()) {
-        return EMPTY_MEM_STORE_SEGMENT;
-      }
-      return removeLast();
+      List<ImmutableSegment> segmentList = new ArrayList<>(pipeline);
+      return new VersionedSegmentsList(segmentList, version);
     }
   }
 
-  public VersionedSegmentsList getVersionedList() {
+  public VersionedSegmentsList getVersionedTail() {
     synchronized (pipeline){
-      LinkedList<ImmutableSegment> segmentList = new LinkedList<ImmutableSegment>(pipeline);
-      VersionedSegmentsList res = new VersionedSegmentsList(segmentList, version);
-      return res;
+      List<ImmutableSegment> segmentList = new ArrayList<>();
+      if(!pipeline.isEmpty()) {
+        segmentList.add(0, pipeline.getLast());
+      }
+      return new VersionedSegmentsList(segmentList, version);
     }
   }
 
   /**
-   * Swaps the versioned list at the tail of the pipeline with the new compacted segment.
-   * Swapping only if there were no changes to the suffix of the list while it was compacted.
-   * @param versionedList tail of the pipeline that was compacted
-   * @param segment new compacted segment
+   * Swaps a versioned suffix of the pipeline (its tail, or the whole pipeline) with a new segment.
+   * Swapping happens only if there were no changes to the suffix of the list since the versioned
+   * list was created.
+   * @param versionedList suffix of the pipeline to be replaced; can be the tail or the whole pipeline
+   * @param segment new segment to replace the suffix. Can be null if the suffix just needs to be
+   *                removed.
    * @param closeSuffix whether to close the suffix (to release memory), as part of swapping it out
    *        During index merge op this will be false and for compaction it will be true.
-   * @return true iff swapped tail with new compacted segment
+   * @return true iff swapped tail with new segment
    */
   public boolean swap(
       VersionedSegmentsList versionedList, ImmutableSegment segment, boolean closeSuffix) {
@@ -106,26 +105,32 @@ public class CompactionPipeline {
       }
       suffix = versionedList.getStoreSegments();
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Swapping pipeline suffix with compacted item. "
+        int count = 0;
+        if(segment != null) {
+          count = segment.getCellsCount();
+        }
+        LOG.debug("Swapping pipeline suffix. "
             + "Just before the swap the number of segments in pipeline is:"
             + versionedList.getStoreSegments().size()
-            + ", and the number of cells in new segment is:" + segment.getCellsCount());
+            + ", and the number of cells in new segment is:" + count);
       }
-      swapSuffix(suffix,segment, closeSuffix);
+      swapSuffix(suffix, segment, closeSuffix);
     }
-    if (region != null) {
+    if (closeSuffix && region != null) {
       // update the global memstore size counter
       long suffixDataSize = getSegmentsKeySize(suffix);
-      long newDataSize = segment.keySize();
+      long newDataSize = 0;
+      if(segment != null) newDataSize = segment.keySize();
       long dataSizeDelta = suffixDataSize - newDataSize;
       long suffixHeapOverhead = getSegmentsHeapOverhead(suffix);
-      long newHeapOverhead = segment.heapOverhead();
+      long newHeapOverhead = 0;
+      if(segment != null) newHeapOverhead = segment.heapOverhead();
       long heapOverheadDelta = suffixHeapOverhead - newHeapOverhead;
       region.addMemstoreSize(new MemstoreSize(-dataSizeDelta, -heapOverheadDelta));
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Suffix data size: " + suffixDataSize + " compacted item data size: "
+        LOG.debug("Suffix data size: " + suffixDataSize + " new segment data size: "
             + newDataSize + ". Suffix heap overhead: " + suffixHeapOverhead
-            + " compacted item heap overhead: " + newHeapOverhead);
+            + " new segment heap overhead: " + newHeapOverhead);
       }
     }
     return true;
@@ -193,8 +198,7 @@ public class CompactionPipeline {
 
   public List<Segment> getSegments() {
     synchronized (pipeline){
-      List<Segment> res = new LinkedList<Segment>(pipeline);
-      return res;
+      return new LinkedList<>(pipeline);
     }
   }
 
@@ -230,12 +234,7 @@ public class CompactionPipeline {
       }
     }
     pipeline.removeAll(suffix);
-    pipeline.addLast(segment);
-  }
-
-  private ImmutableSegment removeLast() {
-    version++;
-    return pipeline.removeLast();
+    if(segment != null) pipeline.addLast(segment);
   }
 
   private boolean addFirst(ImmutableSegment segment) {
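
For contrast with the snapshot path above: the pre-existing caller of swap() is the in-memory compaction, which (per the javadoc) passes a real replacement segment and closeSuffix=true so the replaced segments release their memory. A hedged sketch of that mode under the generalized contract; the compactor itself is outside this patch and doCompaction below is a hypothetical stand-in:

  VersionedSegmentsList suffix = pipeline.getVersionedList();  // capture the whole pipeline plus its version
  ImmutableSegment compacted = doCompaction(suffix);           // hypothetical: build the compacted segment
  boolean swapped = pipeline.swap(suffix, compacted, true);    // replace the suffix, closing the old segments
  if (!swapped) {
    // the pipeline changed while compacting (version moved on), so the result is discarded
    // rather than clobbering segments added in the meantime
  }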

http://git-wip-us.apache.org/repos/asf/hbase/blob/69ce5967/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java
index 01160bf..ab751f3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/VersionedSegmentsList.java
@@ -18,7 +18,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -36,10 +35,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 @InterfaceAudience.Private
 public class VersionedSegmentsList {
 
-  private final LinkedList<ImmutableSegment> storeSegments;
+  private final List<ImmutableSegment> storeSegments;
   private final long version;
 
-  public VersionedSegmentsList(LinkedList<ImmutableSegment> storeSegments, long version) {
+  public VersionedSegmentsList(List<ImmutableSegment> storeSegments, long version) {
     this.storeSegments = storeSegments;
     this.version = version;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/69ce5967/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index da8141b..82fe3cd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -33,17 +33,18 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
-import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.io.ByteBufferPool;
+import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -75,11 +76,18 @@ public class TestAsyncTableGetMultiThreaded {
 
   @BeforeClass
   public static void setUp() throws Exception {
+    setUp(HColumnDescriptor.MemoryCompaction.NONE);
+  }
+
+  protected static void setUp(HColumnDescriptor.MemoryCompaction memoryCompaction) throws Exception {
     TEST_UTIL.getConfiguration().set(TABLES_ON_MASTER, "none");
     TEST_UTIL.getConfiguration().setLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 60000L);
     TEST_UTIL.getConfiguration().setLong(HBASE_RPC_READ_TIMEOUT_KEY, 1000L);
     TEST_UTIL.getConfiguration().setInt(HBASE_CLIENT_RETRIES_NUMBER, 1000);
     TEST_UTIL.getConfiguration().setInt(ByteBufferPool.MAX_POOL_SIZE_KEY, 100);
+    TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
+        String.valueOf(memoryCompaction));
+
     TEST_UTIL.startMiniCluster(5);
     SPLIT_KEYS = new byte[8][];
     for (int i = 111; i < 999; i += 111) {
@@ -103,11 +111,13 @@ public class TestAsyncTableGetMultiThreaded {
 
   private void run(AtomicBoolean stop) throws InterruptedException, ExecutionException {
     while (!stop.get()) {
-      int i = ThreadLocalRandom.current().nextInt(COUNT);
-      assertEquals(i,
-        Bytes.toInt(
-          CONN.getRawTable(TABLE_NAME).get(new Get(Bytes.toBytes(String.format("%03d", i)))).get()
-              .getValue(FAMILY, QUALIFIER)));
+      for (int i = 0; i < COUNT; i++) {
+        assertEquals(i,
+            Bytes.toInt(
+                CONN.getRawTable(TABLE_NAME).get(new Get(Bytes.toBytes(String.format("%03d", i))))
+                    .get()
+                    .getValue(FAMILY, QUALIFIER)));
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/69ce5967/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java
new file mode 100644
index 0000000..eb07875
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({ LargeTests.class, ClientTests.class })
+public class TestAsyncTableGetMultiThreadedWithBasicCompaction extends
+    TestAsyncTableGetMultiThreaded {
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    setUp(HColumnDescriptor.MemoryCompaction.BASIC);
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/69ce5967/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java
new file mode 100644
index 0000000..6fe8045
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java
@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.BeforeClass;
+import org.junit.experimental.categories.Category;
+
+@Category({ LargeTests.class, ClientTests.class })
+public class TestAsyncTableGetMultiThreadedWithEagerCompaction extends
+    TestAsyncTableGetMultiThreaded {
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    setUp(HColumnDescriptor.MemoryCompaction.EAGER);
+  }
+
+}


[24/50] [abbrv] hbase git commit: HBASE-17174 Refactor the AsyncProcess, BufferedMutatorImpl, and HTable

Posted by sy...@apache.org.
HBASE-17174 Refactor the AsyncProcess, BufferedMutatorImpl, and HTable

Signed-off-by: zhangduo <zh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8cb55c40
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8cb55c40
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8cb55c40

Branch: refs/heads/hbase-12439
Commit: 8cb55c4080206a651023f6d042fac295192f1c2b
Parents: 992e571
Author: ChiaPing Tsai <ch...@gmail.com>
Authored: Sat Dec 24 12:02:05 2016 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sat Dec 24 12:02:05 2016 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/client/AsyncProcess.java       | 704 +++--------------
 .../hadoop/hbase/client/AsyncProcessTask.java   | 229 ++++++
 .../hbase/client/AsyncRequestFutureImpl.java    |  46 +-
 .../hbase/client/BufferedMutatorImpl.java       | 165 ++--
 .../hbase/client/BufferedMutatorParams.java     |  21 +-
 .../hbase/client/ConnectionConfiguration.java   |  19 +-
 .../hbase/client/ConnectionImplementation.java  |  13 +-
 .../org/apache/hadoop/hbase/client/HTable.java  | 295 ++++---
 .../hadoop/hbase/client/HTableMultiplexer.java  |  15 +-
 .../hadoop/hbase/client/RequestController.java  | 125 +++
 .../hbase/client/RequestControllerFactory.java  |  44 ++
 .../apache/hadoop/hbase/client/RowAccess.java   |   3 +-
 .../hbase/client/SimpleRequestController.java   | 519 +++++++++++++
 .../hadoop/hbase/client/TestAsyncProcess.java   | 769 ++++++++-----------
 .../client/TestSimpleRequestController.java     | 336 ++++++++
 .../hbase/client/HConnectionTestingUtility.java |   7 +-
 .../hadoop/hbase/client/TestClientPushback.java |  19 +-
 .../hadoop/hbase/client/TestReplicasClient.java |  14 +-
 .../regionserver/TestPerColumnFamilyFlush.java  |   1 -
 .../security/access/TestTablePermissions.java   |  72 +-
 20 files changed, 2128 insertions(+), 1288 deletions(-)
----------------------------------------------------------------------
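
Most of this commit funnels the many submit()/submitAll() overloads of AsyncProcess into a single entry point that takes an AsyncProcessTask (a new class in this patch) carrying the pool, table, rows, callback and timeouts. A hedged sketch of how a caller drives the new API; pool, puts and asyncProcess stand for an ExecutorService, a batch of mutations and an AsyncProcess instance already in scope, and the builder method names are assumptions inferred from the getters the diff below calls (getPool(), getTableName(), getRowAccess(), getSubmittedRows(), getOperationTimeout(), getRpcTimeout()):

  AsyncProcessTask<Object> task = AsyncProcessTask.newBuilder()  // assumed builder entry point
      .setPool(pool)                                             // required: checkTask() rejects a null pool
      .setTableName(TableName.valueOf("demo"))
      .setRowAccess(puts)                                        // the Row/Mutation batch to send
      .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)      // ALL and AT_LEAST_ONE appear in submit()'s switch
      .setOperationTimeout(60000)                                 // must be non-negative, see checkOperationTimeout()
      .setRpcTimeout(10000)                                       // must be non-negative, see checkRpcTimeout()
      .build();
  AsyncRequestFuture future = asyncProcess.submit(task);
  future.waitUntilDone();                                         // block until all actions finish or retries are exhausted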


http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 50a2a11..d1583f5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -19,45 +19,35 @@
 
 package org.apache.hadoop.hbase.client;
 
-import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 
 import com.google.common.annotations.VisibleForTesting;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.atomic.AtomicInteger;
+import java.util.Objects;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.AsyncProcess.RowChecker.ReturnCode;
-import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
+import org.apache.hadoop.hbase.client.RequestController.ReturnCode;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdge;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * This class  allows a continuous flow of requests. It's written to be compatible with a
@@ -95,9 +85,10 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
  * </p>
  */
 @InterfaceAudience.Private
+@InterfaceStability.Evolving
 class AsyncProcess {
   private static final Log LOG = LogFactory.getLog(AsyncProcess.class);
-  protected static final AtomicLong COUNTER = new AtomicLong();
+  private static final AtomicLong COUNTER = new AtomicLong();
 
   public static final String PRIMARY_CALL_TIMEOUT_KEY = "hbase.client.primaryCallTimeout.multiget";
 
@@ -116,31 +107,6 @@ class AsyncProcess {
    */
   public static final String LOG_DETAILS_FOR_BATCH_ERROR = "hbase.client.log.batcherrors.details";
 
-  protected final int thresholdToLogUndoneTaskDetails;
-  private static final String THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS =
-      "hbase.client.threshold.log.details";
-  private static final int DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS = 10;
-  private static final int THRESHOLD_TO_LOG_REGION_DETAILS = 2;
-
-  /**
-   * The maximum size of single RegionServer.
-   */
-  public static final String HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = "hbase.client.max.perrequest.heapsize";
-
-  /**
-   * Default value of #HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE
-   */
-  public static final long DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = 4194304;
-
-  /**
-   * The maximum size of submit.
-   */
-  public static final String HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = "hbase.client.max.submit.heapsize";
-  /**
-   * Default value of #HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE
-   */
-  public static final long DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE;
-
   /**
    * Return value from a submit that didn't contain any requests.
    */
@@ -173,64 +139,42 @@ class AsyncProcess {
   };
 
   // TODO: many of the fields should be made private
-  protected final long id;
-
-  protected final ClusterConnection connection;
-  protected final RpcRetryingCallerFactory rpcCallerFactory;
-  protected final RpcControllerFactory rpcFactory;
-  protected final BatchErrors globalErrors;
-  protected final ExecutorService pool;
-
-  protected final AtomicLong tasksInProgress = new AtomicLong(0);
-  protected final ConcurrentMap<byte[], AtomicInteger> taskCounterPerRegion =
-      new ConcurrentSkipListMap<byte[], AtomicInteger>(Bytes.BYTES_COMPARATOR);
-  protected final ConcurrentMap<ServerName, AtomicInteger> taskCounterPerServer =
-      new ConcurrentHashMap<ServerName, AtomicInteger>();
-  // Start configuration settings.
-  protected final int startLogErrorsCnt;
+  final long id;
 
-  /**
-   * The number of tasks simultaneously executed on the cluster.
-   */
-  protected final int maxTotalConcurrentTasks;
+  final ClusterConnection connection;
+  private final RpcRetryingCallerFactory rpcCallerFactory;
+  final RpcControllerFactory rpcFactory;
+  final BatchErrors globalErrors;
 
-  /**
-   * The max heap size of all tasks simultaneously executed on a server.
-   */
-  protected final long maxHeapSizePerRequest;
-  protected final long maxHeapSizeSubmit;
-  /**
-   * The number of tasks we run in parallel on a single region.
-   * With 1 (the default) , we ensure that the ordering of the queries is respected: we don't start
-   * a set of operations on a region before the previous one is done. As well, this limits
-   * the pressure we put on the region server.
-   */
-  protected final int maxConcurrentTasksPerRegion;
+  // Start configuration settings.
+  final int startLogErrorsCnt;
 
-  /**
-   * The number of task simultaneously executed on a single region server.
-   */
-  protected final int maxConcurrentTasksPerServer;
-  protected final long pause;
-  protected final long pauseForCQTBE;// pause for CallQueueTooBigException, if specified
-  protected int numTries;
-  protected int serverTrackerTimeout;
-  protected int rpcTimeout;
-  protected int operationTimeout;
-  protected long primaryCallTimeoutMicroseconds;
+  final long pause;
+  final long pauseForCQTBE;// pause for CallQueueTooBigException, if specified
+  final int numTries;
+  @VisibleForTesting
+  int serverTrackerTimeout;
+  final long primaryCallTimeoutMicroseconds;
   /** Whether to log details for batch errors */
-  protected final boolean logBatchErrorDetails;
+  final boolean logBatchErrorDetails;
   // End configuration settings.
 
-  public AsyncProcess(ClusterConnection hc, Configuration conf, ExecutorService pool,
+  /**
+   * The traffic control for requests.
+   */
+  @VisibleForTesting
+  final RequestController requestController;
+  public static final String LOG_DETAILS_PERIOD = "hbase.client.log.detail.period.ms";
+  private static final int DEFAULT_LOG_DETAILS_PERIOD = 10000;
+  private final int periodToLog;
+  AsyncProcess(ClusterConnection hc, Configuration conf,
       RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors,
-      RpcControllerFactory rpcFactory, int rpcTimeout, int operationTimeout) {
+      RpcControllerFactory rpcFactory) {
     if (hc == null) {
       throw new IllegalArgumentException("ClusterConnection cannot be null.");
     }
 
     this.connection = hc;
-    this.pool = pool;
     this.globalErrors = useGlobalErrors ? new BatchErrors() : null;
 
     this.id = COUNTER.incrementAndGet();
@@ -249,42 +193,10 @@ class AsyncProcess {
     // how many times we could try in total, one more than retry number
     this.numTries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
         HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) + 1;
-    this.rpcTimeout = rpcTimeout;
-    this.operationTimeout = operationTimeout;
     this.primaryCallTimeoutMicroseconds = conf.getInt(PRIMARY_CALL_TIMEOUT_KEY, 10000);
-
-    this.maxTotalConcurrentTasks = conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-      HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS);
-    this.maxConcurrentTasksPerServer = conf.getInt(HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS,
-          HConstants.DEFAULT_HBASE_CLIENT_MAX_PERSERVER_TASKS);
-    this.maxConcurrentTasksPerRegion = conf.getInt(HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS,
-          HConstants.DEFAULT_HBASE_CLIENT_MAX_PERREGION_TASKS);
-    this.maxHeapSizePerRequest = conf.getLong(HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE,
-          DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE);
-    this.maxHeapSizeSubmit = conf.getLong(HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE, DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE);
     this.startLogErrorsCnt =
         conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT);
-
-    if (this.maxTotalConcurrentTasks <= 0) {
-      throw new IllegalArgumentException("maxTotalConcurrentTasks=" + maxTotalConcurrentTasks);
-    }
-    if (this.maxConcurrentTasksPerServer <= 0) {
-      throw new IllegalArgumentException("maxConcurrentTasksPerServer=" +
-          maxConcurrentTasksPerServer);
-    }
-    if (this.maxConcurrentTasksPerRegion <= 0) {
-      throw new IllegalArgumentException("maxConcurrentTasksPerRegion=" +
-          maxConcurrentTasksPerRegion);
-    }
-    if (this.maxHeapSizePerRequest <= 0) {
-      throw new IllegalArgumentException("maxHeapSizePerServer=" +
-          maxHeapSizePerRequest);
-    }
-
-    if (this.maxHeapSizeSubmit <= 0) {
-      throw new IllegalArgumentException("maxHeapSizeSubmit=" +
-          maxHeapSizeSubmit);
-    }
+    this.periodToLog = conf.getInt(LOG_DETAILS_PERIOD, DEFAULT_LOG_DETAILS_PERIOD);
     // Server tracker allows us to do faster, and yet useful (hopefully), retries.
     // However, if we are too useful, we might fail very quickly due to retry count limit.
     // To avoid this, we are going to cheat for now (see HBASE-7659), and calculate maximum
@@ -301,43 +213,30 @@ class AsyncProcess {
     this.rpcFactory = rpcFactory;
     this.logBatchErrorDetails = conf.getBoolean(LOG_DETAILS_FOR_BATCH_ERROR, false);
 
-    this.thresholdToLogUndoneTaskDetails =
-        conf.getInt(THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS,
-          DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS);
+    this.requestController = RequestControllerFactory.create(conf);
   }
 
   /**
-   * @return pool if non null, otherwise returns this.pool if non null, otherwise throws
-   *         RuntimeException
+   * The submitted task may not be accomplished at all if there are too many running tasks or
+   * other limits.
+   * @param <CResult> The class to cast the result
+   * @param task The setting and data
+   * @return AsyncRequestFuture
    */
-  protected ExecutorService getPool(ExecutorService pool) {
-    if (pool != null) {
-      return pool;
-    }
-    if (this.pool != null) {
-      return this.pool;
+  public <CResult> AsyncRequestFuture submit(AsyncProcessTask<CResult> task) throws InterruptedIOException {
+    AsyncRequestFuture reqFuture = checkTask(task);
+    if (reqFuture != null) {
+      return reqFuture;
+    }
+    SubmittedRows submittedRows = task.getSubmittedRows() == null ? SubmittedRows.ALL : task.getSubmittedRows();
+    switch (submittedRows) {
+      case ALL:
+        return submitAll(task);
+      case AT_LEAST_ONE:
+        return submit(task, true);
+      default:
+        return submit(task, false);
     }
-    throw new RuntimeException("Neither AsyncProcess nor request have ExecutorService");
-  }
-
-  /**
-   * See #submit(ExecutorService, TableName, RowAccess, boolean, Batch.Callback, boolean).
-   * Uses default ExecutorService for this AP (must have been created with one).
-   */
-  public <CResult> AsyncRequestFuture submit(TableName tableName,
-      final RowAccess<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback,
-      boolean needResults) throws InterruptedIOException {
-    return submit(null, tableName, rows, atLeastOne, callback, needResults);
-  }
-  /**
-   * See {@link #submit(ExecutorService, TableName, RowAccess, boolean, Batch.Callback, boolean)}.
-   * Uses the {@link ListRowAccess} to wrap the {@link List}.
-   */
-  public <CResult> AsyncRequestFuture submit(ExecutorService pool, TableName tableName,
-      List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback,
-      boolean needResults) throws InterruptedIOException {
-    return submit(pool, tableName, new ListRowAccess(rows), atLeastOne,
-      callback, needResults);
   }
 
   /**
@@ -345,20 +244,13 @@ class AsyncProcess {
    * list. Does not send requests to replicas (not currently used for anything other
    * than streaming puts anyway).
    *
-   * @param pool ExecutorService to use.
-   * @param tableName The table for which this request is needed.
-   * @param callback Batch callback. Only called on success (94 behavior).
-   * @param needResults Whether results are needed, or can be discarded.
-   * @param rows - the submitted row. Modified by the method: we remove the rows we took.
+   * @param task The setting and data
    * @param atLeastOne true if we should submit at least a subset.
    */
-  public <CResult> AsyncRequestFuture submit(ExecutorService pool, TableName tableName,
-      RowAccess<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback,
-      boolean needResults) throws InterruptedIOException {
-    if (rows.isEmpty()) {
-      return NO_REQS_RESULT;
-    }
-
+  private <CResult> AsyncRequestFuture submit(AsyncProcessTask<CResult> task,
+    boolean atLeastOne) throws InterruptedIOException {
+    TableName tableName = task.getTableName();
+    RowAccess<? extends Row> rows = task.getRowAccess();
     Map<ServerName, MultiAction> actionsByServer =
         new HashMap<ServerName, MultiAction>();
     List<Action> retainedActions = new ArrayList<Action>(rows.size());
@@ -369,11 +261,11 @@ class AsyncProcess {
     // Location errors that happen before we decide what requests to take.
     List<Exception> locationErrors = null;
     List<Integer> locationErrorRows = null;
-    RowCheckerHost checker = createRowCheckerHost();
+    RequestController.Checker checker = requestController.newChecker();
     boolean firstIter = true;
     do {
       // Wait until there is at least one slot for a new task.
-      waitForMaximumCurrentTasks(maxTotalConcurrentTasks - 1, tableName.getNameAsString());
+      requestController.waitForFreeSlot(id, periodToLog, getLogger(tableName, -1));
       int posInList = -1;
       if (!firstIter) {
         checker.reset();
@@ -406,8 +298,7 @@ class AsyncProcess {
           it.remove();
           break; // Backward compat: we stop considering actions on location error.
         }
-        long rowSize = (r instanceof Mutation) ? ((Mutation) r).heapSize() : 0;
-        ReturnCode code = checker.canTakeOperation(loc, rowSize);
+        ReturnCode code = checker.canTakeRow(loc, r);
         if (code == ReturnCode.END) {
           break;
         }
@@ -426,29 +317,14 @@ class AsyncProcess {
 
     if (retainedActions.isEmpty()) return NO_REQS_RESULT;
 
-    return submitMultiActions(tableName, retainedActions, nonceGroup, callback, null, needResults,
-        locationErrors, locationErrorRows, actionsByServer, pool);
+    return submitMultiActions(task, retainedActions, nonceGroup,
+        locationErrors, locationErrorRows, actionsByServer);
   }
 
-  private RowCheckerHost createRowCheckerHost() {
-    return new RowCheckerHost(Arrays.asList(
-        new TaskCountChecker(maxTotalConcurrentTasks,
-          maxConcurrentTasksPerServer,
-          maxConcurrentTasksPerRegion,
-          tasksInProgress,
-          taskCounterPerServer,
-          taskCounterPerRegion)
-        , new RequestSizeChecker(maxHeapSizePerRequest)
-        , new SubmittedSizeChecker(maxHeapSizeSubmit)
-    ));
-  }
-  <CResult> AsyncRequestFuture submitMultiActions(TableName tableName,
-      List<Action> retainedActions, long nonceGroup, Batch.Callback<CResult> callback,
-      Object[] results, boolean needResults, List<Exception> locationErrors,
-      List<Integer> locationErrorRows, Map<ServerName, MultiAction> actionsByServer,
-      ExecutorService pool) {
-    AsyncRequestFutureImpl<CResult> ars = createAsyncRequestFuture(
-      tableName, retainedActions, nonceGroup, pool, callback, results, needResults, null, -1);
+  <CResult> AsyncRequestFuture submitMultiActions(AsyncProcessTask task,
+      List<Action> retainedActions, long nonceGroup, List<Exception> locationErrors,
+      List<Integer> locationErrorRows, Map<ServerName, MultiAction> actionsByServer) {
+    AsyncRequestFutureImpl<CResult> ars = createAsyncRequestFuture(task, retainedActions, nonceGroup);
     // Add location errors if any
     if (locationErrors != null) {
       for (int i = 0; i < locationErrors.size(); ++i) {
@@ -462,14 +338,6 @@ class AsyncProcess {
     return ars;
   }
 
-  public void setRpcTimeout(int rpcTimeout) {
-    this.rpcTimeout = rpcTimeout;
-  }
-
-  public void setOperationTimeout(int operationTimeout) {
-    this.operationTimeout = operationTimeout;
-  }
-
   /**
    * Helper that is used when grouping the actions per region server.
    *
@@ -493,24 +361,13 @@ class AsyncProcess {
     multiAction.add(regionName, action);
   }
 
-  public <CResult> AsyncRequestFuture submitAll(ExecutorService pool, TableName tableName,
-      List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results) {
-    return submitAll(pool, tableName, rows, callback, results, null, -1);
-  }
   /**
    * Submit immediately the list of rows, whatever the server status. Kept for backward
    * compatibility: it allows to be used with the batch interface that return an array of objects.
-   *
-   * @param pool ExecutorService to use.
-   * @param tableName name of the table for which the submission is made.
-   * @param rows the list of rows.
-   * @param callback the callback.
-   * @param results Optional array to return the results thru; backward compat.
-   * @param rpcTimeout rpc timeout for this batch, set -1 if want to use current setting.
+   * @param task The setting and data
    */
-  public <CResult> AsyncRequestFuture submitAll(ExecutorService pool, TableName tableName,
-      List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results,
-      CancellableRegionServerCallable callable, int rpcTimeout) {
+  private <CResult> AsyncRequestFuture submitAll(AsyncProcessTask task) {
+    RowAccess<? extends Row> rows = task.getRowAccess();
     List<Action> actions = new ArrayList<Action>(rows.size());
 
     // The position will be used by the processBatch to match the object array returned.
@@ -528,93 +385,78 @@ class AsyncProcess {
       setNonce(ng, r, action);
       actions.add(action);
     }
-    AsyncRequestFutureImpl<CResult> ars = createAsyncRequestFuture(
-        tableName, actions, ng.getNonceGroup(), getPool(pool), callback, results, results != null,
-        callable, rpcTimeout);
+    AsyncRequestFutureImpl<CResult> ars = createAsyncRequestFuture(task, actions, ng.getNonceGroup());
     ars.groupAndSendMultiAction(actions, 1);
     return ars;
   }
 
+  private <CResult> AsyncRequestFuture checkTask(AsyncProcessTask<CResult> task) {
+    if (task.getRowAccess() == null || task.getRowAccess().isEmpty()) {
+      return NO_REQS_RESULT;
+    }
+    Objects.requireNonNull(task.getPool(), "The pool can't be NULL");
+    checkOperationTimeout(task.getOperationTimeout());
+    checkRpcTimeout(task.getRpcTimeout());
+    return null;
+  }
+
   private void setNonce(NonceGenerator ng, Row r, Action action) {
     if (!(r instanceof Append) && !(r instanceof Increment)) return;
     action.setNonce(ng.newNonce()); // Action handles NO_NONCE, so it's ok if ng is disabled.
   }
 
-  protected <CResult> AsyncRequestFutureImpl<CResult> createAsyncRequestFuture(
-      TableName tableName, List<Action> actions, long nonceGroup, ExecutorService pool,
-      Batch.Callback<CResult> callback, Object[] results, boolean needResults,
-      CancellableRegionServerCallable callable, int rpcTimeout) {
-    return new AsyncRequestFutureImpl<CResult>(
-        tableName, actions, nonceGroup, getPool(pool), needResults,
-        results, callback, callable, operationTimeout,
-        rpcTimeout > 0 ? rpcTimeout : this.rpcTimeout, this);
+  private int checkTimeout(String name, int timeout) {
+    if (timeout < 0) {
+      throw new RuntimeException("The " + name + " must not be negative,"
+        + " current value is " + timeout);
+    }
+    return timeout;
+  }
+  private int checkOperationTimeout(int operationTimeout) {
+    return checkTimeout("operation timeout", operationTimeout);
+  }
+
+  private int checkRpcTimeout(int rpcTimeout) {
+    return checkTimeout("rpc timeout", rpcTimeout);
+  }
+
+  @VisibleForTesting
+  <CResult> AsyncRequestFutureImpl<CResult> createAsyncRequestFuture(
+      AsyncProcessTask task, List<Action> actions, long nonceGroup) {
+    return new AsyncRequestFutureImpl<>(task, actions, nonceGroup, this);
   }
 
   /** Wait until the async does not have more than max tasks in progress. */
-  protected void waitForMaximumCurrentTasks(int max, String tableName)
+  protected void waitForMaximumCurrentTasks(int max, TableName tableName)
       throws InterruptedIOException {
-    waitForMaximumCurrentTasks(max, tasksInProgress, id, tableName);
+    requestController.waitForMaximumCurrentTasks(max, id, periodToLog,
+      getLogger(tableName, max));
   }
 
-  // Break out this method so testable
-  @VisibleForTesting
-  void waitForMaximumCurrentTasks(int max, final AtomicLong tasksInProgress, final long id,
-      String tableName) throws InterruptedIOException {
-    long lastLog = EnvironmentEdgeManager.currentTime();
-    long currentInProgress, oldInProgress = Long.MAX_VALUE;
-    while ((currentInProgress = tasksInProgress.get()) > max) {
-      if (oldInProgress != currentInProgress) { // Wait for in progress to change.
-        long now = EnvironmentEdgeManager.currentTime();
-        if (now > lastLog + 10000) {
-          lastLog = now;
-          LOG.info("#" + id + ", waiting for some tasks to finish. Expected max="
-              + max + ", tasksInProgress=" + currentInProgress +
-              " hasError=" + hasError() + tableName == null ? "" : ", tableName=" + tableName);
-          if (currentInProgress <= thresholdToLogUndoneTaskDetails) {
-            logDetailsOfUndoneTasks(currentInProgress);
-          }
-        }
-      }
-      oldInProgress = currentInProgress;
-      try {
-        synchronized (tasksInProgress) {
-          if (tasksInProgress.get() == oldInProgress) {
-            tasksInProgress.wait(10);
-          }
-        }
-      } catch (InterruptedException e) {
-        throw new InterruptedIOException("#" + id + ", interrupted." +
-            " currentNumberOfTask=" + currentInProgress);
-      }
-    }
+  private Consumer<Long> getLogger(TableName tableName, long max) {
+    return (currentInProgress) -> {
+      LOG.info("#" + id + (max < 0 ? ", waiting for any free slot"
+      : ", waiting for some tasks to finish. Expected max="
+      + max) + ", tasksInProgress=" + currentInProgress +
+      " hasError=" + hasError() + tableName == null ? "" : ", tableName=" + tableName);
+    };
   }
 
-  void logDetailsOfUndoneTasks(long taskInProgress) {
-    ArrayList<ServerName> servers = new ArrayList<ServerName>();
-    for (Map.Entry<ServerName, AtomicInteger> entry : taskCounterPerServer.entrySet()) {
-      if (entry.getValue().get() > 0) {
-        servers.add(entry.getKey());
-      }
-    }
-    LOG.info("Left over " + taskInProgress + " task(s) are processed on server(s): " + servers);
-    if (taskInProgress <= THRESHOLD_TO_LOG_REGION_DETAILS) {
-      ArrayList<String> regions = new ArrayList<String>();
-      for (Map.Entry<byte[], AtomicInteger> entry : taskCounterPerRegion.entrySet()) {
-        if (entry.getValue().get() > 0) {
-          regions.add(Bytes.toString(entry.getKey()));
-        }
-      }
-      LOG.info("Regions against which left over task(s) are processed: " + regions);
-    }
+  void incTaskCounters(Collection<byte[]> regions, ServerName sn) {
+    requestController.incTaskCounters(regions, sn);
   }
 
+
+  void decTaskCounters(Collection<byte[]> regions, ServerName sn) {
+    requestController.decTaskCounters(regions, sn);
+  }
   /**
    * Only used w/useGlobalErrors ctor argument, for HTable backward compat.
    * @return Whether there were any errors in any request since the last time
    *          {@link #waitForAllPreviousOpsAndReset(List, String)} was called, or AP was created.
    */
   public boolean hasError() {
-    return globalErrors.hasErrors();
+    return globalErrors != null && globalErrors.hasErrors();
   }
 
   /**
@@ -628,9 +470,9 @@ class AsyncProcess {
    *          was called, or AP was created.
    */
   public RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset(
-      List<Row> failedRows, String tableName) throws InterruptedIOException {
+      List<Row> failedRows, TableName tableName) throws InterruptedIOException {
     waitForMaximumCurrentTasks(0, tableName);
-    if (!globalErrors.hasErrors()) {
+    if (globalErrors == null || !globalErrors.hasErrors()) {
       return null;
     }
     if (failedRows != null) {
@@ -642,41 +484,12 @@ class AsyncProcess {
   }
 
   /**
-   * increment the tasks counters for a given set of regions. MT safe.
-   */
-  protected void incTaskCounters(Collection<byte[]> regions, ServerName sn) {
-    tasksInProgress.incrementAndGet();
-
-    computeIfAbsent(taskCounterPerServer, sn, AtomicInteger::new).incrementAndGet();
-
-    for (byte[] regBytes : regions) {
-      computeIfAbsent(taskCounterPerRegion, regBytes, AtomicInteger::new).incrementAndGet();
-    }
-  }
-
-  /**
-   * Decrements the counters for a given region and the region server. MT Safe.
-   */
-  protected void decTaskCounters(Collection<byte[]> regions, ServerName sn) {
-    for (byte[] regBytes : regions) {
-      AtomicInteger regionCnt = taskCounterPerRegion.get(regBytes);
-      regionCnt.decrementAndGet();
-    }
-
-    taskCounterPerServer.get(sn).decrementAndGet();
-    tasksInProgress.decrementAndGet();
-    synchronized (tasksInProgress) {
-      tasksInProgress.notifyAll();
-    }
-  }
-
-  /**
    * Create a caller. Isolated to be easily overridden in the tests.
    */
   @VisibleForTesting
   protected RpcRetryingCaller<AbstractResponse> createCaller(
       CancellableRegionServerCallable callable, int rpcTimeout) {
-    return rpcCallerFactory.<AbstractResponse> newCaller(rpcTimeout);
+    return rpcCallerFactory.<AbstractResponse> newCaller(checkRpcTimeout(rpcTimeout));
   }
 
 
@@ -687,7 +500,7 @@ class AsyncProcess {
    * We may benefit from connection-wide tracking of server errors.
    * @return ServerErrorTracker to use, null if there is no ServerErrorTracker on this connection
    */
-  protected ConnectionImplementation.ServerErrorTracker createServerErrorTracker() {
+  ConnectionImplementation.ServerErrorTracker createServerErrorTracker() {
     return new ConnectionImplementation.ServerErrorTracker(
         this.serverTrackerTimeout, this.numTries);
   }
@@ -696,283 +509,4 @@ class AsyncProcess {
     return (row instanceof Get) && (((Get)row).getConsistency() == Consistency.TIMELINE);
   }
 
-  /**
-   * Collect all advices from checkers and make the final decision.
-   */
-  @VisibleForTesting
-  static class RowCheckerHost {
-    private final List<RowChecker> checkers;
-    private boolean isEnd = false;
-    RowCheckerHost(final List<RowChecker> checkers) {
-      this.checkers = checkers;
-    }
-    void reset() throws InterruptedIOException {
-      isEnd = false;
-      InterruptedIOException e = null;
-      for (RowChecker checker : checkers) {
-        try {
-          checker.reset();
-        } catch (InterruptedIOException ex) {
-          e = ex;
-        }
-      }
-      if (e != null) {
-        throw e;
-      }
-    }
-    ReturnCode canTakeOperation(HRegionLocation loc, long rowSize) {
-      if (isEnd) {
-        return ReturnCode.END;
-      }
-      ReturnCode code = ReturnCode.INCLUDE;
-      for (RowChecker checker : checkers) {
-        switch (checker.canTakeOperation(loc, rowSize)) {
-          case END:
-            isEnd = true;
-            code = ReturnCode.END;
-            break;
-          case SKIP:
-            code = ReturnCode.SKIP;
-            break;
-          case INCLUDE:
-          default:
-            break;
-        }
-        if (code == ReturnCode.END) {
-          break;
-        }
-      }
-      for (RowChecker checker : checkers) {
-        checker.notifyFinal(code, loc, rowSize);
-      }
-      return code;
-    }
-  }
-
-  /**
-   * Provide a way to control the flow of rows iteration.
-   */
-  // Visible for Testing. Adding @VisibleForTesting here doesn't work for some reason.
-  interface RowChecker {
-    enum ReturnCode {
-      /**
-       * Accept current row.
-       */
-      INCLUDE,
-      /**
-       * Skip current row.
-       */
-      SKIP,
-      /**
-       * No more row can be included.
-       */
-      END
-    };
-    ReturnCode canTakeOperation(HRegionLocation loc, long rowSize);
-    /**
-     * Add the final ReturnCode to the checker.
-     * The ReturnCode may be reversed, so the checker need the final decision to update
-     * the inner state.
-     */
-    void notifyFinal(ReturnCode code, HRegionLocation loc, long rowSize);
-    /**
-     * Reset the inner state.
-     */
-    void reset() throws InterruptedIOException ;
-  }
-
-  /**
-   * limit the heapsize of total submitted data.
-   * Reduce the limit of heapsize for submitting quickly
-   * if there is no running task.
-   */
-  @VisibleForTesting
-  static class SubmittedSizeChecker implements RowChecker {
-    private final long maxHeapSizeSubmit;
-    private long heapSize = 0;
-    SubmittedSizeChecker(final long maxHeapSizeSubmit) {
-      this.maxHeapSizeSubmit = maxHeapSizeSubmit;
-    }
-    @Override
-    public ReturnCode canTakeOperation(HRegionLocation loc, long rowSize) {
-      if (heapSize >= maxHeapSizeSubmit) {
-        return ReturnCode.END;
-      }
-      return ReturnCode.INCLUDE;
-    }
-
-    @Override
-    public void notifyFinal(ReturnCode code, HRegionLocation loc, long rowSize) {
-      if (code == ReturnCode.INCLUDE) {
-        heapSize += rowSize;
-      }
-    }
-
-    @Override
-    public void reset() {
-      heapSize = 0;
-    }
-  }
-  /**
-   * limit the max number of tasks in an AsyncProcess.
-   */
-  @VisibleForTesting
-  static class TaskCountChecker implements RowChecker {
-    private static final long MAX_WAITING_TIME = 1000; //ms
-    private final Set<HRegionInfo> regionsIncluded = new HashSet<>();
-    private final Set<ServerName> serversIncluded = new HashSet<>();
-    private final int maxConcurrentTasksPerRegion;
-    private final int maxTotalConcurrentTasks;
-    private final int maxConcurrentTasksPerServer;
-    private final Map<byte[], AtomicInteger> taskCounterPerRegion;
-    private final Map<ServerName, AtomicInteger> taskCounterPerServer;
-    private final Set<byte[]> busyRegions = new TreeSet<>(Bytes.BYTES_COMPARATOR);
-    private final AtomicLong tasksInProgress;
-    TaskCountChecker(final int maxTotalConcurrentTasks,
-      final int maxConcurrentTasksPerServer,
-      final int maxConcurrentTasksPerRegion,
-      final AtomicLong tasksInProgress,
-      final Map<ServerName, AtomicInteger> taskCounterPerServer,
-      final Map<byte[], AtomicInteger> taskCounterPerRegion) {
-      this.maxTotalConcurrentTasks = maxTotalConcurrentTasks;
-      this.maxConcurrentTasksPerRegion = maxConcurrentTasksPerRegion;
-      this.maxConcurrentTasksPerServer = maxConcurrentTasksPerServer;
-      this.taskCounterPerRegion = taskCounterPerRegion;
-      this.taskCounterPerServer = taskCounterPerServer;
-      this.tasksInProgress = tasksInProgress;
-    }
-    @Override
-    public void reset() throws InterruptedIOException {
-      // prevent the busy-waiting
-      waitForRegion();
-      regionsIncluded.clear();
-      serversIncluded.clear();
-      busyRegions.clear();
-    }
-    private void waitForRegion() throws InterruptedIOException {
-      if (busyRegions.isEmpty()) {
-        return;
-      }
-      EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
-      final long start = ee.currentTime();
-      while ((ee.currentTime() - start) <= MAX_WAITING_TIME) {
-        for (byte[] region : busyRegions) {
-          AtomicInteger count = taskCounterPerRegion.get(region);
-          if (count == null || count.get() < maxConcurrentTasksPerRegion) {
-            return;
-          }
-        }
-        try {
-          synchronized (tasksInProgress) {
-            tasksInProgress.wait(10);
-          }
-        } catch (InterruptedException e) {
-          throw new InterruptedIOException("Interrupted." +
-              " tasksInProgress=" + tasksInProgress);
-        }
-      }
-    }
-    /**
-     * 1) check the regions is allowed.
-     * 2) check the concurrent tasks for regions.
-     * 3) check the total concurrent tasks.
-     * 4) check the concurrent tasks for server.
-     * @param loc
-     * @param rowSize
-     * @return
-     */
-    @Override
-    public ReturnCode canTakeOperation(HRegionLocation loc, long rowSize) {
-
-      HRegionInfo regionInfo = loc.getRegionInfo();
-      if (regionsIncluded.contains(regionInfo)) {
-        // We already know what to do with this region.
-        return ReturnCode.INCLUDE;
-      }
-      AtomicInteger regionCnt = taskCounterPerRegion.get(loc.getRegionInfo().getRegionName());
-      if (regionCnt != null && regionCnt.get() >= maxConcurrentTasksPerRegion) {
-        // Too many tasks on this region already.
-        return ReturnCode.SKIP;
-      }
-      int newServers = serversIncluded.size()
-        + (serversIncluded.contains(loc.getServerName()) ? 0 : 1);
-      if ((newServers + tasksInProgress.get()) > maxTotalConcurrentTasks) {
-        // Too many tasks.
-        return ReturnCode.SKIP;
-      }
-      AtomicInteger serverCnt = taskCounterPerServer.get(loc.getServerName());
-      if (serverCnt != null && serverCnt.get() >= maxConcurrentTasksPerServer) {
-        // Too many tasks for this individual server
-        return ReturnCode.SKIP;
-      }
-      return ReturnCode.INCLUDE;
-    }
-
-    @Override
-    public void notifyFinal(ReturnCode code, HRegionLocation loc, long rowSize) {
-      if (code == ReturnCode.INCLUDE) {
-        regionsIncluded.add(loc.getRegionInfo());
-        serversIncluded.add(loc.getServerName());
-      }
-      busyRegions.add(loc.getRegionInfo().getRegionName());
-    }
-  }
-
-  /**
-   * limit the request size for each regionserver.
-   */
-  @VisibleForTesting
-  static class RequestSizeChecker implements RowChecker {
-    private final long maxHeapSizePerRequest;
-    private final Map<ServerName, Long> serverRequestSizes = new HashMap<>();
-    RequestSizeChecker(final long maxHeapSizePerRequest) {
-      this.maxHeapSizePerRequest = maxHeapSizePerRequest;
-    }
-    @Override
-    public void reset() {
-      serverRequestSizes.clear();
-    }
-    @Override
-    public ReturnCode canTakeOperation(HRegionLocation loc, long rowSize) {
-      // Is it ok for limit of request size?
-      long currentRequestSize = serverRequestSizes.containsKey(loc.getServerName()) ?
-        serverRequestSizes.get(loc.getServerName()) : 0L;
-      // accept at least one request
-      if (currentRequestSize == 0 || currentRequestSize + rowSize <= maxHeapSizePerRequest) {
-        return ReturnCode.INCLUDE;
-      }
-      return ReturnCode.SKIP;
-    }
-
-    @Override
-    public void notifyFinal(ReturnCode code, HRegionLocation loc, long rowSize) {
-      if (code == ReturnCode.INCLUDE) {
-        long currentRequestSize = serverRequestSizes.containsKey(loc.getServerName()) ?
-          serverRequestSizes.get(loc.getServerName()) : 0L;
-        serverRequestSizes.put(loc.getServerName(), currentRequestSize + rowSize);
-      }
-    }
-  }
-
-  public static class ListRowAccess<T> implements RowAccess<T> {
-    private final List<T> data;
-    ListRowAccess(final List<T> data) {
-      this.data = data;
-    }
-
-    @Override
-    public int size() {
-      return data.size();
-    }
-
-    @Override
-    public boolean isEmpty() {
-      return data.isEmpty();
-    }
-
-    @Override
-    public Iterator<T> iterator() {
-      return data.iterator();
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java
new file mode 100644
index 0000000..eda1db2
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java
@@ -0,0 +1,229 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+
+/**
+ * Contains the attributes of a task which will be executed
+ * by {@link org.apache.hadoop.hbase.client.AsyncProcess}.
+ * The attributes will be validated by AsyncProcess.
+ * It's intended for advanced client applications.
+ * @param <T> The type of response from server-side
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class AsyncProcessTask<T> {
+  /**
+   * How many of the submitted rows must be accepted for processing.
+   * The AsyncProcess applies traffic control, so it may reject some of the rows.
+   */
+  public enum SubmittedRows {
+    ALL,
+    AT_LEAST_ONE,
+    NORMAL
+  }
+  public static <T> Builder<T> newBuilder(final Batch.Callback<T> callback) {
+    return new Builder<>(callback);
+  }
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+
+  public static class Builder<T> {
+
+    private ExecutorService pool;
+    private TableName tableName;
+    private RowAccess<? extends Row> rows;
+    private SubmittedRows submittedRows = SubmittedRows.ALL;
+    private Batch.Callback<T> callback;
+    private boolean needResults;
+    private int rpcTimeout;
+    private int operationTimeout;
+    private CancellableRegionServerCallable callable;
+    private Object[] results;
+
+    private Builder() {
+    }
+
+    private Builder(Batch.Callback<T> callback) {
+      this.callback = callback;
+    }
+
+    Builder<T> setResults(Object[] results) {
+      this.results = results;
+      if (results != null && results.length != 0) {
+        setNeedResults(true);
+      }
+      return this;
+    }
+
+    public Builder<T> setPool(ExecutorService pool) {
+      this.pool = pool;
+      return this;
+    }
+
+    public Builder<T> setRpcTimeout(int rpcTimeout) {
+      this.rpcTimeout = rpcTimeout;
+      return this;
+    }
+
+    public Builder<T> setOperationTimeout(int operationTimeout) {
+      this.operationTimeout = operationTimeout;
+      return this;
+    }
+
+    public Builder<T> setTableName(TableName tableName) {
+      this.tableName = tableName;
+      return this;
+    }
+
+    public Builder<T> setRowAccess(List<? extends Row> rows) {
+      this.rows = new ListRowAccess<>(rows);
+      return this;
+    }
+
+    public Builder<T> setRowAccess(RowAccess<? extends Row> rows) {
+      this.rows = rows;
+      return this;
+    }
+
+    public Builder<T> setSubmittedRows(SubmittedRows submittedRows) {
+      this.submittedRows = submittedRows;
+      return this;
+    }
+
+    public Builder<T> setNeedResults(boolean needResults) {
+      this.needResults = needResults;
+      return this;
+    }
+
+    Builder<T> setCallable(CancellableRegionServerCallable callable) {
+      this.callable = callable;
+      return this;
+    }
+
+    public AsyncProcessTask<T> build() {
+      return new AsyncProcessTask<>(pool, tableName, rows, submittedRows,
+              callback, callable, needResults, rpcTimeout, operationTimeout, results);
+    }
+  }
+  private final ExecutorService pool;
+  private final TableName tableName;
+  private final RowAccess<? extends Row> rows;
+  private final SubmittedRows submittedRows;
+  private final Batch.Callback<T> callback;
+  private final CancellableRegionServerCallable callable;
+  private final boolean needResults;
+  private final int rpcTimeout;
+  private final int operationTimeout;
+  private final Object[] results;
+  AsyncProcessTask(AsyncProcessTask<T> task) {
+    this(task.getPool(), task.getTableName(), task.getRowAccess(),
+        task.getSubmittedRows(), task.getCallback(), task.getCallable(),
+        task.getNeedResults(), task.getRpcTimeout(), task.getOperationTimeout(),
+        task.getResults());
+  }
+  AsyncProcessTask(ExecutorService pool, TableName tableName,
+          RowAccess<? extends Row> rows, SubmittedRows size, Batch.Callback<T> callback,
+          CancellableRegionServerCallable callable, boolean needResults,
+          int rpcTimeout, int operationTimeout, Object[] results) {
+    this.pool = pool;
+    this.tableName = tableName;
+    this.rows = rows;
+    this.submittedRows = size;
+    this.callback = callback;
+    this.callable = callable;
+    this.needResults = needResults;
+    this.rpcTimeout = rpcTimeout;
+    this.operationTimeout = operationTimeout;
+    this.results = results;
+  }
+
+  public int getOperationTimeout() {
+    return operationTimeout;
+  }
+
+  public ExecutorService getPool() {
+    return pool;
+  }
+
+  public TableName getTableName() {
+    return tableName;
+  }
+
+  public RowAccess<? extends Row> getRowAccess() {
+    return rows;
+  }
+
+  public SubmittedRows getSubmittedRows() {
+    return submittedRows;
+  }
+
+  public Batch.Callback<T> getCallback() {
+    return callback;
+  }
+
+  CancellableRegionServerCallable getCallable() {
+    return callable;
+  }
+
+  Object[] getResults() {
+    return results;
+  }
+
+  public boolean getNeedResults() {
+    return needResults;
+  }
+
+  public int getRpcTimeout() {
+    return rpcTimeout;
+  }
+
+  static class ListRowAccess<T> implements RowAccess<T> {
+
+    private final List<T> data;
+
+    ListRowAccess(final List<T> data) {
+      this.data = data;
+    }
+
+    @Override
+    public int size() {
+      return data.size();
+    }
+
+    @Override
+    public boolean isEmpty() {
+      return data.isEmpty();
+    }
+
+    @Override
+    public Iterator<T> iterator() {
+      return data.iterator();
+    }
+  }
+}
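
A rough usage sketch of the new builder (not part of the diff; the pool, table
name, action list, timeout values and the package-private AsyncProcess instance
"ap" are assumed, mirroring how HTable#batch uses this further below):

    // Build an immutable task description and hand it to AsyncProcess.
    Object[] results = new Object[actions.size()];
    AsyncProcessTask task = AsyncProcessTask.newBuilder()
        .setPool(pool)
        .setTableName(tableName)
        .setRowAccess(actions)                 // List<? extends Row>
        .setResults(results)                   // also turns on needResults
        .setRpcTimeout(writeRpcTimeout)
        .setOperationTimeout(operationTimeout)
        .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
        .build();
    AsyncRequestFuture ars = ap.submit(task);  // replaces the old submitAll overloads
    ars.waitUntilDone();
    if (ars.hasError()) {
      throw ars.getErrors();
    }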

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index d176ce1..036196e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -300,11 +300,11 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
   private final int[] replicaGetIndices;
   private final boolean hasAnyReplicaGets;
   private final long nonceGroup;
-  private CancellableRegionServerCallable currentCallable;
-  private int operationTimeout;
-  private int rpcTimeout;
+  private final CancellableRegionServerCallable currentCallable;
+  private final int operationTimeout;
+  private final int rpcTimeout;
   private final Map<ServerName, List<Long>> heapSizesByServer = new HashMap<>();
-  protected AsyncProcess asyncProcess;
+  private final AsyncProcess asyncProcess;
 
   /**
    * For {@link AsyncRequestFutureImpl#manageError(int, Row, Retry, Throwable, ServerName)}. Only
@@ -339,32 +339,27 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
     }
   }
 
-
-
-  public AsyncRequestFutureImpl(TableName tableName, List<Action> actions, long nonceGroup,
-      ExecutorService pool, boolean needResults, Object[] results, Batch.Callback<CResult> callback,
-      CancellableRegionServerCallable callable, int operationTimeout, int rpcTimeout,
-      AsyncProcess asyncProcess) {
-    this.pool = pool;
-    this.callback = callback;
+  public AsyncRequestFutureImpl(AsyncProcessTask task, List<Action> actions,
+      long nonceGroup, AsyncProcess asyncProcess) {
+    this.pool = task.getPool();
+    this.callback = task.getCallback();
     this.nonceGroup = nonceGroup;
-    this.tableName = tableName;
+    this.tableName = task.getTableName();
     this.actionsInProgress.set(actions.size());
-    if (results != null) {
-      assert needResults;
-      if (results.length != actions.size()) {
+    if (task.getResults() == null) {
+      results = task.getNeedResults() ? new Object[actions.size()] : null;
+    } else {
+      if (task.getResults().length != actions.size()) {
         throw new AssertionError("results.length");
       }
-      this.results = results;
+      this.results = task.getResults();
       for (int i = 0; i != this.results.length; ++i) {
         results[i] = null;
       }
-    } else {
-      this.results = needResults ? new Object[actions.size()] : null;
     }
     List<Integer> replicaGetIndices = null;
     boolean hasAnyReplicaGets = false;
-    if (needResults) {
+    if (results != null) {
       // Check to see if any requests might require replica calls.
       // We expect that many requests will consist of all or no multi-replica gets; in such
       // cases we would just use a boolean (hasAnyReplicaGets). If there's a mix, we will
@@ -414,10 +409,10 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
     this.errorsByServer = createServerErrorTracker();
     this.errors = (asyncProcess.globalErrors != null)
         ? asyncProcess.globalErrors : new BatchErrors();
-    this.operationTimeout = operationTimeout;
-    this.rpcTimeout = rpcTimeout;
-    this.currentCallable = callable;
-    if (callable == null) {
+    this.operationTimeout = task.getOperationTimeout();
+    this.rpcTimeout = task.getRpcTimeout();
+    this.currentCallable = task.getCallable();
+    if (task.getCallable() == null) {
       tracker = new RetryingTimeTracker().start();
     }
   }
@@ -1246,9 +1241,6 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
           lastLog = now;
           LOG.info("#" + asyncProcess.id + ", waiting for " + currentInProgress
               + "  actions to finish on table: " + tableName);
-          if (currentInProgress <= asyncProcess.thresholdToLogUndoneTaskDetails) {
-            asyncProcess.logDetailsOfUndoneTasks(currentInProgress);
-          }
         }
       }
       synchronized (actionsInProgress) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index 0085767..2a55de9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -19,12 +19,9 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants; // Needed for write rpc timeout
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.Collections;
@@ -36,6 +33,8 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 
 /**
  * <p>
@@ -67,61 +66,70 @@ public class BufferedMutatorImpl implements BufferedMutator {
       "hbase.client.bufferedmutator.classname";
 
   private static final Log LOG = LogFactory.getLog(BufferedMutatorImpl.class);
-  
+
   private final ExceptionListener listener;
 
-  protected ClusterConnection connection; // non-final so can be overridden in test
   private final TableName tableName;
-  private volatile Configuration conf;
-
-  @VisibleForTesting
-  final ConcurrentLinkedQueue<Mutation> writeAsyncBuffer = new ConcurrentLinkedQueue<Mutation>();
-  @VisibleForTesting
-  AtomicLong currentWriteBufferSize = new AtomicLong(0);
 
+  private final Configuration conf;
+  private final ConcurrentLinkedQueue<Mutation> writeAsyncBuffer = new ConcurrentLinkedQueue<>();
+  private final AtomicLong currentWriteBufferSize = new AtomicLong(0);
   /**
    * Count the size of {@link BufferedMutatorImpl#writeAsyncBuffer}.
    * The {@link ConcurrentLinkedQueue#size()} is NOT a constant-time operation.
    */
-  @VisibleForTesting
-  AtomicInteger undealtMutationCount = new AtomicInteger(0);
-  private long writeBufferSize;
+  private final AtomicInteger undealtMutationCount = new AtomicInteger(0);
+  private volatile long writeBufferSize;
   private final int maxKeyValueSize;
-  private boolean closed = false;
   private final ExecutorService pool;
-  private int writeRpcTimeout; // needed to pass in through AsyncProcess constructor
-  private int operationTimeout;
+  private final AtomicInteger rpcTimeout;
+  private final AtomicInteger operationTimeout;
+  private final boolean cleanupPoolOnClose;
+  private volatile boolean closed = false;
+  private final AsyncProcess ap;
 
   @VisibleForTesting
-  protected AsyncProcess ap; // non-final so can be overridden in test
-
-  BufferedMutatorImpl(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory,
-      RpcControllerFactory rpcFactory, BufferedMutatorParams params) {
+  BufferedMutatorImpl(ClusterConnection conn, BufferedMutatorParams params, AsyncProcess ap) {
     if (conn == null || conn.isClosed()) {
       throw new IllegalArgumentException("Connection is null or closed.");
     }
-
     this.tableName = params.getTableName();
-    this.connection = conn;
-    this.conf = connection.getConfiguration();
-    this.pool = params.getPool();
+    this.conf = conn.getConfiguration();
     this.listener = params.getListener();
-
+    if (params.getPool() == null) {
+      this.pool = HTable.getDefaultExecutor(conf);
+      cleanupPoolOnClose = true;
+    } else {
+      this.pool = params.getPool();
+      cleanupPoolOnClose = false;
+    }
     ConnectionConfiguration tableConf = new ConnectionConfiguration(conf);
     this.writeBufferSize = params.getWriteBufferSize() != BufferedMutatorParams.UNSET ?
         params.getWriteBufferSize() : tableConf.getWriteBufferSize();
     this.maxKeyValueSize = params.getMaxKeyValueSize() != BufferedMutatorParams.UNSET ?
         params.getMaxKeyValueSize() : tableConf.getMaxKeyValueSize();
 
-    this.writeRpcTimeout = conn.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
-        conn.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-            HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
-    this.operationTimeout = conn.getConfiguration().getInt(
-        HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-    // puts need to track errors globally due to how the APIs currently work.
-    ap = new AsyncProcess(connection, conf, pool, rpcCallerFactory, true, rpcFactory,
-        writeRpcTimeout, operationTimeout);
+    this.rpcTimeout = new AtomicInteger(params.getRpcTimeout() != BufferedMutatorParams.UNSET ?
+        params.getRpcTimeout() : conn.getConnectionConfiguration().getWriteRpcTimeout());
+    this.operationTimeout = new AtomicInteger(params.getOperationTimeout() != BufferedMutatorParams.UNSET ?
+        params.getOperationTimeout() : conn.getConnectionConfiguration().getOperationTimeout());
+    this.ap = ap;
+  }
+  BufferedMutatorImpl(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory,
+      RpcControllerFactory rpcFactory, BufferedMutatorParams params) {
+    this(conn, params,
+      // puts need to track errors globally due to how the APIs currently work.
+      new AsyncProcess(conn, conn.getConfiguration(), rpcCallerFactory, true, rpcFactory));
+  }
+
+  @VisibleForTesting
+  ExecutorService getPool() {
+    return pool;
+  }
+
+  @VisibleForTesting
+  AsyncProcess getAsyncProcess() {
+    return ap;
   }
 
   @Override
@@ -193,22 +201,22 @@ public class BufferedMutatorImpl implements BufferedMutator {
       // As we can have an operation in progress even if the buffer is empty, we call
       // backgroundFlushCommits at least one time.
       backgroundFlushCommits(true);
-      this.pool.shutdown();
-      boolean terminated;
-      int loopCnt = 0;
-      do {
-        // wait until the pool has terminated
-        terminated = this.pool.awaitTermination(60, TimeUnit.SECONDS);
-        loopCnt += 1;
-        if (loopCnt >= 10) {
-          LOG.warn("close() failed to terminate pool after 10 minutes. Abandoning pool.");
-          break;
-        }
-      } while (!terminated);
-
+      if (cleanupPoolOnClose) {
+        this.pool.shutdown();
+        boolean terminated;
+        int loopCnt = 0;
+        do {
+          // wait until the pool has terminated
+          terminated = this.pool.awaitTermination(60, TimeUnit.SECONDS);
+          loopCnt += 1;
+          if (loopCnt >= 10) {
+            LOG.warn("close() failed to terminate pool after 10 minutes. Abandoning pool.");
+            break;
+          }
+        } while (!terminated);
+      }
     } catch (InterruptedException e) {
       LOG.warn("waitForTermination interrupted");
-
     } finally {
       this.closed = true;
     }
@@ -239,8 +247,9 @@ public class BufferedMutatorImpl implements BufferedMutator {
 
     if (!synchronous) {
       QueueRowAccess taker = new QueueRowAccess();
+      AsyncProcessTask task = wrapAsyncProcessTask(taker);
       try {
-        ap.submit(tableName, taker, true, null, false);
+        ap.submit(task);
         if (ap.hasError()) {
           LOG.debug(tableName + ": One or more of the operations have failed -"
               + " waiting for all operation in progress to finish (successfully or not)");
@@ -251,17 +260,17 @@ public class BufferedMutatorImpl implements BufferedMutator {
     }
     if (synchronous || ap.hasError()) {
       QueueRowAccess taker = new QueueRowAccess();
+      AsyncProcessTask task = wrapAsyncProcessTask(taker);
       try {
         while (!taker.isEmpty()) {
-          ap.submit(tableName, taker, true, null, false);
+          ap.submit(task);
           taker.reset();
         }
       } finally {
         taker.restoreRemainder();
       }
-
       RetriesExhaustedWithDetailsException error =
-          ap.waitForAllPreviousOpsAndReset(null, tableName.getNameAsString());
+          ap.waitForAllPreviousOpsAndReset(null, tableName);
       if (error != null) {
         if (listener == null) {
           throw error;
@@ -273,8 +282,38 @@ public class BufferedMutatorImpl implements BufferedMutator {
   }
 
   /**
+   * Reuse the AsyncProcessTask when calling {@link BufferedMutatorImpl#backgroundFlushCommits(boolean)}.
+   * @param taker provides access to the inner buffer.
+   * @return An AsyncProcessTask which always returns the latest rpc and operation timeout.
+   */
+  private AsyncProcessTask wrapAsyncProcessTask(QueueRowAccess taker) {
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+        .setPool(pool)
+        .setTableName(tableName)
+        .setRowAccess(taker)
+        .setSubmittedRows(AsyncProcessTask.SubmittedRows.AT_LEAST_ONE)
+        .build();
+    return new AsyncProcessTask(task) {
+      @Override
+      public int getRpcTimeout() {
+        return rpcTimeout.get();
+      }
+
+      @Override
+      public int getOperationTimeout() {
+        return operationTimeout.get();
+      }
+    };
+  }
+  /**
    * This is used for legacy purposes in {@link HTable#setWriteBufferSize(long)} only. This ought
    * not be called for production uses.
+   * If the new buffer size is smaller than the currently buffered data,
+   * {@link BufferedMutatorImpl#flush()} will be called.
+   * @param writeBufferSize The max size of internal buffer where data is stored.
+   * @throws org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException
+   * if an I/O error occurs and there are too many retries.
+   * @throws java.io.InterruptedIOException if the I/O task is interrupted.
    * @deprecated Going away when we drop public support for {@link HTable}.
    */
   @Deprecated
@@ -295,15 +334,23 @@ public class BufferedMutatorImpl implements BufferedMutator {
   }
 
   @Override
-  public void setRpcTimeout(int timeout) {
-    this.writeRpcTimeout = timeout;
-    ap.setRpcTimeout(timeout);
+  public void setRpcTimeout(int rpcTimeout) {
+    this.rpcTimeout.set(rpcTimeout);
   }
 
   @Override
-  public void setOperationTimeout(int timeout) {
-    this.operationTimeout = timeout;
-    ap.setOperationTimeout(operationTimeout);
+  public void setOperationTimeout(int operationTimeout) {
+    this.operationTimeout.set(operationTimeout);
+  }
+
+  @VisibleForTesting
+  long getCurrentWriteBufferSize() {
+    return currentWriteBufferSize.get();
+  }
+
+  @VisibleForTesting
+  int size() {
+    return undealtMutationCount.get();
   }
 
   private class QueueRowAccess implements RowAccess<Row> {
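
The wrapAsyncProcessTask hunk above is the subtle part of this change: the built
task is immutable, so the mutator hands AsyncProcess an anonymous subclass whose
timeout getters read the live AtomicInteger fields, which is how setRpcTimeout and
setOperationTimeout still take effect on the next flush. A reduced sketch of the
pattern (standalone fragment; field names and values are assumptions):

    // Immutable task built once; the anonymous wrapper defers only the two
    // timeout getters to mutable holders owned by the caller.
    final AtomicInteger rpcTimeout = new AtomicInteger(60000);
    final AtomicInteger operationTimeout = new AtomicInteger(120000);
    AsyncProcessTask built = AsyncProcessTask.newBuilder()
        .setPool(pool)
        .setTableName(tableName)
        .setRowAccess(rows)
        .setSubmittedRows(AsyncProcessTask.SubmittedRows.AT_LEAST_ONE)
        .build();
    AsyncProcessTask task = new AsyncProcessTask(built) {
      @Override
      public int getRpcTimeout() {
        return rpcTimeout.get();               // always the latest value
      }
      @Override
      public int getOperationTimeout() {
        return operationTimeout.get();
      }
    };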

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
index 17c69ec..9c901e2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
@@ -39,7 +39,8 @@ public class BufferedMutatorParams implements Cloneable {
   private int maxKeyValueSize = UNSET;
   private ExecutorService pool = null;
   private String implementationClassName = null;
-
+  private int rpcTimeout = UNSET;
+  private int operationTimeout = UNSET;
   private BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() {
     @Override
     public void onException(RetriesExhaustedWithDetailsException exception,
@@ -61,6 +62,24 @@ public class BufferedMutatorParams implements Cloneable {
     return writeBufferSize;
   }
 
+  public BufferedMutatorParams rpcTimeout(final int rpcTimeout) {
+    this.rpcTimeout = rpcTimeout;
+    return this;
+  }
+
+  public int getRpcTimeout() {
+    return rpcTimeout;
+  }
+
+  public BufferedMutatorParams operationTimeout(final int operationTimeout) {
+    this.operationTimeout = operationTimeout;
+    return this;
+  }
+
+  public int getOperationTimeout() {
+    return operationTimeout;
+  }
+
   /**
    * Override the write buffer size specified by the provided {@link Connection}'s
    * {@link org.apache.hadoop.conf.Configuration} instance, via the configuration key

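With the two new parameters, callers can pin per-mutator timeouts up front instead
of mutating the BufferedMutator afterwards. A short sketch of the intended usage
(the Connection and the concrete values are assumptions; unset parameters keep
falling back to the connection-level defaults, which is what UNSET signals):

    // Per-mutator rpc and operation timeouts supplied through the params object.
    BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("t1"))
        .writeBufferSize(4 * 1024 * 1024)
        .rpcTimeout(30000)
        .operationTimeout(120000);
    try (BufferedMutator mutator = connection.getBufferedMutator(params)) {
      mutator.mutate(new Put(Bytes.toBytes("r1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    }
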
http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
index 35bebae..41f5baf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
@@ -42,7 +42,8 @@ public class ConnectionConfiguration {
   private final int replicaCallTimeoutMicroSecondScan;
   private final int retries;
   private final int maxKeyValueSize;
-
+  private final int readRpcTimeout;
+  private final int writeRpcTimeout;
     // toggle for async/sync prefetch
   private final boolean clientScannerAsyncPrefetch;
 
@@ -80,6 +81,12 @@ public class ConnectionConfiguration {
        Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH);
 
     this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, MAX_KEYVALUE_SIZE_DEFAULT);
+
+    this.readRpcTimeout = conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY,
+        conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
+
+    this.writeRpcTimeout = conf.getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
+        conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
   }
 
   /**
@@ -99,6 +106,16 @@ public class ConnectionConfiguration {
     this.retries = HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER;
     this.clientScannerAsyncPrefetch = Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH;
     this.maxKeyValueSize = MAX_KEYVALUE_SIZE_DEFAULT;
+    this.readRpcTimeout = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+    this.writeRpcTimeout = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+  }
+
+  public int getReadRpcTimeout() {
+    return readRpcTimeout;
+  }
+
+  public int getWriteRpcTimeout() {
+    return writeRpcTimeout;
   }
 
   public long getWriteBufferSize() {

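The two new fields resolve the read and write rpc timeouts once per connection,
each falling back to the generic hbase.rpc.timeout key and then to the hard
default. A small standalone illustration of that fallback chain (values invented):

    // Read/write rpc timeouts fall back to hbase.rpc.timeout when the specific
    // keys are absent, mirroring the constructor above.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.rpc.timeout", 30000);         // generic fallback
    conf.setInt("hbase.rpc.read.timeout", 10000);    // read-specific override
    int readRpcTimeout = conf.getInt("hbase.rpc.read.timeout",
        conf.getInt("hbase.rpc.timeout", 60000));    // -> 10000
    int writeRpcTimeout = conf.getInt("hbase.rpc.write.timeout",
        conf.getInt("hbase.rpc.timeout", 60000));    // -> 30000, no write key set
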
http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index a597be3..ceac3fb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -249,7 +249,7 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     this.rpcControllerFactory = RpcControllerFactory.instantiate(conf);
     this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(conf, interceptor, this.stats);
     this.backoffPolicy = ClientBackoffPolicyFactory.create(conf);
-    this.asyncProcess = createAsyncProcess(this.conf);
+    this.asyncProcess = new AsyncProcess(this, conf, rpcCallerFactory, false, rpcControllerFactory);
     if (conf.getBoolean(CLIENT_SIDE_METRICS_ENABLED_KEY, false)) {
       this.metrics = new MetricsConnection(this);
     } else {
@@ -1833,17 +1833,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
     metaCache.clearCache(regionInfo);
   }
 
-  // For tests to override.
-  protected AsyncProcess createAsyncProcess(Configuration conf) {
-    // No default pool available.
-    int rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-    int operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-    return new AsyncProcess(this, conf, batchPool, rpcCallerFactory, false, rpcControllerFactory,
-        rpcTimeout, operationTimeout);
-  }
-
   @Override
   public AsyncProcess getAsyncProcess() {
     return asyncProcess;

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index dd11abf..fd5eda3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -103,27 +103,28 @@ import org.apache.hadoop.hbase.util.Threads;
 @InterfaceStability.Stable
 public class HTable implements Table {
   private static final Log LOG = LogFactory.getLog(HTable.class);
-  protected ClusterConnection connection;
+  private static final Consistency DEFAULT_CONSISTENCY = Consistency.STRONG;
+  private final ClusterConnection connection;
   private final TableName tableName;
-  private volatile Configuration configuration;
-  private ConnectionConfiguration connConfiguration;
-  protected BufferedMutatorImpl mutator;
+  private final Configuration configuration;
+  private final ConnectionConfiguration connConfiguration;
+  @VisibleForTesting
+  BufferedMutatorImpl mutator;
   private boolean closed = false;
-  protected int scannerCaching;
-  protected long scannerMaxResultSize;
-  private ExecutorService pool;  // For Multi & Scan
+  private final int scannerCaching;
+  private final long scannerMaxResultSize;
+  private final ExecutorService pool;  // For Multi & Scan
   private int operationTimeout; // global timeout for each blocking method with retrying rpc
   private int readRpcTimeout; // timeout for each read rpc request
   private int writeRpcTimeout; // timeout for each write rpc request
   private final boolean cleanupPoolOnClose; // shutdown the pool in close()
-  private final boolean cleanupConnectionOnClose; // close the connection in close()
-  private Consistency defaultConsistency = Consistency.STRONG;
-  private HRegionLocator locator;
+  private final HRegionLocator locator;
 
   /** The Async process for batch */
-  protected AsyncProcess multiAp;
-  private RpcRetryingCallerFactory rpcCallerFactory;
-  private RpcControllerFactory rpcControllerFactory;
+  @VisibleForTesting
+  AsyncProcess multiAp;
+  private final RpcRetryingCallerFactory rpcCallerFactory;
+  private final RpcControllerFactory rpcControllerFactory;
 
   // Marked Private @since 1.0
   @InterfaceAudience.Private
@@ -167,22 +168,42 @@ public class HTable implements Table {
       throw new IllegalArgumentException("Given table name is null");
     }
     this.tableName = tableName;
-    this.cleanupConnectionOnClose = false;
     this.connection = connection;
     this.configuration = connection.getConfiguration();
-    this.connConfiguration = tableConfig;
-    this.pool = pool;
+    if (tableConfig == null) {
+      connConfiguration = new ConnectionConfiguration(configuration);
+    } else {
+      connConfiguration = tableConfig;
+    }
     if (pool == null) {
       this.pool = getDefaultExecutor(this.configuration);
       this.cleanupPoolOnClose = true;
     } else {
+      this.pool = pool;
       this.cleanupPoolOnClose = false;
     }
+    if (rpcCallerFactory == null) {
+      this.rpcCallerFactory = connection.getNewRpcRetryingCallerFactory(configuration);
+    } else {
+      this.rpcCallerFactory = rpcCallerFactory;
+    }
 
-    this.rpcCallerFactory = rpcCallerFactory;
-    this.rpcControllerFactory = rpcControllerFactory;
+    if (rpcControllerFactory == null) {
+      this.rpcControllerFactory = RpcControllerFactory.instantiate(configuration);
+    } else {
+      this.rpcControllerFactory = rpcControllerFactory;
+    }
+
+    this.operationTimeout = tableName.isSystemTable() ?
+        connConfiguration.getMetaOperationTimeout() : connConfiguration.getOperationTimeout();
+    this.readRpcTimeout = connConfiguration.getReadRpcTimeout();
+    this.writeRpcTimeout = connConfiguration.getWriteRpcTimeout();
+    this.scannerCaching = connConfiguration.getScannerCaching();
+    this.scannerMaxResultSize = connConfiguration.getScannerMaxResultSize();
 
-    this.finishSetup();
+    // puts need to track errors globally due to how the APIs currently work.
+    multiAp = this.connection.getAsyncProcess();
+    this.locator = new HRegionLocator(tableName, connection);
   }
 
   /**
@@ -190,20 +211,23 @@ public class HTable implements Table {
    * @throws IOException
    */
   @VisibleForTesting
-  protected HTable(ClusterConnection conn, BufferedMutatorParams params) throws IOException {
+  protected HTable(ClusterConnection conn, BufferedMutatorImpl mutator) throws IOException {
     connection = conn;
-    tableName = params.getTableName();
-    connConfiguration = new ConnectionConfiguration(connection.getConfiguration());
+    this.tableName = mutator.getName();
+    this.configuration = connection.getConfiguration();
+    connConfiguration = new ConnectionConfiguration(configuration);
     cleanupPoolOnClose = false;
-    cleanupConnectionOnClose = false;
-    // used from tests, don't trust the connection is real
-    this.mutator = new BufferedMutatorImpl(conn, null, null, params);
-    this.readRpcTimeout = conn.getConfiguration().getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY,
-        conn.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-            HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
-    this.writeRpcTimeout = conn.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
-        conn.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-            HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
+    this.mutator = mutator;
+    this.operationTimeout = tableName.isSystemTable() ?
+        connConfiguration.getMetaOperationTimeout() : connConfiguration.getOperationTimeout();
+    this.readRpcTimeout = connConfiguration.getReadRpcTimeout();
+    this.writeRpcTimeout = connConfiguration.getWriteRpcTimeout();
+    this.scannerCaching = connConfiguration.getScannerCaching();
+    this.scannerMaxResultSize = connConfiguration.getScannerMaxResultSize();
+    this.rpcControllerFactory = null;
+    this.rpcCallerFactory = null;
+    this.pool = mutator.getPool();
+    this.locator = null;
   }
 
   /**
@@ -214,36 +238,6 @@ public class HTable implements Table {
   }
 
   /**
-   * setup this HTable's parameter based on the passed configuration
-   */
-  private void finishSetup() throws IOException {
-    if (connConfiguration == null) {
-      connConfiguration = new ConnectionConfiguration(configuration);
-    }
-
-    this.operationTimeout = tableName.isSystemTable() ?
-        connConfiguration.getMetaOperationTimeout() : connConfiguration.getOperationTimeout();
-    this.readRpcTimeout = configuration.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY,
-        configuration.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-            HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
-    this.writeRpcTimeout = configuration.getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
-        configuration.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-            HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
-    this.scannerCaching = connConfiguration.getScannerCaching();
-    this.scannerMaxResultSize = connConfiguration.getScannerMaxResultSize();
-    if (this.rpcCallerFactory == null) {
-      this.rpcCallerFactory = connection.getNewRpcRetryingCallerFactory(configuration);
-    }
-    if (this.rpcControllerFactory == null) {
-      this.rpcControllerFactory = RpcControllerFactory.instantiate(configuration);
-    }
-
-    // puts need to track errors globally due to how the APIs currently work.
-    multiAp = this.connection.getAsyncProcess();
-    this.locator = new HRegionLocator(getName(), connection);
-  }
-
-  /**
    * {@inheritDoc}
    */
   @Override
@@ -423,7 +417,7 @@ public class HTable implements Table {
       get = ReflectionUtils.newInstance(get.getClass(), get);
       get.setCheckExistenceOnly(checkExistenceOnly);
       if (get.getConsistency() == null){
-        get.setConsistency(defaultConsistency);
+        get.setConsistency(DEFAULT_CONSISTENCY);
       }
     }
 
@@ -483,13 +477,37 @@ public class HTable implements Table {
   @Override
   public void batch(final List<? extends Row> actions, final Object[] results)
       throws InterruptedException, IOException {
-    batch(actions, results, -1);
+    int rpcTimeout = writeRpcTimeout;
+    boolean hasRead = false;
+    boolean hasWrite = false;
+    for (Row action : actions) {
+      if (action instanceof Mutation) {
+        hasWrite = true;
+      } else {
+        hasRead = true;
+      }
+      if (hasRead && hasWrite) {
+        break;
+      }
+    }
+    if (hasRead && !hasWrite) {
+      rpcTimeout = readRpcTimeout;
+    }
+    batch(actions, results, rpcTimeout);
   }
 
   public void batch(final List<? extends Row> actions, final Object[] results, int rpcTimeout)
       throws InterruptedException, IOException {
-    AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, actions, null, results, null,
-        rpcTimeout);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(pool)
+            .setTableName(tableName)
+            .setRowAccess(actions)
+            .setResults(results)
+            .setRpcTimeout(rpcTimeout)
+            .setOperationTimeout(operationTimeout)
+            .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = multiAp.submit(task);
     ars.waitUntilDone();
     if (ars.hasError()) {
       throw ars.getErrors();
@@ -509,8 +527,20 @@ public class HTable implements Table {
   public static <R> void doBatchWithCallback(List<? extends Row> actions, Object[] results,
     Callback<R> callback, ClusterConnection connection, ExecutorService pool, TableName tableName)
     throws InterruptedIOException, RetriesExhaustedWithDetailsException {
-    AsyncRequestFuture ars = connection.getAsyncProcess().submitAll(
-      pool, tableName, actions, callback, results);
+    int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout();
+    int writeTimeout = connection.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY,
+        connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
+            HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
+    AsyncProcessTask<R> task = AsyncProcessTask.newBuilder(callback)
+            .setPool(pool)
+            .setTableName(tableName)
+            .setRowAccess(actions)
+            .setResults(results)
+            .setOperationTimeout(operationTimeout)
+            .setRpcTimeout(writeTimeout)
+            .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = connection.getAsyncProcess().submit(task);
     ars.waitUntilDone();
     if (ars.hasError()) {
       throw ars.getErrors();
@@ -536,8 +566,16 @@ public class HTable implements Table {
       }
     };
     List<Delete> rows = Collections.singletonList(delete);
-    AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, rows,
-        null, null, callable, writeRpcTimeout);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(pool)
+            .setTableName(tableName)
+            .setRowAccess(rows)
+            .setCallable(callable)
+            .setRpcTimeout(writeRpcTimeout)
+            .setOperationTimeout(operationTimeout)
+            .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = multiAp.submit(task);
     ars.waitUntilDone();
     if (ars.hasError()) {
       throw ars.getErrors();
@@ -615,8 +653,16 @@ public class HTable implements Table {
         return ResponseConverter.getResults(request, response, getRpcControllerCellScanner());
       }
     };
-    AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, rm.getMutations(),
-        null, null, callable, writeRpcTimeout);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(pool)
+            .setTableName(tableName)
+            .setRowAccess(rm.getMutations())
+            .setCallable(callable)
+            .setRpcTimeout(writeRpcTimeout)
+            .setOperationTimeout(operationTimeout)
+            .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = multiAp.submit(task);
     ars.waitUntilDone();
     if (ars.hasError()) {
       throw ars.getErrors();
@@ -795,8 +841,18 @@ public class HTable implements Table {
     };
     List<Delete> rows = Collections.singletonList(delete);
     Object[] results = new Object[1];
-    AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, rows,
-        null, results, callable, -1);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(pool)
+            .setTableName(tableName)
+            .setRowAccess(rows)
+            .setCallable(callable)
+            // TODO any better timeout?
+            .setRpcTimeout(Math.max(readRpcTimeout, writeRpcTimeout))
+            .setOperationTimeout(operationTimeout)
+            .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
+            .setResults(results)
+            .build();
+    AsyncRequestFuture ars = multiAp.submit(task);
     ars.waitUntilDone();
     if (ars.hasError()) {
       throw ars.getErrors();
@@ -839,8 +895,18 @@ public class HTable implements Table {
      *  It is excessive to send such a large array, but that is required by the framework right now
      * */
     Object[] results = new Object[rm.getMutations().size()];
-    AsyncRequestFuture ars = multiAp.submitAll(pool, tableName, rm.getMutations(),
-      null, results, callable, -1);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(pool)
+            .setTableName(tableName)
+            .setRowAccess(rm.getMutations())
+            .setResults(results)
+            .setCallable(callable)
+            // TODO any better timeout?
+            .setRpcTimeout(Math.max(readRpcTimeout, writeRpcTimeout))
+            .setOperationTimeout(operationTimeout)
+            .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = multiAp.submit(task);
     ars.waitUntilDone();
     if (ars.hasError()) {
       throw ars.getErrors();
@@ -926,6 +992,10 @@ public class HTable implements Table {
       return;
     }
     flushCommits();
+    if (mutator != null) {
+      mutator.close();
+      mutator = null;
+    }
     if (cleanupPoolOnClose) {
       this.pool.shutdown();
       try {
@@ -939,11 +1009,6 @@ public class HTable implements Table {
         LOG.warn("waitForTermination interrupted");
       }
     }
-    if (cleanupConnectionOnClose) {
-      if (this.connection != null) {
-        this.connection.close();
-      }
-    }
     this.closed = true;
   }
 
@@ -1102,7 +1167,6 @@ public class HTable implements Table {
     if (mutator != null) {
       mutator.setOperationTimeout(operationTimeout);
     }
-    multiAp.setOperationTimeout(operationTimeout);
   }
 
   @Override
@@ -1134,7 +1198,6 @@ public class HTable implements Table {
     if (mutator != null) {
       mutator.setRpcTimeout(writeRpcTimeout);
     }
-    multiAp.setRpcTimeout(writeRpcTimeout);
   }
 
   @Override
@@ -1217,37 +1280,41 @@ public class HTable implements Table {
     Object[] results = new Object[execs.size()];
 
     AsyncProcess asyncProcess =
-        new AsyncProcess(connection, configuration, pool,
+        new AsyncProcess(connection, configuration,
             RpcRetryingCallerFactory.instantiate(configuration, connection.getStatisticsTracker()),
-            true, RpcControllerFactory.instantiate(configuration), readRpcTimeout,
-            operationTimeout);
-
-    AsyncRequestFuture future = asyncProcess.submitAll(null, tableName, execs,
-        new Callback<ClientProtos.CoprocessorServiceResult>() {
-          @Override
-          public void update(byte[] region, byte[] row,
-                              ClientProtos.CoprocessorServiceResult serviceResult) {
-            if (LOG.isTraceEnabled()) {
-              LOG.trace("Received result for endpoint " + methodDescriptor.getFullName() +
-                  ": region=" + Bytes.toStringBinary(region) +
-                  ", row=" + Bytes.toStringBinary(row) +
-                  ", value=" + serviceResult.getValue().getValue());
-            }
-            try {
-              Message.Builder builder = responsePrototype.newBuilderForType();
-              org.apache.hadoop.hbase.protobuf.ProtobufUtil.mergeFrom(builder,
-                  serviceResult.getValue().getValue().toByteArray());
-              callback.update(region, row, (R) builder.build());
-            } catch (IOException e) {
-              LOG.error("Unexpected response type from endpoint " + methodDescriptor.getFullName(),
-                  e);
-              callbackErrorExceptions.add(e);
-              callbackErrorActions.add(execsByRow.get(row));
-              callbackErrorServers.add("null");
-            }
-          }
-        }, results);
-
+            true, RpcControllerFactory.instantiate(configuration));
+
+    Callback<ClientProtos.CoprocessorServiceResult> resultsCallback
+    = (byte[] region, byte[] row, ClientProtos.CoprocessorServiceResult serviceResult) -> {
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Received result for endpoint " + methodDescriptor.getFullName() +
+            ": region=" + Bytes.toStringBinary(region) +
+            ", row=" + Bytes.toStringBinary(row) +
+            ", value=" + serviceResult.getValue().getValue());
+      }
+      try {
+        Message.Builder builder = responsePrototype.newBuilderForType();
+        org.apache.hadoop.hbase.protobuf.ProtobufUtil.mergeFrom(builder,
+            serviceResult.getValue().getValue().toByteArray());
+        callback.update(region, row, (R) builder.build());
+      } catch (IOException e) {
+        LOG.error("Unexpected response type from endpoint " + methodDescriptor.getFullName(),
+            e);
+        callbackErrorExceptions.add(e);
+        callbackErrorActions.add(execsByRow.get(row));
+        callbackErrorServers.add("null");
+      }
+    };
+    AsyncProcessTask<ClientProtos.CoprocessorServiceResult> task = AsyncProcessTask.newBuilder(resultsCallback)
+            .setPool(pool)
+            .setTableName(tableName)
+            .setRowAccess(execs)
+            .setResults(results)
+            .setRpcTimeout(readRpcTimeout)
+            .setOperationTimeout(operationTimeout)
+            .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture future = asyncProcess.submit(task);
     future.waitUntilDone();
 
     if (future.hasError()) {
@@ -1270,10 +1337,10 @@ public class HTable implements Table {
               .pool(pool)
               .writeBufferSize(connConfiguration.getWriteBufferSize())
               .maxKeyValueSize(connConfiguration.getMaxKeyValueSize())
+              .opertationTimeout(operationTimeout)
+              .rpcTimeout(writeRpcTimeout)
       );
     }
-    mutator.setRpcTimeout(writeRpcTimeout);
-    mutator.setOperationTimeout(operationTimeout);
     return mutator;
   }
 }
\ No newline at end of file
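
For reference, the new client-side submission pattern that replaces the old AsyncProcess.submitAll(...) calls throughout HTable looks roughly like the sketch below. This is only an illustration of the builder flow visible in the hunks above; pool, tableName, rows, callable and the timeout fields are assumed to be whatever the surrounding HTable method already holds, and the coprocessor-service path uses the same builder via newBuilder(resultsCallback) when a per-row callback is needed.

    // Minimal sketch of the AsyncProcessTask flow introduced above (not the full HTable code).
    // pool, tableName, rows, callable, readRpcTimeout, writeRpcTimeout and operationTimeout
    // are assumed to exist in the surrounding method, as in the diff hunks.
    Object[] results = new Object[rows.size()];
    AsyncProcessTask task = AsyncProcessTask.newBuilder()
        .setPool(pool)                                          // executor running the batch
        .setTableName(tableName)
        .setRowAccess(rows)                                     // mutations/actions to submit
        .setResults(results)                                    // one result slot per row
        .setCallable(callable)                                  // per-region callable, as in the diff
        .setRpcTimeout(Math.max(readRpcTimeout, writeRpcTimeout))
        .setOperationTimeout(operationTimeout)
        .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)   // submit every row at once
        .build();
    AsyncRequestFuture ars = multiAp.submit(task);              // replaces multiAp.submitAll(...)
    ars.waitUntilDone();
    if (ars.hasError()) {
      throw ars.getErrors();
    }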

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index 8ff64bf..c03b969 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -443,7 +443,7 @@ public class HTableMultiplexer {
     private final AtomicInteger retryInQueue = new AtomicInteger(0);
     private final int writeRpcTimeout; // needed to pass in through AsyncProcess constructor
     private final int operationTimeout;
-
+    private final ExecutorService pool;
     public FlushWorker(Configuration conf, ClusterConnection conn, HRegionLocation addr,
         HTableMultiplexer htableMultiplexer, int perRegionServerBufferQueueSize,
         ExecutorService pool, ScheduledExecutorService executor) {
@@ -457,10 +457,10 @@ public class HTableMultiplexer {
               HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
       this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
           HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-      this.ap = new AsyncProcess(conn, conf, pool, rpcCallerFactory, false, rpcControllerFactory,
-          writeRpcTimeout, operationTimeout);
+      this.ap = new AsyncProcess(conn, conf, rpcCallerFactory, false, rpcControllerFactory);
       this.executor = executor;
       this.maxRetryInQueue = conf.getInt(TABLE_MULTIPLEXER_MAX_RETRIES_IN_QUEUE, 10000);
+      this.pool = pool;
     }
 
     protected LinkedBlockingQueue<PutStatus> getQueue() {
@@ -594,9 +594,14 @@ public class HTableMultiplexer {
         Map<ServerName, MultiAction> actionsByServer =
             Collections.singletonMap(server, actions);
         try {
+          AsyncProcessTask task = AsyncProcessTask.newBuilder()
+                  .setResults(results)
+                  .setPool(pool)
+                  .setRpcTimeout(writeRpcTimeout)
+                  .setOperationTimeout(operationTimeout)
+                  .build();
           AsyncRequestFuture arf =
-              ap.submitMultiActions(null, retainedActions, 0L, null, results, true, null,
-                null, actionsByServer, null);
+              ap.submitMultiActions(task, retainedActions, 0L, null, null, actionsByServer);
           arf.waitUntilDone();
           if (arf.hasError()) {
             // We just log and ignore the exception here since failed Puts will be resubmit again.


[49/50] [abbrv] hbase git commit: Revert "HBASE-17346 AggregationClient cleanup" Revert because I had the wrong JIRA number (spotted by Duo Zhang)

Posted by sy...@apache.org.
Revert "HBASE-17346 AggregationClient cleanup"
Revert because I had the wrong JIRA number (spotted by Duo Zhang)

This reverts commit 0a93241b61e6183b5671a4e7940e6212a17acd66.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0583d793
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0583d793
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0583d793

Branch: refs/heads/hbase-12439
Commit: 0583d79346e16962bb35fed7587d56d2ec71c0fa
Parents: 69ce596
Author: Michael Stack <st...@apache.org>
Authored: Tue Jan 3 19:15:53 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Tue Jan 3 19:15:53 2017 -0800

----------------------------------------------------------------------
 .../client/coprocessor/AggregationClient.java   | 94 +++++---------------
 1 file changed, 23 insertions(+), 71 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0583d793/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
index d236342..cde7d41 100644
--- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
+import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse;
@@ -58,8 +59,6 @@ import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.Message;
-import com.google.protobuf.RpcCallback;
-import com.google.protobuf.RpcController;
 
 /**
  * This client class is for invoking the aggregate functions deployed on the
@@ -82,60 +81,13 @@ import com.google.protobuf.RpcController;
  * </ul>
  * <p>Call {@link #close()} when done.
  */
-@InterfaceAudience.Public
+@InterfaceAudience.Private
 public class AggregationClient implements Closeable {
   // TODO: This class is not used.  Move to examples?
   private static final Log log = LogFactory.getLog(AggregationClient.class);
   private final Connection connection;
 
   /**
-   * An RpcController implementation for use here in this endpoint.
-   */
-  static class AggregationClientRpcController implements RpcController {
-    private String errorText;
-    private boolean cancelled = false;
-    private boolean failed = false;
-
-    @Override
-    public String errorText() {
-      return this.errorText;
-    }
-
-    @Override
-    public boolean failed() {
-      return this.failed;
-    }
-
-    @Override
-    public boolean isCanceled() {
-      return this.cancelled;
-    }
-
-    @Override
-    public void notifyOnCancel(RpcCallback<Object> arg0) {
-      throw new UnsupportedOperationException();
-    }
-
-    @Override
-    public void reset() {
-      this.errorText = null;
-      this.cancelled = false;
-      this.failed = false;
-    }
-
-    @Override
-    public void setFailed(String errorText) {
-      this.failed = true;
-      this.errorText = errorText;
-    }
-
-    @Override
-    public void startCancel() {
-      this.cancelled = true;
-    }
-  }
-
-  /**
    * Constructor with Conf object
    * @param cfg
    */
@@ -208,13 +160,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, R>() {
           @Override
           public R call(AggregateService instance) throws IOException {
-            RpcController controller = new AggregationClientRpcController();
+            ServerRpcController controller = new ServerRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getMax(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failed()) {
-              throw new IOException(controller.errorText());
+            if (controller.failedOnException()) {
+              throw controller.getFailedOn();
             }
             if (response.getFirstPartCount() > 0) {
               ByteString b = response.getFirstPart(0);
@@ -296,13 +248,13 @@ public class AggregationClient implements Closeable {
 
           @Override
           public R call(AggregateService instance) throws IOException {
-            RpcController controller = new AggregationClientRpcController();
+            ServerRpcController controller = new ServerRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getMin(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failed()) {
-              throw new IOException(controller.errorText());
+            if (controller.failedOnException()) {
+              throw controller.getFailedOn();
             }
             if (response.getFirstPartCount() > 0) {
               ByteString b = response.getFirstPart(0);
@@ -371,13 +323,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, Long>() {
           @Override
           public Long call(AggregateService instance) throws IOException {
-            RpcController controller = new AggregationClientRpcController();
+            ServerRpcController controller = new ServerRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getRowNum(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failed()) {
-              throw new IOException(controller.errorText());
+            if (controller.failedOnException()) {
+              throw controller.getFailedOn();
             }
             byte[] bytes = getBytesFromResponse(response.getFirstPart(0));
             ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);
@@ -436,14 +388,14 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, S>() {
           @Override
           public S call(AggregateService instance) throws IOException {
-            RpcController controller = new AggregationClientRpcController();
+            ServerRpcController controller = new ServerRpcController();
             // Not sure what is going on here why I have to do these casts. TODO.
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getSum(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failed()) {
-              throw new IOException(controller.errorText());
+            if (controller.failedOnException()) {
+              throw controller.getFailedOn();
             }
             if (response.getFirstPartCount() == 0) {
               return null;
@@ -504,13 +456,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, Pair<S, Long>>() {
           @Override
           public Pair<S, Long> call(AggregateService instance) throws IOException {
-            RpcController controller = new AggregationClientRpcController();
+            ServerRpcController controller = new ServerRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getAvg(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failed()) {
-              throw new IOException(controller.errorText());
+            if (controller.failedOnException()) {
+              throw controller.getFailedOn();
             }
             Pair<S, Long> pair = new Pair<S, Long>(null, 0L);
             if (response.getFirstPartCount() == 0) {
@@ -608,13 +560,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, Pair<List<S>, Long>>() {
           @Override
           public Pair<List<S>, Long> call(AggregateService instance) throws IOException {
-            RpcController controller = new AggregationClientRpcController();
+            ServerRpcController controller = new ServerRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getStd(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failed()) {
-              throw new IOException(controller.errorText());
+            if (controller.failedOnException()) {
+              throw controller.getFailedOn();
             }
             Pair<List<S>, Long> pair = new Pair<List<S>, Long>(new ArrayList<S>(), 0L);
             if (response.getFirstPartCount() == 0) {
@@ -724,13 +676,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, List<S>>() {
           @Override
           public List<S> call(AggregateService instance) throws IOException {
-            RpcController controller = new AggregationClientRpcController();
+            ServerRpcController controller = new ServerRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getMedian(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failed()) {
-              throw new IOException(controller.errorText());
+            if (controller.failedOnException()) {
+              throw controller.getFailedOn();
             }
 
             List<S> list = new ArrayList<S>();


[03/50] [abbrv] hbase git commit: HBASE-11392 add/remove peer requests should be routed through master

Posted by sy...@apache.org.
HBASE-11392 add/remove peer requests should be routed through master


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e1f4aaea
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e1f4aaea
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e1f4aaea

Branch: refs/heads/hbase-12439
Commit: e1f4aaeacdcbaffb02a08c29493601547c381941
Parents: 3826e63
Author: Guanghao Zhang <zg...@gmail.com>
Authored: Tue Dec 20 21:20:58 2016 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Wed Dec 21 13:27:13 2016 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/Admin.java   |   19 +
 .../hbase/client/ConnectionImplementation.java  |   16 +
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   26 +
 .../client/replication/ReplicationAdmin.java    |   26 +-
 .../hbase/replication/ReplicationFactory.java   |    8 +-
 .../hbase/shaded/protobuf/RequestConverter.java |   20 +
 .../shaded/protobuf/generated/MasterProtos.java |  832 ++++---
 .../protobuf/generated/ReplicationProtos.java   | 2158 ++++++++++++++++++
 .../src/main/protobuf/Master.proto              |    9 +
 .../src/main/protobuf/Replication.proto         |   42 +
 .../hbase/coprocessor/MasterObserver.java       |   42 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   37 +
 .../hbase/master/MasterCoprocessorHost.java     |   42 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   29 +
 .../hadoop/hbase/master/MasterServices.java     |   16 +
 .../master/replication/ReplicationManager.java  |  105 +
 .../hbase/security/access/AccessController.java |   13 +
 .../replication/TestReplicationAdmin.java       |   20 +-
 .../hbase/master/MockNoopMasterServices.java    |   11 +
 .../hbase/master/TestMasterNoCluster.java       |    5 +-
 .../hbase/replication/TestReplicationBase.java  |   10 +-
 .../replication/TestReplicationWithTags.java    |    8 +-
 .../replication/TestSerialReplication.java      |    6 +-
 .../security/access/TestAccessController.java   |   30 +
 ...sibilityLabelReplicationWithExpAsString.java |    8 +-
 .../TestVisibilityLabelsReplication.java        |    8 +-
 .../asciidoc/_chapters/appendix_acl_matrix.adoc |    2 +
 27 files changed, 3173 insertions(+), 375 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 5b53a7e..d284fc8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException;
 import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException;
 import org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
@@ -1823,4 +1824,22 @@ public interface Admin extends Abortable, Closeable {
    * @return true if the switch is enabled, false otherwise.
    */
   boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws IOException;
+
+  /**
+   * Add a new replication peer for replicating data to slave cluster
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication slave cluster
+   * @throws IOException
+   */
+  default void addReplicationPeer(final String peerId, final ReplicationPeerConfig peerConfig)
+      throws IOException {
+  }
+
+  /**
+   * Remove a peer and stop the replication
+   * @param peerId a short name that identifies the peer
+   * @throws IOException
+   */
+  default void removeReplicationPeer(final String peerId) throws IOException {
+  }
 }
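
The two default methods above are the new public entry points. A minimal client-side usage sketch follows; the Configuration, the peer id "1" and the cluster key are placeholder values for illustration and do not come from the patch.

    // Sketch only: conf, the peer id "1" and the cluster key below are placeholders.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
      peerConfig.setClusterKey("zk1,zk2,zk3:2181:/hbase");   // slave cluster's ZK quorum (placeholder)
      admin.addReplicationPeer("1", peerConfig);             // now routed through the master
      // ... later, stop replicating to that peer:
      admin.removeReplicationPeer("1");
    }

Both calls are forwarded to the master over the new AddReplicationPeer / RemoveReplicationPeer RPCs that show up in the generated MasterProtos changes further down.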

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 0c512be..4e31f2c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -88,6 +88,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCa
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
@@ -1637,6 +1641,18 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
           SecurityCapabilitiesRequest request) throws ServiceException {
         return stub.getSecurityCapabilities(controller, request);
       }
+
+      @Override
+      public AddReplicationPeerResponse addReplicationPeer(RpcController controller,
+          AddReplicationPeerRequest request) throws ServiceException {
+        return stub.addReplicationPeer(controller, request);
+      }
+
+      @Override
+      public RemoveReplicationPeerResponse removeReplicationPeer(RpcController controller,
+          RemoveReplicationPeerRequest request) throws ServiceException {
+        return stub.removeReplicationPeer(controller, request);
+      }
     };
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 9bfe276..19831c1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -77,6 +77,7 @@ import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
 import org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
@@ -3744,4 +3745,29 @@ public class HBaseAdmin implements Admin {
   private RpcControllerFactory getRpcControllerFactory() {
     return this.rpcControllerFactory;
   }
+
+  @Override
+  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
+      throws IOException {
+    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+      @Override
+      protected Void rpcCall() throws Exception {
+        master.addReplicationPeer(getRpcController(),
+          RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig));
+        return null;
+      }
+    });
+  }
+
+  @Override
+  public void removeReplicationPeer(String peerId) throws IOException {
+    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
+      @Override
+      protected Void rpcCall() throws Exception {
+        master.removeReplicationPeer(getRpcController(),
+          RequestConverter.buildRemoveReplicationPeerRequest(peerId));
+        return null;
+      }
+    });
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 25590c5..e6b9b0d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
@@ -80,9 +81,12 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
  * To see which commands are available in the shell, type
  * <code>replication</code>.
  * </p>
+ *
+ * @deprecated use {@link org.apache.hadoop.hbase.client.Admin} instead.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
+@Deprecated
 public class ReplicationAdmin implements Closeable {
   private static final Log LOG = LogFactory.getLog(ReplicationAdmin.class);
 
@@ -108,6 +112,8 @@ public class ReplicationAdmin implements Closeable {
    */
   private final ZooKeeperWatcher zkw;
 
+  private Admin admin;
+
   /**
    * Constructor that creates a connection to the local ZooKeeper ensemble.
    * @param conf Configuration to use
@@ -116,6 +122,7 @@ public class ReplicationAdmin implements Closeable {
    */
   public ReplicationAdmin(Configuration conf) throws IOException {
     this.connection = ConnectionFactory.createConnection(conf);
+    admin = connection.getAdmin();
     try {
       zkw = createZooKeeperWatcher();
       try {
@@ -133,9 +140,7 @@ public class ReplicationAdmin implements Closeable {
         throw exception;
       }
     } catch (Exception exception) {
-      if (connection != null) {
-        connection.close();
-      }
+      connection.close();
       if (exception instanceof IOException) {
         throw (IOException) exception;
       } else if (exception instanceof RuntimeException) {
@@ -176,11 +181,12 @@ public class ReplicationAdmin implements Closeable {
    */
   @Deprecated
   public void addPeer(String id, ReplicationPeerConfig peerConfig,
-      Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
+      Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException,
+      IOException {
     if (tableCfs != null) {
       peerConfig.setTableCFsMap(tableCfs);
     }
-    this.replicationPeers.registerPeer(id, peerConfig);
+    this.admin.addReplicationPeer(id, peerConfig);
   }
 
   /**
@@ -188,10 +194,11 @@ public class ReplicationAdmin implements Closeable {
    * @param id a short name that identifies the cluster
    * @param peerConfig configuration for the replication slave cluster
    */
-  public void addPeer(String id, ReplicationPeerConfig peerConfig) throws ReplicationException {
+  public void addPeer(String id, ReplicationPeerConfig peerConfig) throws ReplicationException,
+      IOException {
     checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
       peerConfig.getTableCFsMap());
-    this.replicationPeers.registerPeer(id, peerConfig);
+    this.admin.addReplicationPeer(id, peerConfig);
   }
 
   /**
@@ -213,8 +220,8 @@ public class ReplicationAdmin implements Closeable {
    * Removes a peer cluster and stops the replication to it.
    * @param id a short name that identifies the cluster
    */
-  public void removePeer(String id) throws ReplicationException {
-    this.replicationPeers.unregisterPeer(id);
+  public void removePeer(String id) throws IOException {
+    this.admin.removeReplicationPeer(id);
   }
 
   /**
@@ -403,6 +410,7 @@ public class ReplicationAdmin implements Closeable {
     if (this.connection != null) {
       this.connection.close();
     }
+    admin.close();
   }
 
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index bc7a4ce..8506cbb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -41,10 +41,10 @@ public class ReplicationFactory {
   }
 
   public static ReplicationQueuesClient getReplicationQueuesClient(
-      ReplicationQueuesClientArguments args)
-    throws Exception {
-    Class<?> classToBuild = args.getConf().getClass("hbase.region.replica." +
-      "replication.replicationQueuesClient.class", ReplicationQueuesClientZKImpl.class);
+      ReplicationQueuesClientArguments args) throws Exception {
+    Class<?> classToBuild = args.getConf().getClass(
+      "hbase.region.replica.replication.replicationQueuesClient.class",
+      ReplicationQueuesClientZKImpl.class);
     return (ReplicationQueuesClient) ConstructorUtils.invokeConstructor(classToBuild, args);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index f938fd0..cd4712a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -44,8 +44,10 @@ import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
 import org.apache.hadoop.hbase.client.Row;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
@@ -110,6 +112,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
@@ -1560,4 +1565,19 @@ public final class RequestConverter {
     }
     throw new UnsupportedOperationException("Unsupport switch type:" + switchType);
   }
+
+  public static ReplicationProtos.AddReplicationPeerRequest buildAddReplicationPeerRequest(
+      String peerId, ReplicationPeerConfig peerConfig) {
+    AddReplicationPeerRequest.Builder builder = AddReplicationPeerRequest.newBuilder();
+    builder.setPeerId(peerId);
+    builder.setPeerConfig(ReplicationSerDeHelper.convert(peerConfig));
+    return builder.build();
+  }
+
+  public static ReplicationProtos.RemoveReplicationPeerRequest buildRemoveReplicationPeerRequest(
+      String peerId) {
+    RemoveReplicationPeerRequest.Builder builder = RemoveReplicationPeerRequest.newBuilder();
+    builder.setPeerId(peerId);
+    return builder.build();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 56442d1..da5de63 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -66344,6 +66344,30 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done);
 
+      /**
+       * <pre>
+       ** Add a replication peer 
+       * </pre>
+       *
+       * <code>rpc AddReplicationPeer(.hbase.pb.AddReplicationPeerRequest) returns (.hbase.pb.AddReplicationPeerResponse);</code>
+       */
+      public abstract void addReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done);
+
+      /**
+       * <pre>
+       ** Remove a replication peer 
+       * </pre>
+       *
+       * <code>rpc RemoveReplicationPeer(.hbase.pb.RemoveReplicationPeerRequest) returns (.hbase.pb.RemoveReplicationPeerResponse);</code>
+       */
+      public abstract void removeReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done);
+
     }
 
     public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService(
@@ -66813,6 +66837,22 @@ public final class MasterProtos {
           impl.listProcedures(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void addReplicationPeer(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done) {
+          impl.addReplicationPeer(controller, request, done);
+        }
+
+        @java.lang.Override
+        public  void removeReplicationPeer(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done) {
+          impl.removeReplicationPeer(controller, request, done);
+        }
+
       };
     }
 
@@ -66951,6 +66991,10 @@ public final class MasterProtos {
               return impl.abortProcedure(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest)request);
             case 57:
               return impl.listProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest)request);
+            case 58:
+              return impl.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request);
+            case 59:
+              return impl.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -67081,6 +67125,10 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
             case 57:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
+            case 58:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
+            case 59:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -67211,6 +67259,10 @@ public final class MasterProtos {
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
             case 57:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
+            case 58:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
+            case 59:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -67944,6 +67996,30 @@ public final class MasterProtos {
         org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done);
 
+    /**
+     * <pre>
+     ** Add a replication peer 
+     * </pre>
+     *
+     * <code>rpc AddReplicationPeer(.hbase.pb.AddReplicationPeerRequest) returns (.hbase.pb.AddReplicationPeerResponse);</code>
+     */
+    public abstract void addReplicationPeer(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done);
+
+    /**
+     * <pre>
+     ** Remove a replication peer 
+     * </pre>
+     *
+     * <code>rpc RemoveReplicationPeer(.hbase.pb.RemoveReplicationPeerRequest) returns (.hbase.pb.RemoveReplicationPeerResponse);</code>
+     */
+    public abstract void removeReplicationPeer(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done);
+
     public static final
         org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor
         getDescriptor() {
@@ -68256,6 +68332,16 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse>specializeCallback(
               done));
           return;
+        case 58:
+          this.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse>specializeCallback(
+              done));
+          return;
+        case 59:
+          this.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse>specializeCallback(
+              done));
+          return;
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -68386,6 +68472,10 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest.getDefaultInstance();
         case 57:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
+        case 58:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
+        case 59:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -68516,6 +68606,10 @@ public final class MasterProtos {
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse.getDefaultInstance();
         case 57:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
+        case 58:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
+        case 59:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
       }
@@ -69406,6 +69500,36 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.class,
             org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()));
       }
+
+      public  void addReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(58),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.class,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance()));
+      }
+
+      public  void removeReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(59),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.class,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance()));
+      }
     }
 
     public static BlockingInterface newBlockingStub(
@@ -69703,6 +69827,16 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse addReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse removeReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
     }
 
     private static final class BlockingStub implements BlockingInterface {
@@ -70407,6 +70541,30 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance());
       }
 
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse addReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(58),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance());
+      }
+
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse removeReplicationPeer(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(59),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance());
+      }
+
     }
 
     // @@protoc_insertion_point(class_scope:hbase.pb.MasterService)
@@ -70989,340 +71147,346 @@ public final class MasterProtos {
       "\n\014Master.proto\022\010hbase.pb\032\013HBase.proto\032\014C" +
       "lient.proto\032\023ClusterStatus.proto\032\023ErrorH" +
       "andling.proto\032\017Procedure.proto\032\013Quota.pr" +
-      "oto\"\234\001\n\020AddColumnRequest\022\'\n\ntable_name\030\001" +
-      " \002(\0132\023.hbase.pb.TableName\0225\n\017column_fami" +
-      "lies\030\002 \002(\0132\034.hbase.pb.ColumnFamilySchema" +
-      "\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:" +
-      "\0010\"$\n\021AddColumnResponse\022\017\n\007proc_id\030\001 \001(\004" +
-      "\"}\n\023DeleteColumnRequest\022\'\n\ntable_name\030\001 " +
-      "\002(\0132\023.hbase.pb.TableName\022\023\n\013column_name\030",
-      "\002 \002(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004" +
-      " \001(\004:\0010\"\'\n\024DeleteColumnResponse\022\017\n\007proc_" +
-      "id\030\001 \001(\004\"\237\001\n\023ModifyColumnRequest\022\'\n\ntabl" +
-      "e_name\030\001 \002(\0132\023.hbase.pb.TableName\0225\n\017col" +
-      "umn_families\030\002 \002(\0132\034.hbase.pb.ColumnFami" +
-      "lySchema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonc" +
-      "e\030\004 \001(\004:\0010\"\'\n\024ModifyColumnResponse\022\017\n\007pr" +
-      "oc_id\030\001 \001(\004\"n\n\021MoveRegionRequest\022)\n\006regi" +
-      "on\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\020d" +
-      "est_server_name\030\002 \001(\0132\024.hbase.pb.ServerN",
-      "ame\"\024\n\022MoveRegionResponse\"\274\001\n\035DispatchMe" +
-      "rgingRegionsRequest\022+\n\010region_a\030\001 \002(\0132\031." +
-      "hbase.pb.RegionSpecifier\022+\n\010region_b\030\002 \002" +
-      "(\0132\031.hbase.pb.RegionSpecifier\022\027\n\010forcibl" +
-      "e\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004 \001(\004:\0010\022\020" +
-      "\n\005nonce\030\005 \001(\004:\0010\"1\n\036DispatchMergingRegio" +
-      "nsResponse\022\017\n\007proc_id\030\001 \001(\004\"\210\001\n\030MergeTab" +
-      "leRegionsRequest\022)\n\006region\030\001 \003(\0132\031.hbase" +
-      ".pb.RegionSpecifier\022\027\n\010forcible\030\003 \001(\010:\005f" +
-      "alse\022\026\n\013nonce_group\030\004 \001(\004:\0010\022\020\n\005nonce\030\005 ",
-      "\001(\004:\0010\",\n\031MergeTableRegionsResponse\022\017\n\007p" +
-      "roc_id\030\001 \001(\004\"@\n\023AssignRegionRequest\022)\n\006r" +
-      "egion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\"\026" +
-      "\n\024AssignRegionResponse\"X\n\025UnassignRegion" +
-      "Request\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Regio" +
-      "nSpecifier\022\024\n\005force\030\002 \001(\010:\005false\"\030\n\026Unas" +
-      "signRegionResponse\"A\n\024OfflineRegionReque" +
-      "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" +
-      "ifier\"\027\n\025OfflineRegionResponse\"\177\n\022Create" +
-      "TableRequest\022+\n\014table_schema\030\001 \002(\0132\025.hba",
-      "se.pb.TableSchema\022\022\n\nsplit_keys\030\002 \003(\014\022\026\n" +
-      "\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"" +
-      "&\n\023CreateTableResponse\022\017\n\007proc_id\030\001 \001(\004\"" +
-      "g\n\022DeleteTableRequest\022\'\n\ntable_name\030\001 \002(" +
-      "\0132\023.hbase.pb.TableName\022\026\n\013nonce_group\030\002 " +
-      "\001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023DeleteTableR" +
-      "esponse\022\017\n\007proc_id\030\001 \001(\004\"\207\001\n\024TruncateTab" +
-      "leRequest\022&\n\ttableName\030\001 \002(\0132\023.hbase.pb." +
-      "TableName\022\035\n\016preserveSplits\030\002 \001(\010:\005false" +
-      "\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:",
-      "\0010\"(\n\025TruncateTableResponse\022\017\n\007proc_id\030\001" +
-      " \001(\004\"g\n\022EnableTableRequest\022\'\n\ntable_name" +
-      "\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonce_gro" +
-      "up\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"&\n\023EnableT" +
-      "ableResponse\022\017\n\007proc_id\030\001 \001(\004\"h\n\023Disable" +
-      "TableRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase" +
-      ".pb.TableName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n" +
-      "\005nonce\030\003 \001(\004:\0010\"\'\n\024DisableTableResponse\022" +
-      "\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022ModifyTableRequest\022" +
-      "\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName",
-      "\022+\n\014table_schema\030\002 \002(\0132\025.hbase.pb.TableS" +
-      "chema\022\026\n\013nonce_group\030\003 \001(\004:\0010\022\020\n\005nonce\030\004" +
-      " \001(\004:\0010\"&\n\023ModifyTableResponse\022\017\n\007proc_i" +
-      "d\030\001 \001(\004\"~\n\026CreateNamespaceRequest\022:\n\023nam" +
-      "espaceDescriptor\030\001 \002(\0132\035.hbase.pb.Namesp" +
-      "aceDescriptor\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n" +
-      "\005nonce\030\003 \001(\004:\0010\"*\n\027CreateNamespaceRespon" +
-      "se\022\017\n\007proc_id\030\001 \001(\004\"Y\n\026DeleteNamespaceRe" +
-      "quest\022\025\n\rnamespaceName\030\001 \002(\t\022\026\n\013nonce_gr" +
-      "oup\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Delete",
-      "NamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026Mo" +
-      "difyNamespaceRequest\022:\n\023namespaceDescrip" +
-      "tor\030\001 \002(\0132\035.hbase.pb.NamespaceDescriptor" +
-      "\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:" +
-      "\0010\"*\n\027ModifyNamespaceResponse\022\017\n\007proc_id" +
-      "\030\001 \001(\004\"6\n\035GetNamespaceDescriptorRequest\022" +
-      "\025\n\rnamespaceName\030\001 \002(\t\"\\\n\036GetNamespaceDe" +
-      "scriptorResponse\022:\n\023namespaceDescriptor\030" +
-      "\001 \002(\0132\035.hbase.pb.NamespaceDescriptor\"!\n\037" +
-      "ListNamespaceDescriptorsRequest\"^\n ListN",
-      "amespaceDescriptorsResponse\022:\n\023namespace" +
-      "Descriptor\030\001 \003(\0132\035.hbase.pb.NamespaceDes" +
-      "criptor\"?\n&ListTableDescriptorsByNamespa" +
-      "ceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"U\n\'List" +
-      "TableDescriptorsByNamespaceResponse\022*\n\013t" +
-      "ableSchema\030\001 \003(\0132\025.hbase.pb.TableSchema\"" +
-      "9\n ListTableNamesByNamespaceRequest\022\025\n\rn" +
-      "amespaceName\030\001 \002(\t\"K\n!ListTableNamesByNa" +
-      "mespaceResponse\022&\n\ttableName\030\001 \003(\0132\023.hba" +
-      "se.pb.TableName\"\021\n\017ShutdownRequest\"\022\n\020Sh",
-      "utdownResponse\"\023\n\021StopMasterRequest\"\024\n\022S" +
-      "topMasterResponse\"\034\n\032IsInMaintenanceMode" +
-      "Request\"8\n\033IsInMaintenanceModeResponse\022\031" +
-      "\n\021inMaintenanceMode\030\001 \002(\010\"\037\n\016BalanceRequ" +
-      "est\022\r\n\005force\030\001 \001(\010\"\'\n\017BalanceResponse\022\024\n" +
-      "\014balancer_ran\030\001 \002(\010\"<\n\031SetBalancerRunnin" +
-      "gRequest\022\n\n\002on\030\001 \002(\010\022\023\n\013synchronous\030\002 \001(" +
-      "\010\"8\n\032SetBalancerRunningResponse\022\032\n\022prev_" +
-      "balance_value\030\001 \001(\010\"\032\n\030IsBalancerEnabled" +
-      "Request\",\n\031IsBalancerEnabledResponse\022\017\n\007",
-      "enabled\030\001 \002(\010\"w\n\035SetSplitOrMergeEnabledR" +
-      "equest\022\017\n\007enabled\030\001 \002(\010\022\023\n\013synchronous\030\002" +
-      " \001(\010\0220\n\014switch_types\030\003 \003(\0162\032.hbase.pb.Ma" +
-      "sterSwitchType\"4\n\036SetSplitOrMergeEnabled" +
-      "Response\022\022\n\nprev_value\030\001 \003(\010\"O\n\034IsSplitO" +
-      "rMergeEnabledRequest\022/\n\013switch_type\030\001 \002(" +
-      "\0162\032.hbase.pb.MasterSwitchType\"0\n\035IsSplit" +
-      "OrMergeEnabledResponse\022\017\n\007enabled\030\001 \002(\010\"" +
-      "\022\n\020NormalizeRequest\"+\n\021NormalizeResponse" +
-      "\022\026\n\016normalizer_ran\030\001 \002(\010\")\n\033SetNormalize",
-      "rRunningRequest\022\n\n\002on\030\001 \002(\010\"=\n\034SetNormal" +
-      "izerRunningResponse\022\035\n\025prev_normalizer_v" +
-      "alue\030\001 \001(\010\"\034\n\032IsNormalizerEnabledRequest" +
-      "\".\n\033IsNormalizerEnabledResponse\022\017\n\007enabl" +
-      "ed\030\001 \002(\010\"\027\n\025RunCatalogScanRequest\"-\n\026Run" +
-      "CatalogScanResponse\022\023\n\013scan_result\030\001 \001(\005" +
-      "\"-\n\033EnableCatalogJanitorRequest\022\016\n\006enabl" +
-      "e\030\001 \002(\010\"2\n\034EnableCatalogJanitorResponse\022" +
-      "\022\n\nprev_value\030\001 \001(\010\" \n\036IsCatalogJanitorE" +
-      "nabledRequest\"0\n\037IsCatalogJanitorEnabled",
-      "Response\022\r\n\005value\030\001 \002(\010\"B\n\017SnapshotReque" +
-      "st\022/\n\010snapshot\030\001 \002(\0132\035.hbase.pb.Snapshot" +
-      "Description\",\n\020SnapshotResponse\022\030\n\020expec" +
-      "ted_timeout\030\001 \002(\003\"\036\n\034GetCompletedSnapsho" +
-      "tsRequest\"Q\n\035GetCompletedSnapshotsRespon" +
-      "se\0220\n\tsnapshots\030\001 \003(\0132\035.hbase.pb.Snapsho" +
-      "tDescription\"H\n\025DeleteSnapshotRequest\022/\n" +
+      "oto\032\021Replication.proto\"\234\001\n\020AddColumnRequ" +
+      "est\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Table" +
+      "Name\0225\n\017column_families\030\002 \002(\0132\034.hbase.pb" +
+      ".ColumnFamilySchema\022\026\n\013nonce_group\030\003 \001(\004" +
+      ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"$\n\021AddColumnRespon" +
+      "se\022\017\n\007proc_id\030\001 \001(\004\"}\n\023DeleteColumnReque" +
+      "st\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableN",
+      "ame\022\023\n\013column_name\030\002 \002(\014\022\026\n\013nonce_group\030" +
+      "\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024DeleteColu" +
+      "mnResponse\022\017\n\007proc_id\030\001 \001(\004\"\237\001\n\023ModifyCo" +
+      "lumnRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase." +
+      "pb.TableName\0225\n\017column_families\030\002 \002(\0132\034." +
+      "hbase.pb.ColumnFamilySchema\022\026\n\013nonce_gro" +
+      "up\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"\'\n\024ModifyC" +
+      "olumnResponse\022\017\n\007proc_id\030\001 \001(\004\"n\n\021MoveRe" +
+      "gionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" +
+      "egionSpecifier\022.\n\020dest_server_name\030\002 \001(\013",
+      "2\024.hbase.pb.ServerName\"\024\n\022MoveRegionResp" +
+      "onse\"\274\001\n\035DispatchMergingRegionsRequest\022+" +
+      "\n\010region_a\030\001 \002(\0132\031.hbase.pb.RegionSpecif" +
+      "ier\022+\n\010region_b\030\002 \002(\0132\031.hbase.pb.RegionS" +
+      "pecifier\022\027\n\010forcible\030\003 \001(\010:\005false\022\026\n\013non" +
+      "ce_group\030\004 \001(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\"1\n\036D" +
+      "ispatchMergingRegionsResponse\022\017\n\007proc_id" +
+      "\030\001 \001(\004\"\210\001\n\030MergeTableRegionsRequest\022)\n\006r" +
+      "egion\030\001 \003(\0132\031.hbase.pb.RegionSpecifier\022\027" +
+      "\n\010forcible\030\003 \001(\010:\005false\022\026\n\013nonce_group\030\004",
+      " \001(\004:\0010\022\020\n\005nonce\030\005 \001(\004:\0010\",\n\031MergeTableR" +
+      "egionsResponse\022\017\n\007proc_id\030\001 \001(\004\"@\n\023Assig" +
+      "nRegionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.p" +
+      "b.RegionSpecifier\"\026\n\024AssignRegionRespons" +
+      "e\"X\n\025UnassignRegionRequest\022)\n\006region\030\001 \002" +
+      "(\0132\031.hbase.pb.RegionSpecifier\022\024\n\005force\030\002" +
+      " \001(\010:\005false\"\030\n\026UnassignRegionResponse\"A\n" +
+      "\024OfflineRegionRequest\022)\n\006region\030\001 \002(\0132\031." +
+      "hbase.pb.RegionSpecifier\"\027\n\025OfflineRegio" +
+      "nResponse\"\177\n\022CreateTableRequest\022+\n\014table",
+      "_schema\030\001 \002(\0132\025.hbase.pb.TableSchema\022\022\n\n" +
+      "split_keys\030\002 \003(\014\022\026\n\013nonce_group\030\003 \001(\004:\0010" +
+      "\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023CreateTableRespons" +
+      "e\022\017\n\007proc_id\030\001 \001(\004\"g\n\022DeleteTableRequest" +
+      "\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableNam" +
+      "e\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004" +
+      ":\0010\"&\n\023DeleteTableResponse\022\017\n\007proc_id\030\001 " +
+      "\001(\004\"\207\001\n\024TruncateTableRequest\022&\n\ttableNam" +
+      "e\030\001 \002(\0132\023.hbase.pb.TableName\022\035\n\016preserve" +
+      "Splits\030\002 \001(\010:\005false\022\026\n\013nonce_group\030\003 \001(\004",
+      ":\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"(\n\025TruncateTableRe" +
+      "sponse\022\017\n\007proc_id\030\001 \001(\004\"g\n\022EnableTableRe" +
+      "quest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.Tab" +
+      "leName\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" +
+      "\003 \001(\004:\0010\"&\n\023EnableTableResponse\022\017\n\007proc_" +
+      "id\030\001 \001(\004\"h\n\023DisableTableRequest\022\'\n\ntable" +
+      "_name\030\001 \002(\0132\023.hbase.pb.TableName\022\026\n\013nonc" +
+      "e_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"\'\n\024Di" +
+      "sableTableResponse\022\017\n\007proc_id\030\001 \001(\004\"\224\001\n\022" +
+      "ModifyTableRequest\022\'\n\ntable_name\030\001 \002(\0132\023",
+      ".hbase.pb.TableName\022+\n\014table_schema\030\002 \002(" +
+      "\0132\025.hbase.pb.TableSchema\022\026\n\013nonce_group\030" +
+      "\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"&\n\023ModifyTabl" +
+      "eResponse\022\017\n\007proc_id\030\001 \001(\004\"~\n\026CreateName" +
+      "spaceRequest\022:\n\023namespaceDescriptor\030\001 \002(" +
+      "\0132\035.hbase.pb.NamespaceDescriptor\022\026\n\013nonc" +
+      "e_group\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Cr" +
+      "eateNamespaceResponse\022\017\n\007proc_id\030\001 \001(\004\"Y" +
+      "\n\026DeleteNamespaceRequest\022\025\n\rnamespaceNam" +
+      "e\030\001 \002(\t\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce",
+      "\030\003 \001(\004:\0010\"*\n\027DeleteNamespaceResponse\022\017\n\007" +
+      "proc_id\030\001 \001(\004\"~\n\026ModifyNamespaceRequest\022" +
+      ":\n\023namespaceDescriptor\030\001 \002(\0132\035.hbase.pb." +
+      "NamespaceDescriptor\022\026\n\013nonce_group\030\002 \001(\004" +
+      ":\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027ModifyNamespace" +
+      "Response\022\017\n\007proc_id\030\001 \001(\004\"6\n\035GetNamespac" +
+      "eDescriptorRequest\022\025\n\rnamespaceName\030\001 \002(" +
+      "\t\"\\\n\036GetNamespaceDescriptorResponse\022:\n\023n" +
+      "amespaceDescriptor\030\001 \002(\0132\035.hbase.pb.Name" +
+      "spaceDescriptor\"!\n\037ListNamespaceDescript",
+      "orsRequest\"^\n ListNamespaceDescriptorsRe" +
+      "sponse\022:\n\023namespaceDescriptor\030\001 \003(\0132\035.hb" +
+      "ase.pb.NamespaceDescriptor\"?\n&ListTableD" +
+      "escriptorsByNamespaceRequest\022\025\n\rnamespac" +
+      "eName\030\001 \002(\t\"U\n\'ListTableDescriptorsByNam" +
+      "espaceResponse\022*\n\013tableSchema\030\001 \003(\0132\025.hb" +
+      "ase.pb.TableSchema\"9\n ListTableNamesByNa" +
+      "mespaceRequest\022\025\n\rnamespaceName\030\001 \002(\t\"K\n" +
+      "!ListTableNamesByNamespaceResponse\022&\n\tta" +
+      "bleName\030\001 \003(\0132\023.hbase.pb.TableName\"\021\n\017Sh",
+      "utdownRequest\"\022\n\020ShutdownResponse\"\023\n\021Sto" +
+      "pMasterRequest\"\024\n\022StopMasterResponse\"\034\n\032" +
+      "IsInMaintenanceModeRequest\"8\n\033IsInMainte" +
+      "nanceModeResponse\022\031\n\021inMaintenanceMode\030\001" +
+      " \002(\010\"\037\n\016BalanceRequest\022\r\n\005force\030\001 \001(\010\"\'\n" +
+      "\017BalanceResponse\022\024\n\014balancer_ran\030\001 \002(\010\"<" +
+      "\n\031SetBalancerRunningRequest\022\n\n\002on\030\001 \002(\010\022" +
+      "\023\n\013synchronous\030\002 \001(\010\"8\n\032SetBalancerRunni" +
+      "ngResponse\022\032\n\022prev_balance_value\030\001 \001(\010\"\032" +
+      "\n\030IsBalancerEnabledRequest\",\n\031IsBalancer",
+      "EnabledResponse\022\017\n\007enabled\030\001 \002(\010\"w\n\035SetS" +
+      "plitOrMergeEnabledRequest\022\017\n\007enabled\030\001 \002" +
+      "(\010\022\023\n\013synchronous\030\002 \001(\010\0220\n\014switch_types\030" +
+      "\003 \003(\0162\032.hbase.pb.MasterSwitchType\"4\n\036Set" +
+      "SplitOrMergeEnabledResponse\022\022\n\nprev_valu" +
+      "e\030\001 \003(\010\"O\n\034IsSplitOrMergeEnabledRequest\022" +
+      "/\n\013switch_type\030\001 \002(\0162\032.hbase.pb.MasterSw" +
+      "itchType\"0\n\035IsSplitOrMergeEnabledRespons" +
+      "e\022\017\n\007enabled\030\001 \002(\010\"\022\n\020NormalizeRequest\"+" +
+      "\n\021NormalizeResponse\022\026\n\016normalizer_ran\030\001 ",
+      "\002(\010\")\n\033SetNormalizerRunningRequest\022\n\n\002on" +
+      "\030\001 \002(\010\"=\n\034SetNormalizerRunningResponse\022\035" +
+      "\n\025prev_normalizer_value\030\001 \001(\010\"\034\n\032IsNorma" +
+      "lizerEnabledRequest\".\n\033IsNormalizerEnabl" +
+      "edResponse\022\017\n\007enabled\030\001 \002(\010\"\027\n\025RunCatalo" +
+      "gScanRequest\"-\n\026RunCatalogScanResponse\022\023" +
+      "\n\013scan_result\030\001 \001(\005\"-\n\033EnableCatalogJani" +
+      "torRequest\022\016\n\006enable\030\001 \002(\010\"2\n\034EnableCata" +
+      "logJanitorResponse\022\022\n\nprev_value\030\001 \001(\010\" " +
+      "\n\036IsCatalogJanitorEnabledRequest\"0\n\037IsCa",
+      "talogJanitorEnabledResponse\022\r\n\005value\030\001 \002" +
+      "(\010\"B\n\017SnapshotRequest\022/\n\010snapshot\030\001 \002(\0132" +
+      "\035.hbase.pb.SnapshotDescription\",\n\020Snapsh" +
+      "otResponse\022\030\n\020expected_timeout\030\001 \002(\003\"\036\n\034" +
+      "GetCompletedSnapshotsRequest\"Q\n\035GetCompl" +
+      "etedSnapshotsResponse\0220\n\tsnapshots\030\001 \003(\013" +
+      "2\035.hbase.pb.SnapshotDescription\"H\n\025Delet" +
+      "eSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.hba" +
+      "se.pb.SnapshotDescription\"\030\n\026DeleteSnaps" +
+      "hotResponse\"s\n\026RestoreSnapshotRequest\022/\n",
       "\010snapshot\030\001 \002(\0132\035.hbase.pb.SnapshotDescr" +
-      "iption\"\030\n\026DeleteSnapshotResponse\"s\n\026Rest" +
-      "oreSnapshotRequest\022/\n\010snapshot\030\001 \002(\0132\035.h",
-      "base.pb.SnapshotDescription\022\026\n\013nonce_gro" +
-      "up\030\002 \001(\004:\0010\022\020\n\005nonce\030\003 \001(\004:\0010\"*\n\027Restore" +
-      "SnapshotResponse\022\017\n\007proc_id\030\001 \002(\004\"H\n\025IsS" +
-      "napshotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.h" +
-      "base.pb.SnapshotDescription\"^\n\026IsSnapsho" +
-      "tDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\022/\n\010sn" +
-      "apshot\030\002 \001(\0132\035.hbase.pb.SnapshotDescript" +
-      "ion\"O\n\034IsRestoreSnapshotDoneRequest\022/\n\010s" +
-      "napshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescrip" +
-      "tion\"4\n\035IsRestoreSnapshotDoneResponse\022\023\n",
-      "\004done\030\001 \001(\010:\005false\"F\n\033GetSchemaAlterStat" +
-      "usRequest\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb" +
-      ".TableName\"T\n\034GetSchemaAlterStatusRespon" +
-      "se\022\035\n\025yet_to_update_regions\030\001 \001(\r\022\025\n\rtot" +
-      "al_regions\030\002 \001(\r\"\213\001\n\032GetTableDescriptors" +
-      "Request\022(\n\013table_names\030\001 \003(\0132\023.hbase.pb." +
-      "TableName\022\r\n\005regex\030\002 \001(\t\022!\n\022include_sys_" +
-      "tables\030\003 \001(\010:\005false\022\021\n\tnamespace\030\004 \001(\t\"J" +
-      "\n\033GetTableDescriptorsResponse\022+\n\014table_s" +
-      "chema\030\001 \003(\0132\025.hbase.pb.TableSchema\"[\n\024Ge",
-      "tTableNamesRequest\022\r\n\005regex\030\001 \001(\t\022!\n\022inc" +
-      "lude_sys_tables\030\002 \001(\010:\005false\022\021\n\tnamespac" +
-      "e\030\003 \001(\t\"A\n\025GetTableNamesResponse\022(\n\013tabl" +
-      "e_names\030\001 \003(\0132\023.hbase.pb.TableName\"?\n\024Ge" +
-      "tTableStateRequest\022\'\n\ntable_name\030\001 \002(\0132\023" +
-      ".hbase.pb.TableName\"B\n\025GetTableStateResp" +
-      "onse\022)\n\013table_state\030\001 \002(\0132\024.hbase.pb.Tab" +
-      "leState\"\031\n\027GetClusterStatusRequest\"K\n\030Ge" +
-      "tClusterStatusResponse\022/\n\016cluster_status" +
-      "\030\001 \002(\0132\027.hbase.pb.ClusterStatus\"\030\n\026IsMas",
-      "terRunningRequest\"4\n\027IsMasterRunningResp" +
-      "onse\022\031\n\021is_master_running\030\001 \002(\010\"I\n\024ExecP" +
-      "rocedureRequest\0221\n\tprocedure\030\001 \002(\0132\036.hba" +
-      "se.pb.ProcedureDescription\"F\n\025ExecProced" +
-      "ureResponse\022\030\n\020expected_timeout\030\001 \001(\003\022\023\n" +
-      "\013return_data\030\002 \001(\014\"K\n\026IsProcedureDoneReq" +
-      "uest\0221\n\tprocedure\030\001 \001(\0132\036.hbase.pb.Proce" +
-      "dureDescription\"`\n\027IsProcedureDoneRespon" +
-      "se\022\023\n\004done\030\001 \001(\010:\005false\0220\n\010snapshot\030\002 \001(" +
-      "\0132\036.hbase.pb.ProcedureDescription\",\n\031Get",
-      "ProcedureResultRequest\022\017\n\007proc_id\030\001 \002(\004\"" +
-      "\371\001\n\032GetProcedureResultResponse\0229\n\005state\030" +
-      "\001 \002(\0162*.hbase.pb.GetProcedureResultRespo" +
-      "nse.State\022\022\n\nstart_time\030\002 \001(\004\022\023\n\013last_up" +
-      "date\030\003 \001(\004\022\016\n\006result\030\004 \001(\014\0224\n\texception\030" +
-      "\005 \001(\0132!.hbase.pb.ForeignExceptionMessage" +
-      "\"1\n\005State\022\r\n\tNOT_FOUND\020\000\022\013\n\007RUNNING\020\001\022\014\n" +
-      "\010FINISHED\020\002\"M\n\025AbortProcedureRequest\022\017\n\007" +
-      "proc_id\030\001 \002(\004\022#\n\025mayInterruptIfRunning\030\002" +
-      " \001(\010:\004true\"6\n\026AbortProcedureResponse\022\034\n\024",
-      "is_procedure_aborted\030\001 \002(\010\"\027\n\025ListProced" +
-      "uresRequest\"@\n\026ListProceduresResponse\022&\n" +
-      "\tprocedure\030\001 \003(\0132\023.hbase.pb.Procedure\"\315\001" +
-      "\n\017SetQuotaRequest\022\021\n\tuser_name\030\001 \001(\t\022\022\n\n" +
-      "user_group\030\002 \001(\t\022\021\n\tnamespace\030\003 \001(\t\022\'\n\nt" +
-      "able_name\030\004 \001(\0132\023.hbase.pb.TableName\022\022\n\n" +
-      "remove_all\030\005 \001(\010\022\026\n\016bypass_globals\030\006 \001(\010" +
-      "\022+\n\010throttle\030\007 \001(\0132\031.hbase.pb.ThrottleRe" +
-      "quest\"\022\n\020SetQuotaResponse\"J\n\037MajorCompac" +
-      "tionTimestampRequest\022\'\n\ntable_name\030\001 \002(\013",
-      "2\023.hbase.pb.TableName\"U\n(MajorCompaction" +
-      "TimestampForRegionRequest\022)\n\006region\030\001 \002(" +
-      "\0132\031.hbase.pb.RegionSpecifier\"@\n MajorCom" +
-      "pactionTimestampResponse\022\034\n\024compaction_t" +
-      "imestamp\030\001 \002(\003\"\035\n\033SecurityCapabilitiesRe" +
-      "quest\"\354\001\n\034SecurityCapabilitiesResponse\022G" +
-      "\n\014capabilities\030\001 \003(\01621.hbase.pb.Security" +
-      "CapabilitiesResponse.Capability\"\202\001\n\nCapa" +
-      "bility\022\031\n\025SIMPLE_AUTHENTICATION\020\000\022\031\n\025SEC" +
-      "URE_AUTHENTICATION\020\001\022\021\n\rAUTHORIZATION\020\002\022",
-      "\026\n\022CELL_AUTHORIZATION\020\003\022\023\n\017CELL_VISIBILI" +
-      "TY\020\004*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005M" +
-      "ERGE\020\0012\261)\n\rMasterService\022e\n\024GetSchemaAlt" +
-      "erStatus\022%.hbase.pb.GetSchemaAlterStatus" +
-      "Request\032&.hbase.pb.GetSchemaAlterStatusR" +
-      "esponse\022b\n\023GetTableDescriptors\022$.hbase.p" +
-      "b.GetTableDescriptorsRequest\032%.hbase.pb." +
-      "GetTableDescriptorsResponse\022P\n\rGetTableN" +
-      "ames\022\036.hbase.pb.GetTableNamesRequest\032\037.h" +
-      "base.pb.GetTableNamesResponse\022Y\n\020GetClus",
-      "terStatus\022!.hbase.pb.GetClusterStatusReq" +
-      "uest\032\".hbase.pb.GetClusterStatusResponse" +
-      "\022V\n\017IsMasterRunning\022 .hbase.pb.IsMasterR" +
-      "unningRequest\032!.hbase.pb.IsMasterRunning" +
-      "Response\022D\n\tAddColumn\022\032.hbase.pb.AddColu" +
-      "mnRequest\032\033.hbase.pb.AddColumnResponse\022M" +
-      "\n\014DeleteColumn\022\035.hbase.pb.DeleteColumnRe" +
-      "quest\032\036.hbase.pb.DeleteColumnResponse\022M\n" +
-      "\014ModifyColumn\022\035.hbase.pb.ModifyColumnReq" +
-      "uest\032\036.hbase.pb.ModifyColumnResponse\022G\n\n",
-      "MoveRegion\022\033.hbase.pb.MoveRegionRequest\032" +
-      "\034.hbase.pb.MoveRegionResponse\022k\n\026Dispatc" +
-      "hMergingRegions\022\'.hbase.pb.DispatchMergi" +
-      "ngRegionsRequest\032(.hbase.pb.DispatchMerg" +
-      "ingRegionsResponse\022\\\n\021MergeTableRegions\022" +
-      "\".hbase.pb.MergeTableRegionsRequest\032#.hb" +
-      "ase.pb.MergeTableRegionsResponse\022M\n\014Assi" +
-      "gnRegion\022\035.hbase.pb.AssignRegionRequest\032" +
-      "\036.hbase.pb.AssignRegionResponse\022S\n\016Unass" +
-      "ignRegion\022\037.hbase.pb.UnassignRegionReque",
-      "st\032 .hbase.pb.UnassignRegionResponse\022P\n\r" +
-      "OfflineRegion\022\036.hbase.pb.OfflineRegionRe" +
-      "quest\032\037.hbase.pb.OfflineRegionResponse\022J" +
-      "\n\013DeleteTable\022\034.hbase.pb.DeleteTableRequ" +
-      "est\032\035.hbase.pb.DeleteTableResponse\022P\n\rtr" +
-      "uncateTable\022\036.hbase.pb.TruncateTableRequ" +
-      "est\032\037.hbase.pb.TruncateTableResponse\022J\n\013" +
-      "EnableTable\022\034.hbase.pb.EnableTableReques" +
-      "t\032\035.hbase.pb.EnableTableResponse\022M\n\014Disa" +
-      "bleTable\022\035.hbase.pb.DisableTableRequest\032",
-      "\036.hbase.pb.DisableTableResponse\022J\n\013Modif" +
-      "yTable\022\034.hbase.pb.ModifyTableRequest\032\035.h" +
-      "base.pb.ModifyTableResponse\022J\n\013CreateTab" +
-      "le\022\034.hbase.pb.CreateTableRequest\032\035.hbase" +
-      ".pb.CreateTableResponse\022A\n\010Shutdown\022\031.hb" +
-      "ase.pb.ShutdownRequest\032\032.hbase.pb.Shutdo" +
-      "wnResponse\022G\n\nStopMaster\022\033.hbase.pb.Stop" +
-      "MasterRequest\032\034.hbase.pb.StopMasterRespo" +
-      "nse\022h\n\031IsMasterInMaintenanceMode\022$.hbase" +
-      ".pb.IsInMaintenanceModeRequest\032%.hbase.p",
-      "b.IsInMaintenanceModeResponse\022>\n\007Balance" +
-      "\022\030.hbase.pb.BalanceRequest\032\031.hbase.pb.Ba" +
-      "lanceResponse\022_\n\022SetBalancerRunning\022#.hb" +
-      "ase.pb.SetBalancerRunningRequest\032$.hbase" +
-      ".pb.SetBalancerRunningResponse\022\\\n\021IsBala" +
-      "ncerEnabled\022\".hbase.pb.IsBalancerEnabled" +
-      "Request\032#.hbase.pb.IsBalancerEnabledResp" +
-      "onse\022k\n\026SetSplitOrMergeEnabled\022\'.hbase.p" +
-      "b.SetSplitOrMergeEnabledRequest\032(.hbase." +
-      "pb.SetSplitOrMergeEnabledResponse\022h\n\025IsS",
-      "plitOrMergeEnabled\022&.hbase.pb.IsSplitOrM" +
-      "ergeEnabledRequest\032\'.hbase.pb.IsSplitOrM" +
-      "ergeEnabledResponse\022D\n\tNormalize\022\032.hbase" +
-      ".pb.NormalizeRequest\032\033.hbase.pb.Normaliz" +
-      "eResponse\022e\n\024SetNormalizerRunning\022%.hbas" +
-      "e.pb.SetNormalizerRunningRequest\032&.hbase" +
-      ".pb.SetNormalizerRunningResponse\022b\n\023IsNo" +
-      "rmalizerEnabled\022$.hbase.pb.IsNormalizerE" +
-      "nabledRequest\032%.hbase.pb.IsNormalizerEna" +
-      "bledResponse\022S\n\016RunCatalogScan\022\037.hbase.p",
-      "b.RunCatalogScanRequest\032 .hbase.pb.RunCa" +
-      "talogScanResponse\022e\n\024EnableCatalogJanito" +
-      "r\022%.hbase.pb.EnableCatalogJanitorRequest" +
-      "\032&.hbase.pb.EnableCatalogJanitorResponse" +
-      "\022n\n\027IsCatalogJanitorEnabled\022(.hbase.pb.I" +
-      "sCatalogJanitorEnabledRequest\032).hbase.pb" +
-      ".IsCatalogJanitorEnabledResponse\022^\n\021Exec" +
-      "MasterService\022#.hbase.pb.CoprocessorServ" +
-      "iceRequest\032$.hbase.pb.CoprocessorService" +
-      "Response\022A\n\010Snapshot\022\031.hbase.pb.Snapshot",
-      "Request\032\032.hbase.pb.SnapshotResponse\022h\n\025G" +
-      "etCompletedSnapshots\022&.hbase.pb.GetCompl" +
-      "etedSnapshotsRequest\032\'.hbase.pb.GetCompl" +
-      "etedSnapshotsResponse\022S\n\016DeleteSnapshot\022" +
-      "\037.hbase.pb.DeleteSnapshotRequest\032 .hbase" +
-      ".pb.DeleteSnapshotResponse\022S\n\016IsSnapshot" +
-      "Done\022\037.hbase.pb.IsSnapshotDoneRequest\032 ." +
-      "hbase.pb.IsSnapshotDoneResponse\022V\n\017Resto" +
-      "reSnapshot\022 .hbase.pb.RestoreSnapshotReq" +
-      "uest\032!.hbase.pb.RestoreSnapshotResponse\022",
-      "P\n\rExecProcedure\022\036.hbase.pb.ExecProcedur" +
-      "eRequest\032\037.hbase.pb.ExecProcedureRespons" +
-      "e\022W\n\024ExecProcedureWithRet\022\036.hbase.pb.Exe" +
-      "cProcedureRequest\032\037.hbase.pb.ExecProcedu" +
-      "reResponse\022V\n\017IsProcedureDone\022 .hbase.pb" +
-      ".IsProcedureDoneRequest\032!.hbase.pb.IsPro" +
-      "cedureDoneResponse\022V\n\017ModifyNamespace\022 ." +
-      "hbase.pb.ModifyNamespaceRequest\032!.hbase." +
-      "pb.ModifyNamespaceResponse\022V\n\017CreateName" +
-      "space\022 .hbase.pb.CreateNamespaceRequest\032",
-      "!.hbase.pb.CreateNamespaceResponse\022V\n\017De" +
-      "leteNamespace\022 .hbase.pb.DeleteNamespace" +
-      "Request\032!.hbase.pb.DeleteNamespaceRespon" +
-      "se\022k\n\026GetNamespaceDescriptor\022\'.hbase.pb." +
-      "GetNamespaceDescriptorRequest\032(.hbase.pb" +
-      ".GetNamespaceDescriptorResponse\022q\n\030ListN" +
-      "amespaceDescriptors\022).hbase.pb.ListNames" +
-      "paceDescriptorsRequest\032*.hbase.pb.ListNa" +
-      "mespaceDescriptorsResponse\022\206\001\n\037ListTable" +
-      "DescriptorsByNamespace\0220.hbase.pb.ListTa",
-      "bleDescriptorsByNamespaceRequest\0321.hbase" +
-      ".pb.ListTableDescriptorsByNamespaceRespo" +
-      "nse\022t\n\031ListTableNamesByNamespace\022*.hbase" +
-      ".pb.ListTableNamesByNamespaceRequest\032+.h" +
-      "base.pb.ListTableNamesByNamespaceRespons" +
-      "e\022P\n\rGetTableState\022\036.hbase.pb.GetTableSt" +
-      "ateRequest\032\037.hbase.pb.GetTableStateRespo" +
-      "nse\022A\n\010SetQuota\022\031.hbase.pb.SetQuotaReque" +
-      "st\032\032.hbase.pb.SetQuotaResponse\022x\n\037getLas" +
-      "tMajorCompactionTimestamp\022).hbase.pb.Maj",
-      "orCompactionTimestampRequest\032*.hbase.pb." +
-      "MajorCompactionTimestampResponse\022\212\001\n(get" +
-      "LastMajorCompactionTimestampForRegion\0222." +
-      "hbase.pb.MajorCompactionTimestampForRegi" +
-      "onRequest\032*.hbase.pb.MajorCompactionTime" +
-      "stampResponse\022_\n\022getProcedureResult\022#.hb" +
-      "ase.pb.GetProcedureResultRequest\032$.hbase" +
-      ".pb.GetProcedureResultResponse\022h\n\027getSec" +
-      "urityCapabilities\022%.hbase.pb.SecurityCap" +
-      "abilitiesRequest\032&.hbase.pb.SecurityCapa",
-      "bilitiesResponse\022S\n\016AbortProcedure\022\037.hba" +
-      "se.pb.AbortProcedureRequest\032 .hbase.pb.A" +
-      "bortProcedureResponse\022S\n\016ListProcedures\022" +
-      "\037.hbase.pb.ListProceduresRequest\032 .hbase" +
-      ".pb.ListProceduresResponseBI\n1org.apache" +
-      ".hadoop.hbase.shaded.protobuf.generatedB" +
-      "\014MasterProtosH\001\210\001\001\240\001\001"
+      "iption\022\026\n\013nonce_group\030\002 \001(\004:\0010\022\020\n\005nonce\030" +
+      "\003 \001(\004:\0010\"*\n\027RestoreSnapshotResponse\022\017\n\007p" +
+      "roc_id\030\001 \002(\004\"H\n\025IsSnapshotDoneRequest\022/\n" +
+      "\010snapshot\030\001 \001(\0132\035.hbase.pb.SnapshotDescr" +
+      "iption\"^\n\026IsSnapshotDoneResponse\022\023\n\004done" +
+      "\030\001 \001(\010:\005false\022/\n\010snapshot\030\002 \001(\0132\035.hbase." +
+      "pb.SnapshotDescription\"O\n\034IsRestoreSnaps" +
+      "hotDoneRequest\022/\n\010snapshot\030\001 \001(\0132\035.hbase" +
+      ".pb.SnapshotDescription\"4\n\035IsRestoreSnap",
+      "shotDoneResponse\022\023\n\004done\030\001 \001(\010:\005false\"F\n" +
+      "\033GetSchemaAlterStatusRequest\022\'\n\ntable_na" +
+      "me\030\001 \002(\0132\023.hbase.pb.TableName\"T\n\034GetSche" +
+      "maAlterStatusResponse\022\035\n\025yet_to_update_r" +
+      "egions\030\001 \001(\r\022\025\n\rtotal_regions\030\002 \001(\r\"\213\001\n\032" +
+      "GetTableDescriptorsRequest\022(\n\013table_name" +
+      "s\030\001 \003(\0132\023.hbase.pb.TableName\022\r\n\005regex\030\002 " +
+      "\001(\t\022!\n\022include_sys_tables\030\003 \001(\010:\005false\022\021" +
+      "\n\tnamespace\030\004 \001(\t\"J\n\033GetTableDescriptors" +
+      "Response\022+\n\014table_schema\030\001 \003(\0132\025.hbase.p",
+      "b.TableSchema\"[\n\024GetTableNamesRequest\022\r\n" +
+      "\005regex\030\001 \001(\t\022!\n\022include_sys_tables\030\002 \001(\010" +
+      ":\005false\022\021\n\tnamespace\030\003 \001(\t\"A\n\025GetTableNa" +
+      "mesResponse\022(\n\013table_names\030\001 \003(\0132\023.hbase" +
+      ".pb.TableName\"?\n\024GetTableStateRequest\022\'\n" +
+      "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"B" +
+      "\n\025GetTableStateResponse\022)\n\013table_state\030\001" +
+      " \002(\0132\024.hbase.pb.TableState\"\031\n\027GetCluster" +
+      "StatusRequest\"K\n\030GetClusterStatusRespons" +
+      "e\022/\n\016cluster_status\030\001 \002(\0132\027.hbase.pb.Clu",
+      "sterStatus\"\030\n\026IsMasterRunningRequest\"4\n\027" +
+      "IsMasterRunningResponse\022\031\n\021is_master_run" +
+      "ning\030\001 \002(\010\"I\n\024ExecProcedureRequest\0221\n\tpr" +
+      "ocedure\030\001 \002(\0132\036.hbase.pb.ProcedureDescri" +
+      "ption\"F\n\025ExecProcedureResponse\022\030\n\020expect" +
+      "ed_timeout\030\001 \001(\003\022\023\n\013return_data\030\002 \001(\014\"K\n" +
+      "\026IsProcedureDoneRequest\0221\n\tprocedure\030\001 \001" +
+      "(\0132\036.hbase.pb.ProcedureDescription\"`\n\027Is" +
+      "ProcedureDoneResponse\022\023\n\004done\030\001 \001(\010:\005fal" +
+      "se\0220\n\010snapshot\030\002 \001(\0132\036.hbase.pb.Procedur",
+      "eDescription\",\n\031GetProcedureResultReques" +
+      "t\022\017\n\007proc_id\030\001 \002(\004\"\371\001\n\032GetProcedureResul" +
+      "tResponse\0229\n\005state\030\001 \002(\0162*.hbase.pb.GetP" +
+      "rocedureResultResponse.State\022\022\n\nstart_ti" +
+      "me\030\002 \001(\004\022\023\n\013last_update\030\003 \001(\004\022\016\n\006result\030" +
+      "\004 \001(\014\0224\n\texception\030\005 \001(\0132!.hbase.pb.Fore" +
+      "ignExceptionMessage\"1\n\005State\022\r\n\tNOT_FOUN" +
+      "D\020\000\022\013\n\007RUNNING\020\001\022\014\n\010FINISHED\020\002\"M\n\025AbortP" +
+      "rocedureRequest\022\017\n\007proc_id\030\001 \002(\004\022#\n\025mayI" +
+      "nterruptIfRunning\030\002 \001(\010:\004true\"6\n\026AbortPr",
+      "ocedureResponse\022\034\n\024is_procedure_aborted\030" +
+      "\001 \002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListPr" +
+      "oceduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hb" +
+      "ase.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\t" +
+      "user_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tn" +
+      "amespace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hba" +
+      "se.pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016b" +
+      "ypass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031." +
+      "hbase.pb.ThrottleRequest\"\022\n\020SetQuotaResp" +
+      "onse\"J\n\037MajorCompactionTimestampRequest\022",
+      "\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName" +
+      "\"U\n(MajorCompactionTimestampForRegionReq" +
+      "uest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSp" +
+      "ecifier\"@\n MajorCompactionTimestampRespo" +
+      "nse\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Sec" +
+      "urityCapabilitiesRequest\"\354\001\n\034SecurityCap" +
+      "abilitiesResponse\022G\n\014capabilities\030\001 \003(\0162" +
+      "1.hbase.pb.SecurityCapabilitiesResponse." +
+      "Capability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTH" +
+      "ENTICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022",
+      "\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION" +
+      "\020\003\022\023\n\017CELL_VISIBILITY\020\004*(\n\020MasterSwitchT" +
+      "ype\022\t\n\005SPLIT\020\000\022\t\n\005MERGE\020\0012\374*\n\rMasterServ" +
+      "ice\022e\n\024GetSchemaAlterStatus\022%.hbase.pb.G" +
+      "etSchemaAlterStatusRequest\032&.hbase.pb.Ge" +
+      "tSchemaAlterStatusResponse\022b\n\023GetTableDe" +
+      "scriptors\022$.hbase.pb.GetTableDescriptors" +
+      "Request\032%.hbase.pb.GetTableDescriptorsRe" +
+      "sponse\022P\n\rGetTableNames\022\036.hbase.pb.GetTa" +
+      "bleNamesRequest\032\037.hbase.pb.GetTableNames",
+      "Response\022Y\n\020GetClusterStatus\022!.hbase.pb." +
+      "GetClusterStatusRequest\032\".hbase.pb.GetCl" +
+      "usterStatusResponse\022V\n\017IsMasterRunning\022 " +
+      ".hbase.pb.IsMasterRunningRequest\032!.hbase" +
+      ".pb.IsMasterRunningResponse\022D\n\tAddColumn" +
+      "\022\032.hbase.pb.AddColumnRequest\032\033.hbase.pb." +
+      "AddColumnResponse\022M\n\014DeleteColumn\022\035.hbas" +
+      "e.pb.DeleteColumnRequest\032\036.hbase.pb.Dele" +
+      "teColumnResponse\022M\n\014ModifyColumn\022\035.hbase" +
+      ".pb.ModifyColumnRequest\032\036.hbase.pb.Modif",
+      "yColumnResponse\022G\n\nMoveRegion\022\033.hbase.pb" +
+      ".MoveRegionRequest\032\034.hbase.pb.MoveRegion" +
+      "Response\022k\n\026DispatchMergingRegions\022\'.hba" +
+      "se.pb.DispatchMergingRegionsRequest\032(.hb" +
+      "ase.pb.DispatchMergingRegionsResponse\022\\\n" +
+      "\021MergeTableRegions\022\".hbase.pb.MergeTable" +
+      "RegionsRequest\032#.hbase.pb.MergeTableRegi" +
+      "onsResponse\022M\n\014AssignRegion\022\035.hbase.pb.A" +
+      "ssignRegionRequest\032\036.hbase.pb.AssignRegi" +
+      "onResponse\022S\n\016UnassignRegion\022\037.hbase.pb.",
+      "UnassignRegionRequest\032 .hbase.pb.Unassig" +
+      "nRegionResponse\022P\n\rOfflineRegion\022\036.hbase" +
+      ".pb.OfflineRegionRequest\032\037.hbase.pb.Offl" +
+      "ineRegionResponse\022J\n\013DeleteTable\022\034.hbase" +
+      ".pb.DeleteTableRequest\032\035.hbase.pb.Delete" +
+      "TableResponse\022P\n\rtruncateTable\022\036.hbase.p" +
+      "b.TruncateTableRequest\032\037.hbase.pb.Trunca" +
+      "teTableResponse\022J\n\013EnableTable\022\034.hbase.p" +
+      "b.EnableTableRequest\032\035.hbase.pb.EnableTa" +
+      "bleResponse\022M\n\014DisableTable\022\035.hbase.pb.D",
+      "isableTableRequest\032\036.hbase.pb.DisableTab" +
+      "leResponse\022J\n\013ModifyTable\022\034.hbase.pb.Mod" +
+      "ifyTableRequest\032\035.hbase.pb.ModifyTableRe" +
+      "sponse\022J\n\013CreateTable\022\034.hbase.pb.CreateT" +
+      "ableRequest\032\035.hbase.pb.CreateTableRespon" +
+      "se\022A\n\010Shutdown\022\031.hbase.pb.ShutdownReques" +
+      "t\032\032.hbase.pb.ShutdownResponse\022G\n\nStopMas" +
+      "ter\022\033.hbase.pb.StopMasterRequest\032\034.hbase" +
+      ".pb.StopMasterResponse\022h\n\031IsMasterInMain" +
+      "tenanceMode\022$.hbase.pb.IsInMaintenanceMo",
+      "deRequest\032%.hbase.pb.IsInMaintenanceMode" +
+      "Response\022>\n\007Balance\022\030.hbase.pb.BalanceRe" +
+      "quest\032\031.hbase.pb.BalanceResponse\022_\n\022SetB" +
+      "alancerRunning\022#.hbase.pb.SetBalancerRun" +
+      "ningRequest\032$.hbase.pb.SetBalancerRunnin" +
+      "gResponse\022\\\n\021IsBalancerEnabled\022\".hbase.p" +
+      "b.IsBalancerEnabledRequest\032#.hbase.pb.Is" +
+      "BalancerEnabledResponse\022k\n\026SetSplitOrMer" +
+      "geEnabled\022\'.hbase.pb.SetSplitOrMergeEnab" +
+      "ledRequest\032(.hbase.pb.SetSplitOrMergeEna",
+      "bledResponse\022h\n\025IsSplitOrMergeEnabled\022&." +
+      "hbase.pb.IsSplitOrMergeEnabledRequest\032\'." +
+      "hbase.pb.IsSplitOrMergeEnabledResponse\022D" +
+      "\n\tNormalize\022\032.hbase.pb.NormalizeRequest\032" +
+      "\033.hbase.pb.NormalizeResponse\022e\n\024SetNorma" +
+      "lizerRunning\022%.hbase.pb.SetNormalizerRun" +
+      "ningRequest\032&.hbase.pb.SetNormalizerRunn" +
+      "ingResponse\022b\n\023IsNormalizerEnabled\022$.hba" +
+      "se.pb.IsNormalizerEnabledRequest\032%.hbase" +
+      ".pb.IsNormalizerEnabledResponse\022S\n\016RunCa",
+      "talogScan\022\037.hbase.pb.RunCatalogScanReque" +
+      "st\032 .hbase.pb.RunCatalogScanResponse\022e\n\024" +
+      "EnableCatalogJanitor\022%.hbase.pb.EnableCa" +
+      "talogJanitorRequest\032&.hbase.pb.EnableCat" +
+      "alogJanitorResponse\022n\n\027IsCatalogJanitorE" +
+      "nabled\022(.hbase.pb.IsCatalogJanitorEnable" +
+      "dRequest\032).hbase.pb.IsCatalogJanitorEnab" +
+      "ledResponse\022^\n\021ExecMasterService\022#.hbase" +
+      ".pb.CoprocessorServiceRequest\032$.hbase.pb" +
+      ".CoprocessorServiceResponse\022A\n\010Snapshot\022",
+      "\031.hbase.pb.SnapshotRequest\032\032.hbase.pb.Sn" +
+      "apshotResponse\022h\n\025GetCompletedSnapshots\022" +
+      "&.hbase.pb.GetCompletedSnapshotsRequest\032" +
+      "\'.hbase.pb.GetCompletedSnapshotsResponse" +
+      "\022S\n\016DeleteSnapshot\022\037.hbase.pb.DeleteSnap" +
+      "shotRequest\032 .hbase.pb.DeleteSnapshotRes" +
+      "ponse\022S\n\016IsSnapshotDone\022\037.hbase.pb.IsSna" +
+      "pshotDoneRequest\032 .hbase.pb.IsSnapshotDo" +
+      "neResponse\022V\n\017RestoreSnapshot\022 .hbase.pb" +
+      ".RestoreSnapshotRequest\032!.hbase.pb.Resto",
+      "reSnapshotResponse\022P\n\rExecProcedure\022\036.hb" +
+      "ase.pb.ExecProcedureRequest\032\037.hbase.pb.E" +
+      "xecProcedureResponse\022W\n\024ExecProcedureWit" +
+      "hRet\022\036.hbase.pb.ExecProcedureRequest\032\037.h" +
+      "base.pb.ExecProcedureResponse\022V\n\017IsProce" +
+      "dureDone\022 .hbase.pb.IsProcedureDoneReque" +
+      "st\032!.hbase.pb.IsProcedureDoneResponse\022V\n" +
+      "\017ModifyNamespace\022 .hbase.pb.ModifyNamesp" +
+      "aceRequest\032!.hbase.pb.ModifyNamespaceRes" +
+      "ponse\022V\n\017CreateNamespace\022 .hbase.pb.Crea",
+      "teNamespaceRequest\032!.hbase.pb.CreateName" +
+      "spaceResponse\022V\n\017DeleteNamespace\022 .hbase" +
+      ".pb.DeleteNamespaceRequest\032!.hbase.pb.De" +
+      "leteNamespaceResponse\022k\n\026GetNamespaceDes" +
+      "criptor\022\'.hbase.pb.GetNamespaceDescripto" +
+      "rRequest\032(.hbase.pb.GetNamespaceDescript" +
+      "orResponse\022q\n\030ListNamespaceDescriptors\022)" +
+      ".hbase.pb.ListNamespaceDescriptorsReques" +
+      "t\032*.hbase.pb.ListNamespaceDescriptorsRes" +
+      "ponse\022\206\001\n\037ListTableDescriptorsByNamespac",
+      "e\0220.hbase.pb.ListTableDescriptorsByNames" +
+      "paceRequest\0321.hbase.pb.ListTableDescript" +
+      "orsByNamespaceResponse\022t\n\031ListTableNames" +
+      "ByNamespace\022*.hbase.pb.ListTableNamesByN" +
+      "amespaceRequest\032+.hbase.pb.ListTableName" +
+      "sByNamespaceResponse\022P\n\rGetTableState\022\036." +
+      "hbase.pb.GetTableStateRequest\032\037.hbase.pb" +
+      ".GetTableStateResponse\022A\n\010SetQuota\022\031.hba" +
+      "se.pb.SetQuotaRequest\032\032.hbase.pb.SetQuot" +
+      "aResponse\022x\n\037getLastMajorCompactionTimes",
+      "tamp\022).hbase.pb.MajorCompactionTimestamp" +
+      "Request\032*.hbase.pb.MajorCompactionTimest" +
+      "ampResponse\022\212\001\n(getLastMajorCompactionTi" +
+      "mestampForRegion\0222.hbase.pb.MajorCompact" +
+      "ionTimestampForRegionRequest\032*.hbase.pb." +
+      "MajorCompactionTimestampResponse\022_\n\022getP" +
+      "rocedureResult\022#.hbase.pb.GetProcedureRe" +
+      "sultRequest\032$.hbase.pb.GetProcedureResul" +
+      "tResponse\022h\n\027getSecurityCapabilities\022%.h" +
+      "base.pb.SecurityCapabilitiesRequest\032&.hb",
+      "ase.pb.SecurityCapabilitiesResponse\022S\n\016A" +
+      "bortProcedure\022\037.hbase.pb.AbortProcedureR" +
+      "equest\032 .hbase.pb.AbortProcedureResponse" +
+      "\022S\n\016ListProcedures\022\037.hbase.pb.ListProced" +
+      "uresRequest\032 .hbase.pb.ListProceduresRes" +
+      "ponse\022_\n\022AddReplicationPeer\022#.hbase.pb.A" +
+      "ddReplicationPeerRequest\032$.hbase.pb.AddR" +
+      "eplicationPeerResponse\022h\n\025RemoveReplicat" +
+      "ionPeer\022&.hbase.pb.RemoveReplicationPeer" +
+      "Request\032\'.hbase.pb.RemoveReplicationPeer",
+      "ResponseBI\n1org.apache.hadoop.hbase.shad" +
+      "ed.protobuf.generatedB\014MasterProtosH\001\210\001\001" +
+      "\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.    InternalDescriptorAssigner() {
@@ -71341,6 +71505,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.getDescriptor(),
           org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor(),
           org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor(),
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.getDescriptor(),
         }, assigner);
     internal_static_hbase_pb_AddColumnRequest_descriptor =
       getDescriptor().getMessageTypes().get(0);
@@ -72026,6 +72191,7 @@ public final class MasterProtos {
     org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.getDescriptor();
     org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.getDescriptor();
     org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor();
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.getDescriptor();
   }
 
   // @@protoc_insertion_point(outer_class_scope)


[42/50] [abbrv] hbase git commit: HBASE-17336 get/update replication peer config requests should be routed through master

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-protocol-shaded/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index 0a000ee..f553e8f 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -885,6 +885,14 @@ service MasterService {
   rpc DisableReplicationPeer(DisableReplicationPeerRequest)
     returns(DisableReplicationPeerResponse);
 
+  /** Return peer config for a replication peer */
+  rpc GetReplicationPeerConfig(GetReplicationPeerConfigRequest)
+    returns(GetReplicationPeerConfigResponse);
+
+  /** Update peer config for a replication peer */
+  rpc UpdateReplicationPeerConfig(UpdateReplicationPeerConfigRequest)
+    returns(UpdateReplicationPeerConfigResponse);
+
   /** Returns a list of ServerNames marked as draining. */
   rpc listDrainingRegionServers(ListDrainingRegionServersRequest)
     returns(ListDrainingRegionServersResponse);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-protocol-shaded/src/main/protobuf/Replication.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Replication.proto b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
index 83633b3..05b6e59 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Replication.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
@@ -54,3 +54,20 @@ message DisableReplicationPeerRequest {
 
 message DisableReplicationPeerResponse {
 }
+
+message GetReplicationPeerConfigRequest {
+  required string peer_id = 1;
+}
+
+message GetReplicationPeerConfigResponse {
+  required string peer_id = 1;
+  required ReplicationPeer peer_config = 2;
+}
+
+message UpdateReplicationPeerConfigRequest {
+  required string peer_id = 1;
+  required ReplicationPeer peer_config = 2;
+}
+
+message UpdateReplicationPeerConfigResponse {
+}
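
A minimal client-side sketch of what the new master-routed RPCs enable. It assumes the
matching Admin methods (getReplicationPeerConfig / updateReplicationPeerConfig) added on
the client side of this change; the peer id "1" and the configuration key below are purely
illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class UpdatePeerConfigExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Read the peer config through the active master instead of going to ZooKeeper.
      ReplicationPeerConfig peerConfig = admin.getReplicationPeerConfig("1");
      // Adjust an arbitrary per-peer property (illustrative key) and push it back.
      peerConfig.getConfiguration().put("example.key", "example.value");
      admin.updateReplicationPeerConfig("1", peerConfig);
    }
  }
}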

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index d3b3868..c42c7b7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -1909,4 +1909,46 @@ public interface MasterObserver extends Coprocessor {
   default void postDisableReplicationPeer(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       String peerId) throws IOException {
   }
+
+  /**
+   * Called before getting the configured ReplicationPeerConfig for the specified peer
+   * @param ctx the environment to interact with the framework and master
+   * @param peerId a short name that identifies the peer
+   * @throws IOException on failure
+   */
+  default void preGetReplicationPeerConfig(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+  }
+
+  /**
+   * Called after getting the configured ReplicationPeerConfig for the specified peer
+   * @param ctx the environment to interact with the framework and master
+   * @param peerId a short name that identifies the peer
+   * @throws IOException on failure
+   */
+  default void postGetReplicationPeerConfig(
+      final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId) throws IOException {
+  }
+
+  /**
+   * Called before updating the peer config for the specified peer
+   * @param ctx the environment to interact with the framework and master
+   * @param peerId a short name that identifies the peer
+   * @throws IOException on failure
+   */
+  default void preUpdateReplicationPeerConfig(
+      final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
+      ReplicationPeerConfig peerConfig) throws IOException {
+  }
+
+  /**
+   * Called after updating the peer config for the specified peer
+   * @param ctx the environment to interact with the framework and master
+   * @param peerId a short name that identifies the peer
+   * @throws IOException on failure
+   */
+  default void postUpdateReplicationPeerConfig(
+      final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
+      ReplicationPeerConfig peerConfig) throws IOException {
+  }
 }
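
The new pre/post hook pairs let a coprocessor intercept peer-config reads and updates. A
minimal observer sketch, assuming BaseMasterObserver is still available as the convenience
base class at this point in the codebase; the validation rule and the println are purely
illustrative.

import java.io.IOException;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerConfigAuditObserver extends BaseMasterObserver {

  @Override
  public void preUpdateReplicationPeerConfig(
      final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
      ReplicationPeerConfig peerConfig) throws IOException {
    // Illustrative guard: reject updates that would blank out the peer's cluster key.
    if (peerConfig.getClusterKey() == null || peerConfig.getClusterKey().isEmpty()) {
      throw new IOException("Refusing to update peer " + peerId + " with an empty cluster key");
    }
  }

  @Override
  public void postUpdateReplicationPeerConfig(
      final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
      ReplicationPeerConfig peerConfig) throws IOException {
    // Illustrative audit trail; a real observer would use a proper logger.
    System.out.println("Replication peer " + peerId + " updated to " + peerConfig);
  }
}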

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ecaaa16..a87c38e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -3198,6 +3198,33 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
+  public ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws ReplicationException,
+      IOException {
+    if (cpHost != null) {
+      cpHost.preGetReplicationPeerConfig(peerId);
+    }
+    final ReplicationPeerConfig peerConfig = this.replicationManager.getPeerConfig(peerId);
+    if (cpHost != null) {
+      cpHost.postGetReplicationPeerConfig(peerId);
+    }
+    return peerConfig;
+  }
+
+  @Override
+  public void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
+      throws ReplicationException, IOException {
+    if (cpHost != null) {
+      cpHost.preUpdateReplicationPeerConfig(peerId, peerConfig);
+    }
+    LOG.info(getClientIdAuditPrefix() + " update replication peer config, id=" + peerId
+        + ", config=" + peerConfig);
+    this.replicationManager.updatePeerConfig(peerId, peerConfig);
+    if (cpHost != null) {
+      cpHost.postUpdateReplicationPeerConfig(peerId, peerConfig);
+    }
+  }
+
+  @Override
   public void drainRegionServer(final ServerName server) {
     String parentZnode = getZooKeeper().znodePaths.drainingZNode;
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 0623f2b..4e3987e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -1727,4 +1727,46 @@ public class MasterCoprocessorHost
       }
     });
   }
+
+  public void preGetReplicationPeerConfig(final String peerId) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.preGetReplicationPeerConfig(ctx, peerId);
+      }
+    });
+  }
+
+  public void postGetReplicationPeerConfig(final String peerId) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.postGetReplicationPeerConfig(ctx, peerId);
+      }
+    });
+  }
+
+  public void preUpdateReplicationPeerConfig(final String peerId,
+      final ReplicationPeerConfig peerConfig) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.preUpdateReplicationPeerConfig(ctx, peerId, peerConfig);
+      }
+    });
+  }
+
+  public void postUpdateReplicationPeerConfig(final String peerId,
+      final ReplicationPeerConfig peerConfig) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver observer, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        observer.postUpdateReplicationPeerConfig(ctx, peerId, peerConfig);
+      }
+    });
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 76da838..707cb39 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -93,10 +93,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Disab
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessController;
 import org.apache.hadoop.hbase.security.visibility.VisibilityController;
@@ -1695,6 +1700,34 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
+  public GetReplicationPeerConfigResponse getReplicationPeerConfig(RpcController controller,
+      GetReplicationPeerConfigRequest request) throws ServiceException {
+    GetReplicationPeerConfigResponse.Builder response = GetReplicationPeerConfigResponse
+        .newBuilder();
+    try {
+      String peerId = request.getPeerId();
+      ReplicationPeerConfig peerConfig = master.getReplicationPeerConfig(peerId);
+      response.setPeerId(peerId);
+      response.setPeerConfig(ReplicationSerDeHelper.convert(peerConfig));
+    } catch (ReplicationException | IOException e) {
+      throw new ServiceException(e);
+    }
+    return response.build();
+  }
+
+  @Override
+  public UpdateReplicationPeerConfigResponse updateReplicationPeerConfig(RpcController controller,
+      UpdateReplicationPeerConfigRequest request) throws ServiceException {
+    try {
+      master.updateReplicationPeerConfig(request.getPeerId(),
+        ReplicationSerDeHelper.convert(request.getPeerConfig()));
+      return UpdateReplicationPeerConfigResponse.newBuilder().build();
+    } catch (ReplicationException | IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public ListDrainingRegionServersResponse listDrainingRegionServers(RpcController controller,
       ListDrainingRegionServersRequest request) throws ServiceException {
     ListDrainingRegionServersResponse.Builder response =
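
The wire format for the two calls follows directly from the Replication.proto additions
above. A hedged sketch of building and reading the generated messages, assuming protobuf's
standard Java codegen naming for the declared fields (the helper class itself is
illustrative, not part of this patch):

import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;

public class PeerConfigMessages {
  /** Builds the request the client-side stub sends to the master for the given peer id. */
  static GetReplicationPeerConfigRequest buildGetRequest(String peerId) {
    return GetReplicationPeerConfigRequest.newBuilder()
        .setPeerId(peerId)   // required string peer_id = 1
        .build();
  }

  /** The master fills both required response fields: peer_id and peer_config. */
  static String describe(GetReplicationPeerConfigResponse response) {
    return response.getPeerId() + " -> " + response.getPeerConfig();
  }
}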

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 869e7ac..7686fc8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -446,6 +446,22 @@ public interface MasterServices extends Server {
   void disableReplicationPeer(String peerId) throws ReplicationException, IOException;
 
   /**
+   * Returns the configured ReplicationPeerConfig for the specified peer
+   * @param peerId a short name that identifies the peer
+   * @return ReplicationPeerConfig for the peer
+   */
+  ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws ReplicationException,
+      IOException;
+
+  /**
+   * Update the peerConfig for the specified peer
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig new config for the peer
+   */
+  void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
+      throws ReplicationException, IOException;
+
+  /**
    * Mark a region server as draining to prevent additional regions from getting assigned to it.
    * @param server Region servers to drain.
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
index 8c13718..f00730d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
@@ -24,6 +24,7 @@ import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -66,11 +67,13 @@ public class ReplicationManager {
       throws ReplicationException {
     checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
       peerConfig.getTableCFsMap());
-    this.replicationPeers.registerPeer(peerId, peerConfig);
+    replicationPeers.registerPeer(peerId, peerConfig);
+    replicationPeers.peerConnected(peerId);
   }
 
   public void removeReplicationPeer(String peerId) throws ReplicationException {
-    this.replicationPeers.unregisterPeer(peerId);
+    replicationPeers.peerDisconnected(peerId);
+    replicationPeers.unregisterPeer(peerId);
   }
 
   public void enableReplicationPeer(String peerId) throws ReplicationException {
@@ -81,6 +84,22 @@ public class ReplicationManager {
     this.replicationPeers.disablePeer(peerId);
   }
 
+  public ReplicationPeerConfig getPeerConfig(String peerId) throws ReplicationException,
+      ReplicationPeerNotFoundException {
+    ReplicationPeerConfig peerConfig = replicationPeers.getReplicationPeerConfig(peerId);
+    if (peerConfig == null) {
+      throw new ReplicationPeerNotFoundException(peerId);
+    }
+    return peerConfig;
+  }
+
+  public void updatePeerConfig(String peerId, ReplicationPeerConfig peerConfig)
+      throws ReplicationException {
+    checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
+      peerConfig.getTableCFsMap());
+    this.replicationPeers.updatePeerConfig(peerId, peerConfig);
+  }
+
   /**
    * Set a namespace in the peer config means that all tables in this namespace
    * will be replicated to the peer cluster.

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index eaa0611..0564ece 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -2720,4 +2720,17 @@ public class AccessController extends BaseMasterAndRegionObserver
       String peerId) throws IOException {
     requirePermission(getActiveUser(ctx), "disableReplicationPeer", Action.ADMIN);
   }
+
+  @Override
+  public void preGetReplicationPeerConfig(final ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String peerId) throws IOException {
+    requirePermission(getActiveUser(ctx), "getReplicationPeerConfig", Action.ADMIN);
+  }
+
+  @Override
+  public void preUpdateReplicationPeerConfig(
+      final ObserverContext<MasterCoprocessorEnvironment> ctx, String peerId,
+      ReplicationPeerConfig peerConfig) throws IOException {
+    requirePermission(getActiveUser(ctx), "updateReplicationPeerConfig", Action.ADMIN);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
index 0655a0f..9ecc9eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ServerRegionReplicaUtil.java
@@ -20,14 +20,18 @@ package org.apache.hadoop.hbase.util;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
 import org.apache.hadoop.hbase.io.HFileLink;
 import org.apache.hadoop.hbase.io.Reference;
+import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
 import org.apache.hadoop.hbase.replication.ReplicationException;
@@ -40,6 +44,8 @@ import org.apache.hadoop.hbase.zookeeper.ZKConfig;
  */
 public class ServerRegionReplicaUtil extends RegionReplicaUtil {
 
+  private static final Log LOG = LogFactory.getLog(ServerRegionReplicaUtil.class);
+
   /**
    * Whether asynchronous WAL replication to the secondary region replicas is enabled or not.
    * If this is enabled, a replication peer named "region_replica_replication" will be created
@@ -143,9 +149,18 @@ public class ServerRegionReplicaUtil extends RegionReplicaUtil {
       return;
     }
     ReplicationAdmin repAdmin = new ReplicationAdmin(conf);
+    ReplicationPeerConfig peerConfig = null;
+    try {
+      peerConfig = repAdmin.getPeerConfig(REGION_REPLICA_REPLICATION_PEER);
+    } catch (ReplicationPeerNotFoundException e) {
+      LOG.warn("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER
+          + " not exist", e);
+    }
     try {
-      if (repAdmin.getPeerConfig(REGION_REPLICA_REPLICATION_PEER) == null) {
-        ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
+      if (peerConfig == null) {
+        LOG.info("Region replica replication peer id=" + REGION_REPLICA_REPLICATION_PEER
+            + " not exist. Creating...");
+        peerConfig = new ReplicationPeerConfig();
         peerConfig.setClusterKey(ZKConfig.getZooKeeperClusterKey(conf));
         peerConfig.setReplicationEndpointImpl(RegionReplicaReplicationEndpoint.class.getName());
         repAdmin.addPeer(REGION_REPLICA_REPLICATION_PEER, peerConfig, null);
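
The client-visible change above is that ReplicationAdmin.getPeerConfig now signals a missing
peer with ReplicationPeerNotFoundException instead of returning null. A minimal probe sketch
(the utility class and its name are assumptions, not part of the patch):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

// Hypothetical utility: fetch a peer's config without assuming the peer exists.
public final class PeerProbe {
  public static ReplicationPeerConfig getPeerConfigOrNull(Configuration conf, String peerId)
      throws IOException, ReplicationException {
    try (ReplicationAdmin admin = new ReplicationAdmin(conf)) {
      return admin.getPeerConfig(peerId);
    } catch (ReplicationPeerNotFoundException e) {
      return null; // the peer has not been created yet
    }
  }
}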

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 10c73a6..15d5150 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.client.replication;
 
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -448,7 +449,7 @@ public class TestReplicationAdmin {
       admin.updatePeerConfig(ID_ONE, rpc);
       fail("Should throw ReplicationException, because table " + tab1 + " conflict with namespace "
           + ns1);
-    } catch (ReplicationException e) {
+    } catch (IOException e) {
       // OK
     }
 
@@ -465,7 +466,7 @@ public class TestReplicationAdmin {
       admin.updatePeerConfig(ID_ONE, rpc);
       fail("Should throw ReplicationException, because namespace " + ns2 + " conflict with table "
           + tab2);
-    } catch (ReplicationException e) {
+    } catch (IOException e) {
       // OK
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 2a5be12..36dc2e2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -414,4 +414,15 @@ public class MockNoopMasterServices implements MasterServices, Server {
   public void removeDrainFromRegionServer(ServerName servers) {
     return;
   }
+
+  @Override
+  public ReplicationPeerConfig getReplicationPeerConfig(String peerId) throws ReplicationException,
+      IOException {
+    return null;
+  }
+
+  @Override
+  public void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
+      throws ReplicationException, IOException {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
index 5147339..11df7e0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestRegionReplicaReplicationEndpoint.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
 import org.apache.hadoop.hbase.client.ClusterConnection;
@@ -111,14 +112,27 @@ public class TestRegionReplicaReplicationEndpoint {
     ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
     String peerId = "region_replica_replication";
 
-    if (admin.getPeerConfig(peerId) != null) {
+    ReplicationPeerConfig peerConfig = null;
+    try {
+      peerConfig = admin.getPeerConfig(peerId);
+    } catch (ReplicationPeerNotFoundException e) {
+      LOG.warn("Region replica replication peer id=" + peerId + " not exist", e);
+    }
+
+    if (peerConfig != null) {
       admin.removePeer(peerId);
+      peerConfig = null;
     }
 
     HTableDescriptor htd = HTU.createTableDescriptor(
       "testReplicationPeerIsCreated_no_region_replicas");
     HTU.getHBaseAdmin().createTable(htd);
-    ReplicationPeerConfig peerConfig = admin.getPeerConfig(peerId);
+    try {
+      peerConfig = admin.getPeerConfig(peerId);
+      fail("Should throw ReplicationException, because replication peer id=" + peerId
+          + " not exist");
+    } catch (ReplicationPeerNotFoundException e) {
+    }
     assertNull(peerConfig);
 
     htd = HTU.createTableDescriptor("testReplicationPeerIsCreated");
@@ -142,8 +156,16 @@ public class TestRegionReplicaReplicationEndpoint {
     ReplicationAdmin admin = new ReplicationAdmin(HTU.getConfiguration());
     String peerId = "region_replica_replication";
 
-    if (admin.getPeerConfig(peerId) != null) {
+    ReplicationPeerConfig peerConfig = null;
+    try {
+      peerConfig = admin.getPeerConfig(peerId);
+    } catch (ReplicationPeerNotFoundException e) {
+      LOG.warn("Region replica replication peer id=" + peerId + " not exist", e);
+    }
+
+    if (peerConfig != null) {
       admin.removePeer(peerId);
+      peerConfig = null;
     }
 
     HTableDescriptor htd
@@ -151,7 +173,12 @@ public class TestRegionReplicaReplicationEndpoint {
     HTU.getHBaseAdmin().createTable(htd);
 
     // assert that replication peer is not created yet
-    ReplicationPeerConfig peerConfig = admin.getPeerConfig(peerId);
+    try {
+      peerConfig = admin.getPeerConfig(peerId);
+      fail("Should throw ReplicationException, because replication peer id=" + peerId
+          + " not exist");
+    } catch (ReplicationPeerNotFoundException e) {
+    }
     assertNull(peerConfig);
 
     HTU.getHBaseAdmin().disableTable(htd.getTableName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 956eadf..7107669 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -113,6 +113,7 @@ import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.ScanType;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
@@ -2930,4 +2931,34 @@ public class TestAccessController extends SecureTestUtil {
     verifyAllowed(action, SUPERUSER, USER_ADMIN);
     verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
+
+  @Test
+  public void testGetReplicationPeerConfig() throws Exception {
+    AccessTestAction action = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preGetReplicationPeerConfig(
+          ObserverContext.createAndPrepare(CP_ENV, null), "test");
+        return null;
+      }
+    };
+
+    verifyAllowed(action, SUPERUSER, USER_ADMIN);
+    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
+
+  @Test
+  public void testUpdateReplicationPeerConfig() throws Exception {
+    AccessTestAction action = new AccessTestAction() {
+      @Override
+      public Object run() throws Exception {
+        ACCESS_CONTROLLER.preUpdateReplicationPeerConfig(
+          ObserverContext.createAndPrepare(CP_ENV, null), "test", new ReplicationPeerConfig());
+        return null;
+      }
+    };
+
+    verifyAllowed(action, SUPERUSER, USER_ADMIN);
+    verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/hbase-shell/src/main/ruby/hbase/replication_admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index 5fd23d3..3c94db2 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -293,7 +293,7 @@ module Hbase
       data = args.fetch(DATA, nil)
 
       # Create and populate a ReplicationPeerConfig
-      replication_peer_config = ReplicationPeerConfig.new
+      replication_peer_config = get_peer_config(id)
       unless config.nil?
         replication_peer_config.get_configuration.put_all(config)
       end

http://git-wip-us.apache.org/repos/asf/hbase/blob/0e486656/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index afe7b57..e902618 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -120,6 +120,8 @@ In case the table goes out of date, the unit tests which check for accuracy of p
 |        | removeReplicationPeer | superuser\|global(A)
 |        | enableReplicationPeer | superuser\|global(A)
 |        | disableReplicationPeer | superuser\|global(A)
+|        | getReplicationPeerConfig | superuser\|global(A)
+|        | updateReplicationPeerConfig | superuser\|global(A)
 | Region | openRegion | superuser\|global(A)
 |        | closeRegion | superuser\|global(A)
 |        | flush | superuser\|global(A)\|global\(C)\|TableOwner\|table(A)\|table\(C)


[46/50] [abbrv] hbase git commit: HBASE-17346 AggregationClient cleanup

Posted by sy...@apache.org.
HBASE-17346 AggregationClient cleanup


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0a93241b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0a93241b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0a93241b

Branch: refs/heads/hbase-12439
Commit: 0a93241b61e6183b5671a4e7940e6212a17acd66
Parents: 521730e
Author: Michael Stack <st...@apache.org>
Authored: Sun Jan 1 16:01:10 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Sun Jan 1 16:01:10 2017 -0800

----------------------------------------------------------------------
 .../client/coprocessor/AggregationClient.java   | 94 +++++++++++++++-----
 1 file changed, 71 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0a93241b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
index cde7d41..d236342 100644
--- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
@@ -49,7 +49,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
-import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos.AggregateResponse;
@@ -59,6 +58,8 @@ import org.apache.hadoop.hbase.util.Pair;
 
 import com.google.protobuf.ByteString;
 import com.google.protobuf.Message;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
 
 /**
  * This client class is for invoking the aggregate functions deployed on the
@@ -81,13 +82,60 @@ import com.google.protobuf.Message;
  * </ul>
  * <p>Call {@link #close()} when done.
  */
-@InterfaceAudience.Private
+@InterfaceAudience.Public
 public class AggregationClient implements Closeable {
   // TODO: This class is not used.  Move to examples?
   private static final Log log = LogFactory.getLog(AggregationClient.class);
   private final Connection connection;
 
   /**
+   * An RpcController implementation for use here in this endpoint.
+   */
+  static class AggregationClientRpcController implements RpcController {
+    private String errorText;
+    private boolean cancelled = false;
+    private boolean failed = false;
+
+    @Override
+    public String errorText() {
+      return this.errorText;
+    }
+
+    @Override
+    public boolean failed() {
+      return this.failed;
+    }
+
+    @Override
+    public boolean isCanceled() {
+      return this.cancelled;
+    }
+
+    @Override
+    public void notifyOnCancel(RpcCallback<Object> arg0) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void reset() {
+      this.errorText = null;
+      this.cancelled = false;
+      this.failed = false;
+    }
+
+    @Override
+    public void setFailed(String errorText) {
+      this.failed = true;
+      this.errorText = errorText;
+    }
+
+    @Override
+    public void startCancel() {
+      this.cancelled = true;
+    }
+  }
+
+  /**
    * Constructor with Conf object
    * @param cfg
    */
@@ -160,13 +208,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, R>() {
           @Override
           public R call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getMax(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             if (response.getFirstPartCount() > 0) {
               ByteString b = response.getFirstPart(0);
@@ -248,13 +296,13 @@ public class AggregationClient implements Closeable {
 
           @Override
           public R call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getMin(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             if (response.getFirstPartCount() > 0) {
               ByteString b = response.getFirstPart(0);
@@ -323,13 +371,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, Long>() {
           @Override
           public Long call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getRowNum(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             byte[] bytes = getBytesFromResponse(response.getFirstPart(0));
             ByteBuffer bb = ByteBuffer.allocate(8).put(bytes);
@@ -388,14 +436,14 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, S>() {
           @Override
           public S call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             // Not sure what is going on here why I have to do these casts. TODO.
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getSum(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             if (response.getFirstPartCount() == 0) {
               return null;
@@ -456,13 +504,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, Pair<S, Long>>() {
           @Override
           public Pair<S, Long> call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getAvg(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             Pair<S, Long> pair = new Pair<S, Long>(null, 0L);
             if (response.getFirstPartCount() == 0) {
@@ -560,13 +608,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, Pair<List<S>, Long>>() {
           @Override
           public Pair<List<S>, Long> call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getStd(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
             Pair<List<S>, Long> pair = new Pair<List<S>, Long>(new ArrayList<S>(), 0L);
             if (response.getFirstPartCount() == 0) {
@@ -676,13 +724,13 @@ public class AggregationClient implements Closeable {
         new Batch.Call<AggregateService, List<S>>() {
           @Override
           public List<S> call(AggregateService instance) throws IOException {
-            ServerRpcController controller = new ServerRpcController();
+            RpcController controller = new AggregationClientRpcController();
             CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse> rpcCallback =
                 new CoprocessorRpcUtils.BlockingRpcCallback<AggregateResponse>();
             instance.getMedian(controller, requestArg, rpcCallback);
             AggregateResponse response = rpcCallback.get();
-            if (controller.failedOnException()) {
-              throw controller.getFailedOn();
+            if (controller.failed()) {
+              throw new IOException(controller.errorText());
             }
 
             List<S> list = new ArrayList<S>();
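
For context, a minimal sketch (not from this patch) of driving the client after the cleanup.
The table name "usertable" and column family "cf" are assumptions, and the table must have the
AggregateImplementation coprocessor loaded for the endpoint calls to succeed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public final class RowCountExample {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));            // assumed column family
    try (AggregationClient aggregationClient = new AggregationClient(conf)) {
      long rows = aggregationClient.rowCount(
          TableName.valueOf("usertable"),           // assumed table name
          new LongColumnInterpreter(), scan);
      System.out.println("row count = " + rows);
    }
  }
}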


[04/50] [abbrv] hbase git commit: HBASE-17341 Add a timeout during replication endpoint termination (Vincent Poon)

Posted by sy...@apache.org.
HBASE-17341 Add a timeout during replication endpoint termination (Vincent Poon)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cac0904c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cac0904c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cac0904c

Branch: refs/heads/hbase-12439
Commit: cac0904c16dde9eb7bdbb57e4a33224dd4edb77f
Parents: e1f4aae
Author: tedyu <yu...@gmail.com>
Authored: Wed Dec 21 08:26:22 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Wed Dec 21 08:26:22 2016 -0800

----------------------------------------------------------------------
 .../regionserver/ReplicationSource.java         |  6 ++-
 .../replication/TestReplicationSource.java      | 54 ++++++++++++++++++++
 2 files changed, 58 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/cac0904c/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 97368e6..3fb5f94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -430,9 +430,11 @@ public class ReplicationSource extends Thread
       }
       if (future != null) {
         try {
-          future.get();
+          future.get(sleepForRetries * maxRetriesMultiplier, TimeUnit.MILLISECONDS);
         } catch (Exception e) {
-          LOG.warn("Got exception:" + e);
+          LOG.warn("Got exception while waiting for endpoint to shutdown for replication source :"
+              + this.peerClusterZnode,
+            e);
         }
       }
     }
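
The change above bounds how long terminate() blocks on the endpoint shutdown future. A
self-contained sketch of the same idiom (the timeout values are assumed defaults, not taken
from the patch):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;

public final class BoundedShutdownWait {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    // Simulates an endpoint whose stop() never completes.
    Future<?> future = pool.submit(() -> {
      try {
        Thread.sleep(Long.MAX_VALUE);
      } catch (InterruptedException ignored) {
        // interrupted on shutdownNow()
      }
    });
    long sleepForRetries = 1000L;   // assumed default
    int maxRetriesMultiplier = 4;   // assumed default
    try {
      future.get(sleepForRetries * maxRetriesMultiplier, TimeUnit.MILLISECONDS);
    } catch (Exception e) {
      // With a bounded wait the caller recovers instead of hanging forever.
      System.out.println("endpoint did not stop in time: " + e);
    }
    pool.shutdownNow();
  }
}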

http://git-wip-us.apache.org/repos/asf/hbase/blob/cac0904c/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
index 375a866..abdd68a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSource.java
@@ -21,12 +21,18 @@ package org.apache.hadoop.hbase.replication;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
@@ -36,6 +42,8 @@ import org.apache.hadoop.hbase.wal.WALProvider;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.replication.regionserver.HBaseInterClusterReplicationEndpoint;
+import org.apache.hadoop.hbase.replication.regionserver.ReplicationSource;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -43,6 +51,8 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import static org.mockito.Mockito.mock;
+
 @Category({ReplicationTests.class, MediumTests.class})
 public class TestReplicationSource {
 
@@ -111,5 +121,49 @@ public class TestReplicationSource {
     reader.close();
   }
 
+  /**
+   * Tests that {@link ReplicationSource#terminate(String)} will time out properly
+   */
+  @Test
+  public void testTerminateTimeout() throws Exception {
+    ReplicationSource source = new ReplicationSource();
+    ReplicationEndpoint replicationEndpoint = new HBaseInterClusterReplicationEndpoint() {
+      @Override
+      protected void doStart() {
+        notifyStarted();
+      }
+
+      @Override
+      protected void doStop() {
+        // not calling notifyStopped() here causes the caller of stop() to get a Future that never
+        // completes
+      }
+    };
+    replicationEndpoint.start();
+    ReplicationPeers mockPeers = mock(ReplicationPeers.class);
+    Configuration testConf = HBaseConfiguration.create();
+    testConf.setInt("replication.source.maxretriesmultiplier", 1);
+    source.init(testConf, null, null, null, mockPeers, null, "testPeer", null,
+      replicationEndpoint, null);
+    ExecutorService executor = Executors.newSingleThreadExecutor();
+    Future<?> future = executor.submit(new Runnable() {
+
+      @Override
+      public void run() {
+        source.terminate("testing source termination");
+      }
+    });
+    long sleepForRetries = testConf.getLong("replication.source.sleepforretries", 1000);
+    Waiter.waitFor(testConf, sleepForRetries * 2, new Predicate<Exception>() {
+
+      @Override
+      public boolean evaluate() throws Exception {
+        return future.isDone();
+      }
+
+    });
+
+  }
+
 }
 


[31/50] [abbrv] hbase git commit: HBASE-17371 Enhance 'HBaseContextSuite @ distributedScan to test HBase client' with filter

Posted by sy...@apache.org.
HBASE-17371 Enhance 'HBaseContextSuite @ distributedScan to test HBase client' with filter


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ccb8d671
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ccb8d671
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ccb8d671

Branch: refs/heads/hbase-12439
Commit: ccb8d671d590f4ea347fb85049f84620564ce1cb
Parents: 5ffbd4a
Author: tedyu <yu...@gmail.com>
Authored: Tue Dec 27 15:44:18 2016 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Tue Dec 27 15:44:18 2016 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/spark/HBaseContextSuite.scala   | 12 ++++++++++++
 1 file changed, 12 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/ccb8d671/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseContextSuite.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseContextSuite.scala b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseContextSuite.scala
index b27cfc7..1e1e52d 100644
--- a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseContextSuite.scala
+++ b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseContextSuite.scala
@@ -17,6 +17,7 @@
 package org.apache.hadoop.hbase.spark
 
 import org.apache.hadoop.hbase.client._
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter
 import org.apache.hadoop.hbase.util.Bytes
 import org.apache.hadoop.hbase.{ CellUtil, TableName, HBaseTestingUtility}
 import org.apache.spark.{SparkException, Logging, SparkContext}
@@ -311,6 +312,9 @@ BeforeAndAfterEach with BeforeAndAfterAll  with Logging {
       put = new Put(Bytes.toBytes("scan2"))
       put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo2"))
       table.put(put)
+      put = new Put(Bytes.toBytes("scan2"))
+      put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes("b"), Bytes.toBytes("foo-2"))
+      table.put(put)
       put = new Put(Bytes.toBytes("scan3"))
       put.addColumn(Bytes.toBytes(columnFamily), Bytes.toBytes("a"), Bytes.toBytes("foo3"))
       table.put(put)
@@ -328,15 +332,23 @@ BeforeAndAfterEach with BeforeAndAfterAll  with Logging {
     val hbaseContext = new HBaseContext(sc, config)
 
     val scan = new Scan()
+    val filter = new FirstKeyOnlyFilter()
     scan.setCaching(100)
     scan.setStartRow(Bytes.toBytes("scan2"))
     scan.setStopRow(Bytes.toBytes("scan4_"))
+    scan.setFilter(filter)
 
     val scanRdd = hbaseContext.hbaseRDD(TableName.valueOf(tableName), scan)
 
     try {
       val scanList = scanRdd.map(r => r._1.copyBytes()).collect()
       assert(scanList.length == 3)
+      var cnt = 0
+      scanRdd.map(r => r._2.listCells().size()).collect().foreach(l => {
+        cnt += l
+      })
+      // the number of cells returned would be 4 without the Filter
+      assert(cnt == 3);
     } catch {
       case ex: Exception => ex.printStackTrace()
     }
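
The same filter works outside Spark as well; a minimal Java sketch of a scan that returns only
the first cell of each row (the table name "t1" and the row range are assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public final class FirstKeyOnlyScanExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("t1"))) {
      Scan scan = new Scan();
      scan.setCaching(100);
      scan.setStartRow(Bytes.toBytes("scan2"));
      scan.setStopRow(Bytes.toBytes("scan4_"));
      scan.setFilter(new FirstKeyOnlyFilter());     // keep only the first cell per row
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toString(result.getRow())
              + " cells=" + result.listCells().size());
        }
      }
    }
  }
}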


[25/50] [abbrv] hbase git commit: HBASE-17348 Remove the unused hbase.replication from javadoc/comment/book completely

Posted by sy...@apache.org.
HBASE-17348 Remove the unused hbase.replication from javadoc/comment/book completely


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8da7366f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8da7366f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8da7366f

Branch: refs/heads/hbase-12439
Commit: 8da7366fc24d633ea98f9ccac1599aa147fbf7fe
Parents: 8cb55c4
Author: Guanghao Zhang <zg...@apache.org>
Authored: Sun Dec 25 08:46:29 2016 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Sun Dec 25 08:46:29 2016 +0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/client/replication/ReplicationAdmin.java | 4 +---
 .../hadoop/hbase/shaded/protobuf/generated/AdminProtos.java      | 4 ++--
 hbase-protocol-shaded/src/main/protobuf/Admin.proto              | 2 +-
 .../org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java  | 4 ++--
 hbase-protocol/src/main/protobuf/Admin.proto                     | 2 +-
 hbase-shell/src/main/ruby/shell.rb                               | 1 -
 src/main/asciidoc/_chapters/ops_mgt.adoc                         | 1 -
 7 files changed, 7 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/8da7366f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index 12bdb81..2d6c37b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -62,9 +62,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 /**
  * <p>
  * This class provides the administrative interface to HBase cluster
- * replication. In order to use it, the cluster and the client using
- * ReplicationAdmin must be configured with <code>hbase.replication</code>
- * set to true.
+ * replication.
  * </p>
  * <p>
  * Adding a new peer results in creating new outbound connections from every

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da7366f/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index 576c739..654d152 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -18964,7 +18964,7 @@ public final class AdminProtos {
    **
    * Replicates the given entries. The guarantee is that the given entries
    * will be durable on the slave cluster if this method returns without
-   * any exception.  hbase.replication has to be set to true for this to work.
+   * any exception.
    * </pre>
    *
    * Protobuf type {@code hbase.pb.ReplicateWALEntryRequest}
@@ -19427,7 +19427,7 @@ public final class AdminProtos {
      **
      * Replicates the given entries. The guarantee is that the given entries
      * will be durable on the slave cluster if this method returns without
-     * any exception.  hbase.replication has to be set to true for this to work.
+     * any exception.
      * </pre>
      *
      * Protobuf type {@code hbase.pb.ReplicateWALEntryRequest}

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da7366f/hbase-protocol-shaded/src/main/protobuf/Admin.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index 36221c24..e8cf10c 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -219,7 +219,7 @@ message WALEntry {
 /**
  * Replicates the given entries. The guarantee is that the given entries
  * will be durable on the slave cluster if this method returns without
- * any exception.  hbase.replication has to be set to true for this to work.
+ * any exception.
  */
 message ReplicateWALEntryRequest {
   repeated WALEntry entry = 1;

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da7366f/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
index 9deba2e..c8f8be9 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/AdminProtos.java
@@ -16949,7 +16949,7 @@ public final class AdminProtos {
    **
    * Replicates the given entries. The guarantee is that the given entries
    * will be durable on the slave cluster if this method returns without
-   * any exception.  hbase.replication has to be set to true for this to work.
+   * any exception.
    * </pre>
    */
   public static final class ReplicateWALEntryRequest extends
@@ -17439,7 +17439,7 @@ public final class AdminProtos {
      **
      * Replicates the given entries. The guarantee is that the given entries
      * will be durable on the slave cluster if this method returns without
-     * any exception.  hbase.replication has to be set to true for this to work.
+     * any exception.
      * </pre>
      */
     public static final class Builder extends

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da7366f/hbase-protocol/src/main/protobuf/Admin.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Admin.proto b/hbase-protocol/src/main/protobuf/Admin.proto
index e905340..6096966 100644
--- a/hbase-protocol/src/main/protobuf/Admin.proto
+++ b/hbase-protocol/src/main/protobuf/Admin.proto
@@ -206,7 +206,7 @@ message WALEntry {
 /**
  * Replicates the given entries. The guarantee is that the given entries
  * will be durable on the slave cluster if this method returns without
- * any exception.  hbase.replication has to be set to true for this to work.
+ * any exception.
  */
 message ReplicateWALEntryRequest {
   repeated WALEntry entry = 1;

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da7366f/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 4b111f1..2c9ab72 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -363,7 +363,6 @@ Shell.load_command_group(
 Shell.load_command_group(
   'replication',
   :full_name => 'CLUSTER REPLICATION TOOLS',
-  :comment => "In order to use these tools, hbase.replication must be true.",
   :commands => %w[
     add_peer
     remove_peer

http://git-wip-us.apache.org/repos/asf/hbase/blob/8da7366f/src/main/asciidoc/_chapters/ops_mgt.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc b/src/main/asciidoc/_chapters/ops_mgt.adoc
index a580bf8..b156ee5 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -1358,7 +1358,6 @@ Instead of SQL statements, entire WALEdits (consisting of multiple cell inserts
   Create tables with the same names and column families on both the source and destination clusters, so that the destination cluster knows where to store data it will receive.
 . All hosts in the source and destination clusters should be reachable to each other.
 . If both clusters use the same ZooKeeper cluster, you must use a different `zookeeper.znode.parent`, because they cannot write in the same folder.
-. Check to be sure that replication has not been disabled. `hbase.replication` defaults to `true`.
 . On the source cluster, in HBase Shell, add the destination cluster as a peer, using the `add_peer` command.
 . On the source cluster, in HBase Shell, enable the table replication, using the `enable_table_replication` command.
 . Check the logs to see if replication is taking place. If so, you will see messages like the following, coming from the ReplicationSource.


[23/50] [abbrv] hbase git commit: HBASE-17174 Refactor the AsyncProcess, BufferedMutatorImpl, and HTable

Posted by sy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java
new file mode 100644
index 0000000..7e9c968
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestController.java
@@ -0,0 +1,125 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import java.io.InterruptedIOException;
+import java.util.Collection;
+import java.util.function.Consumer;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * An interface for client request scheduling algorithm.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface RequestController {
+
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public enum ReturnCode {
+    /**
+     * Accept current row.
+     */
+    INCLUDE,
+    /**
+     * Skip current row.
+     */
+    SKIP,
+    /**
+     * No more row can be included.
+     */
+    END
+  }
+
+  /**
+   * Picks up the valid data.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public interface Checker {
+    /**
+     * Checks the data whether it is valid to submit.
+     * @param loc the destination of data
+     * @param row the data to check
+     * @return describe the decision for the row
+     */
+    ReturnCode canTakeRow(HRegionLocation loc, Row row);
+
+    /**
+     * Reset the state of the scheduler when completing the iteration of rows.
+     * @throws InterruptedIOException some controller may wait
+     * for some busy region or RS to complete the undealt request.
+     */
+    void reset() throws InterruptedIOException ;
+  }
+
+  /**
+   * @return A new checker for evaluating a batch of rows.
+   */
+  Checker newChecker();
+
+  /**
+   * Increment the counter if we build a valid task.
+   * @param regions The destination of task
+   * @param sn The target server
+   */
+  void incTaskCounters(Collection<byte[]> regions, ServerName sn);
+
+  /**
+   * Decrement the counter if a task is accomplished.
+   * @param regions The destination of task
+   * @param sn The target server
+   */
+  void decTaskCounters(Collection<byte[]> regions, ServerName sn);
+
+  /**
+   * @return The number of running tasks.
+   */
+  long getNumberOfTsksInProgress();
+
+  /**
+   * Waits for the running tasks to complete.
+   * If there are specified threshold and trigger, the implementation should
+   * wake up once in a while for checking the threshold and calling trigger.
+   * @param max This method will return if the number of running tasks is
+   * less than or equal to max.
+   * @param id the caller's id
+   * @param periodToTrigger The period to invoke the trigger. This value is a
+   * hint. The real period depends on the implementation.
+   * @param trigger The object to call periodically.
+   * @throws java.io.InterruptedIOException If the waiting is interrupted
+   */
+  void waitForMaximumCurrentTasks(long max, long id,
+    int periodToTrigger, Consumer<Long> trigger) throws InterruptedIOException;
+
+  /**
+   * Wait until there is at least one slot for a new task.
+   * @param id the caller's id
+   * @param periodToTrigger The period to invoke the trigger. This value is a
+   * hint. The real period depends on the implementation.
+   * @param trigger The object to call periodically.
+   * @throws java.io.InterruptedIOException If the waiting is interrupted
+   */
+  void waitForFreeSlot(long id, int periodToTrigger,
+          Consumer<Long> trigger) throws InterruptedIOException;
+}
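
To make the contract concrete, a minimal sketch of a Checker written against the interface
above; it accepts every row and keeps no state (purely illustrative, not part of the patch):

import java.io.InterruptedIOException;

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.RequestController;
import org.apache.hadoop.hbase.client.Row;

// Illustrative only: a stateless Checker that never throttles anything.
public final class AcceptAllChecker implements RequestController.Checker {
  @Override
  public RequestController.ReturnCode canTakeRow(HRegionLocation loc, Row row) {
    return RequestController.ReturnCode.INCLUDE;
  }

  @Override
  public void reset() throws InterruptedIOException {
    // nothing to reset; a real checker would clear its per-iteration state here
  }
}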

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java
new file mode 100644
index 0000000..7ed80f0
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RequestControllerFactory.java
@@ -0,0 +1,44 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.ReflectionUtils;
+
+/**
+ * A factory class that constructs an {@link org.apache.hadoop.hbase.client.RequestController}.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public final class RequestControllerFactory {
+  public static final String REQUEST_CONTROLLER_IMPL_CONF_KEY = "hbase.client.request.controller.impl";
+  /**
+   * Constructs a {@link org.apache.hadoop.hbase.client.RequestController}.
+   * @param conf The {@link Configuration} to use.
+   * @return A RequestController which is built according to the configuration.
+   */
+  public static RequestController create(Configuration conf) {
+    Class<? extends RequestController> clazz= conf.getClass(REQUEST_CONTROLLER_IMPL_CONF_KEY,
+      SimpleRequestController.class, RequestController.class);
+    return ReflectionUtils.newInstance(clazz, conf);
+  }
+}
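
The factory above is driven by a single configuration key; a short, runnable sketch of the
wiring. The commented-out class name is a placeholder, not a real implementation; with the key
left unset, the default SimpleRequestController is used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.RequestController;
import org.apache.hadoop.hbase.client.RequestControllerFactory;

public final class RequestControllerWiring {
  public static RequestController build() {
    Configuration conf = HBaseConfiguration.create();
    // With no override, the factory creates the default SimpleRequestController.
    // To plug in a custom scheduler, point the key at its class name, e.g.:
    //   conf.set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
    //       "com.example.MyRequestController");   // placeholder class name
    return RequestControllerFactory.create(conf);
  }
}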

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java
index 788f1a4..85fd590 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowAccess.java
@@ -30,8 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-@VisibleForTesting
-interface RowAccess<T> extends Iterable<T> {
+public interface RowAccess<T> extends Iterable<T> {
   /**
    * @return true if there are no elements.
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java
new file mode 100644
index 0000000..473f264
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SimpleRequestController.java
@@ -0,0 +1,519 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import com.google.common.annotations.VisibleForTesting;
+import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Consumer;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.util.Bytes;
+import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
+import org.apache.hadoop.hbase.util.EnvironmentEdge;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * Holds back the request if the submitted size or number has reached the
+ * threshold.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+class SimpleRequestController implements RequestController {
+  private static final Log LOG = LogFactory.getLog(SimpleRequestController.class);
+  /**
+   * The maximum size of single RegionServer.
+   */
+  public static final String HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = "hbase.client.max.perrequest.heapsize";
+
+  /**
+   * Default value of #HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE
+   */
+  @VisibleForTesting
+  static final long DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE = 4194304;
+
+  /**
+   * The maximum size of submit.
+   */
+  public static final String HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = "hbase.client.max.submit.heapsize";
+  /**
+   * Default value of #HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE
+   */
+  @VisibleForTesting
+  static final long DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE = DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE;
+  @VisibleForTesting
+  final AtomicLong tasksInProgress = new AtomicLong(0);
+  @VisibleForTesting
+  final ConcurrentMap<byte[], AtomicInteger> taskCounterPerRegion
+          = new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
+  @VisibleForTesting
+  final ConcurrentMap<ServerName, AtomicInteger> taskCounterPerServer = new ConcurrentHashMap<>();
+  /**
+   * The maximum number of tasks simultaneously executed on the cluster.
+   */
+  private final int maxTotalConcurrentTasks;
+
+  /**
+   * The maximum heap size of the data sent to a single server in one request.
+   */
+  private final long maxHeapSizePerRequest;
+  private final long maxHeapSizeSubmit;
+  /**
+   * The number of tasks we run in parallel on a single region. With 1 (the
+   * default), we ensure that the ordering of the queries is respected: we
+   * don't start a new set of operations on a region before the previous one
+   * is done. This also limits the pressure we put on the region server.
+   */
+  @VisibleForTesting
+  final int maxConcurrentTasksPerRegion;
+
+  /**
+   * The maximum number of tasks simultaneously executed on a single region server.
+   */
+  @VisibleForTesting
+  final int maxConcurrentTasksPerServer;
+  private final int thresholdToLogUndoneTaskDetails;
+  public static final String THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS =
+      "hbase.client.threshold.log.details";
+  private static final int DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS = 10;
+  public static final String THRESHOLD_TO_LOG_REGION_DETAILS =
+      "hbase.client.threshold.log.region.details";
+  private static final int DEFAULT_THRESHOLD_TO_LOG_REGION_DETAILS = 2;
+  private final int thresholdToLogRegionDetails;
+  SimpleRequestController(final Configuration conf) {
+    this.maxTotalConcurrentTasks = conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
+            HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS);
+    this.maxConcurrentTasksPerServer = conf.getInt(HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS,
+            HConstants.DEFAULT_HBASE_CLIENT_MAX_PERSERVER_TASKS);
+    this.maxConcurrentTasksPerRegion = conf.getInt(HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS,
+            HConstants.DEFAULT_HBASE_CLIENT_MAX_PERREGION_TASKS);
+    this.maxHeapSizePerRequest = conf.getLong(HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE,
+            DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE);
+    this.maxHeapSizeSubmit = conf.getLong(HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE, DEFAULT_HBASE_CLIENT_MAX_SUBMIT_HEAPSIZE);
+    this.thresholdToLogUndoneTaskDetails =
+        conf.getInt(THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS,
+          DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS);
+    this.thresholdToLogRegionDetails =
+        conf.getInt(THRESHOLD_TO_LOG_REGION_DETAILS,
+          DEFAULT_THRESHOLD_TO_LOG_REGION_DETAILS);
+    if (this.maxTotalConcurrentTasks <= 0) {
+      throw new IllegalArgumentException("maxTotalConcurrentTasks=" + maxTotalConcurrentTasks);
+    }
+    if (this.maxConcurrentTasksPerServer <= 0) {
+      throw new IllegalArgumentException("maxConcurrentTasksPerServer="
+              + maxConcurrentTasksPerServer);
+    }
+    if (this.maxConcurrentTasksPerRegion <= 0) {
+      throw new IllegalArgumentException("maxConcurrentTasksPerRegion="
+              + maxConcurrentTasksPerRegion);
+    }
+    if (this.maxHeapSizePerRequest <= 0) {
+      throw new IllegalArgumentException("maxHeapSizePerServer="
+              + maxHeapSizePerRequest);
+    }
+
+    if (this.maxHeapSizeSubmit <= 0) {
+      throw new IllegalArgumentException("maxHeapSizeSubmit="
+              + maxHeapSizeSubmit);
+    }
+  }
+
+  @VisibleForTesting
+  static Checker newChecker(List<RowChecker> checkers) {
+    return new Checker() {
+      private boolean isEnd = false;
+
+      @Override
+      public ReturnCode canTakeRow(HRegionLocation loc, Row row) {
+        if (isEnd) {
+          return ReturnCode.END;
+        }
+        long rowSize = (row instanceof Mutation) ? ((Mutation) row).heapSize() : 0;
+        ReturnCode code = ReturnCode.INCLUDE;
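+        // Aggregate the per-checker decisions: END dominates SKIP, which dominates
+        // INCLUDE; every checker is then notified of the final decision below.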
+        for (RowChecker checker : checkers) {
+          switch (checker.canTakeOperation(loc, rowSize)) {
+            case END:
+              isEnd = true;
+              code = ReturnCode.END;
+              break;
+            case SKIP:
+              code = ReturnCode.SKIP;
+              break;
+            case INCLUDE:
+            default:
+              break;
+          }
+          if (code == ReturnCode.END) {
+            break;
+          }
+        }
+        for (RowChecker checker : checkers) {
+          checker.notifyFinal(code, loc, rowSize);
+        }
+        return code;
+      }
+
+      @Override
+      public void reset() throws InterruptedIOException {
+        isEnd = false;
+        InterruptedIOException e = null;
+        for (RowChecker checker : checkers) {
+          try {
+            checker.reset();
+          } catch (InterruptedIOException ex) {
+            e = ex;
+          }
+        }
+        if (e != null) {
+          throw e;
+        }
+      }
+    };
+  }
+
+  @Override
+  public Checker newChecker() {
+    List<RowChecker> checkers = new ArrayList<>(3);
+    checkers.add(new TaskCountChecker(maxTotalConcurrentTasks,
+            maxConcurrentTasksPerServer,
+            maxConcurrentTasksPerRegion,
+            tasksInProgress,
+            taskCounterPerServer,
+            taskCounterPerRegion));
+    checkers.add(new RequestSizeChecker(maxHeapSizePerRequest));
+    checkers.add(new SubmittedSizeChecker(maxHeapSizeSubmit));
+    return newChecker(checkers);
+  }
+
+  @Override
+  public void incTaskCounters(Collection<byte[]> regions, ServerName sn) {
+    tasksInProgress.incrementAndGet();
+
+    computeIfAbsent(taskCounterPerServer, sn, AtomicInteger::new).incrementAndGet();
+
+    regions.forEach((regBytes)
+            -> computeIfAbsent(taskCounterPerRegion, regBytes, AtomicInteger::new).incrementAndGet()
+    );
+  }
+
+  @Override
+  public void decTaskCounters(Collection<byte[]> regions, ServerName sn) {
+    regions.forEach(regBytes -> {
+      AtomicInteger regionCnt = taskCounterPerRegion.get(regBytes);
+      regionCnt.decrementAndGet();
+    });
+
+    taskCounterPerServer.get(sn).decrementAndGet();
+    tasksInProgress.decrementAndGet();
+    synchronized (tasksInProgress) {
+      tasksInProgress.notifyAll();
+    }
+  }
+
+  @Override
+  public long getNumberOfTsksInProgress() {
+    return tasksInProgress.get();
+  }
+
+  @Override
+  public void waitForMaximumCurrentTasks(long max, long id,
+    int periodToTrigger, Consumer<Long> trigger) throws InterruptedIOException {
+    assert max >= 0;
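+    // Block until the number of in-flight tasks drops to 'max', periodically
+    // invoking the trigger and logging which servers/regions still have work.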
+    long lastLog = EnvironmentEdgeManager.currentTime();
+    long currentInProgress, oldInProgress = Long.MAX_VALUE;
+    while ((currentInProgress = tasksInProgress.get()) > max) {
+      if (oldInProgress != currentInProgress) { // Wait for in progress to change.
+        long now = EnvironmentEdgeManager.currentTime();
+        if (now > lastLog + periodToTrigger) {
+          lastLog = now;
+          if (trigger != null) {
+            trigger.accept(currentInProgress);
+          }
+          logDetailsOfUndoneTasks(currentInProgress);
+        }
+      }
+      oldInProgress = currentInProgress;
+      try {
+        synchronized (tasksInProgress) {
+          if (tasksInProgress.get() == oldInProgress) {
+            tasksInProgress.wait(10);
+          }
+        }
+      } catch (InterruptedException e) {
+        throw new InterruptedIOException("#" + id + ", interrupted." +
+            " currentNumberOfTask=" + currentInProgress);
+      }
+    }
+  }
+
+  private void logDetailsOfUndoneTasks(long taskInProgress) {
+    if (taskInProgress <= thresholdToLogUndoneTaskDetails) {
+      ArrayList<ServerName> servers = new ArrayList<>();
+      for (Map.Entry<ServerName, AtomicInteger> entry : taskCounterPerServer.entrySet()) {
+        if (entry.getValue().get() > 0) {
+          servers.add(entry.getKey());
+        }
+      }
+      LOG.info("Left over " + taskInProgress + " task(s) are processed on server(s): " + servers);
+    }
+
+    if (taskInProgress <= thresholdToLogRegionDetails) {
+      ArrayList<String> regions = new ArrayList<>();
+      for (Map.Entry<byte[], AtomicInteger> entry : taskCounterPerRegion.entrySet()) {
+        if (entry.getValue().get() > 0) {
+          regions.add(Bytes.toString(entry.getKey()));
+        }
+      }
+      LOG.info("Regions against which left over task(s) are processed: " + regions);
+    }
+  }
+
+  @Override
+  public void waitForFreeSlot(long id, int periodToTrigger, Consumer<Long> trigger) throws InterruptedIOException {
+    waitForMaximumCurrentTasks(maxTotalConcurrentTasks - 1, id, periodToTrigger, trigger);
+  }
+
+  /**
+   * Limits the total heap size of data accepted in a single submit. Once the
+   * accumulated size reaches the limit, the remaining rows are left for a
+   * later submit.
+   */
+  @VisibleForTesting
+  static class SubmittedSizeChecker implements RowChecker {
+
+    private final long maxHeapSizeSubmit;
+    private long heapSize = 0;
+
+    SubmittedSizeChecker(final long maxHeapSizeSubmit) {
+      this.maxHeapSizeSubmit = maxHeapSizeSubmit;
+    }
+
+    @Override
+    public ReturnCode canTakeOperation(HRegionLocation loc, long rowSize) {
+      if (heapSize >= maxHeapSizeSubmit) {
+        return ReturnCode.END;
+      }
+      return ReturnCode.INCLUDE;
+    }
+
+    @Override
+    public void notifyFinal(ReturnCode code, HRegionLocation loc, long rowSize) {
+      if (code == ReturnCode.INCLUDE) {
+        heapSize += rowSize;
+      }
+    }
+
+    @Override
+    public void reset() {
+      heapSize = 0;
+    }
+  }
+
+  /**
+   * Limits the maximum number of concurrent tasks in an AsyncProcess.
+   */
+  @VisibleForTesting
+  static class TaskCountChecker implements RowChecker {
+
+    private static final long MAX_WAITING_TIME = 1000; //ms
+    private final Set<HRegionInfo> regionsIncluded = new HashSet<>();
+    private final Set<ServerName> serversIncluded = new HashSet<>();
+    private final int maxConcurrentTasksPerRegion;
+    private final int maxTotalConcurrentTasks;
+    private final int maxConcurrentTasksPerServer;
+    private final Map<byte[], AtomicInteger> taskCounterPerRegion;
+    private final Map<ServerName, AtomicInteger> taskCounterPerServer;
+    private final Set<byte[]> busyRegions = new TreeSet<>(Bytes.BYTES_COMPARATOR);
+    private final AtomicLong tasksInProgress;
+
+    TaskCountChecker(final int maxTotalConcurrentTasks,
+            final int maxConcurrentTasksPerServer,
+            final int maxConcurrentTasksPerRegion,
+            final AtomicLong tasksInProgress,
+            final Map<ServerName, AtomicInteger> taskCounterPerServer,
+            final Map<byte[], AtomicInteger> taskCounterPerRegion) {
+      this.maxTotalConcurrentTasks = maxTotalConcurrentTasks;
+      this.maxConcurrentTasksPerRegion = maxConcurrentTasksPerRegion;
+      this.maxConcurrentTasksPerServer = maxConcurrentTasksPerServer;
+      this.taskCounterPerRegion = taskCounterPerRegion;
+      this.taskCounterPerServer = taskCounterPerServer;
+      this.tasksInProgress = tasksInProgress;
+    }
+
+    @Override
+    public void reset() throws InterruptedIOException {
+      // Prevent busy-waiting: block until a previously-busy region frees a task slot.
+      waitForRegion();
+      regionsIncluded.clear();
+      serversIncluded.clear();
+      busyRegions.clear();
+    }
+
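+    /**
+     * Waits, bounded by MAX_WAITING_TIME, until at least one of the regions
+     * marked busy in the previous round has a free task slot, so that callers
+     * retrying skipped rows do not busy-wait.
+     */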
+    private void waitForRegion() throws InterruptedIOException {
+      if (busyRegions.isEmpty()) {
+        return;
+      }
+      EnvironmentEdge ee = EnvironmentEdgeManager.getDelegate();
+      final long start = ee.currentTime();
+      while ((ee.currentTime() - start) <= MAX_WAITING_TIME) {
+        for (byte[] region : busyRegions) {
+          AtomicInteger count = taskCounterPerRegion.get(region);
+          if (count == null || count.get() < maxConcurrentTasksPerRegion) {
+            return;
+          }
+        }
+        try {
+          synchronized (tasksInProgress) {
+            tasksInProgress.wait(10);
+          }
+        } catch (InterruptedException e) {
+          throw new InterruptedIOException("Interrupted."
+                  + " tasksInProgress=" + tasksInProgress);
+        }
+      }
+    }
+
+    /**
+     * 1) Check whether the region has already been accepted. 2) Check the
+     * concurrent tasks for the region. 3) Check the total concurrent tasks.
+     * 4) Check the concurrent tasks for the server.
+     *
+     * @param loc the destination of the data
+     * @param rowSize the data size
+     * @return ReturnCode.INCLUDE if the operation can be accepted, ReturnCode.SKIP otherwise
+     */
+    @Override
+    public ReturnCode canTakeOperation(HRegionLocation loc, long rowSize) {
+
+      HRegionInfo regionInfo = loc.getRegionInfo();
+      if (regionsIncluded.contains(regionInfo)) {
+        // We already know what to do with this region.
+        return ReturnCode.INCLUDE;
+      }
+      AtomicInteger regionCnt = taskCounterPerRegion.get(loc.getRegionInfo().getRegionName());
+      if (regionCnt != null && regionCnt.get() >= maxConcurrentTasksPerRegion) {
+        // Too many tasks on this region already.
+        return ReturnCode.SKIP;
+      }
+      int newServers = serversIncluded.size()
+              + (serversIncluded.contains(loc.getServerName()) ? 0 : 1);
+      if ((newServers + tasksInProgress.get()) > maxTotalConcurrentTasks) {
+        // Too many tasks.
+        return ReturnCode.SKIP;
+      }
+      AtomicInteger serverCnt = taskCounterPerServer.get(loc.getServerName());
+      if (serverCnt != null && serverCnt.get() >= maxConcurrentTasksPerServer) {
+        // Too many tasks for this individual server
+        return ReturnCode.SKIP;
+      }
+      return ReturnCode.INCLUDE;
+    }
+
+    @Override
+    public void notifyFinal(ReturnCode code, HRegionLocation loc, long rowSize) {
+      if (code == ReturnCode.INCLUDE) {
+        regionsIncluded.add(loc.getRegionInfo());
+        serversIncluded.add(loc.getServerName());
+      }
+      busyRegions.add(loc.getRegionInfo().getRegionName());
+    }
+  }
+
+  /**
+   * Limits the request size sent to each RegionServer.
+   */
+  @VisibleForTesting
+  static class RequestSizeChecker implements RowChecker {
+
+    private final long maxHeapSizePerRequest;
+    private final Map<ServerName, Long> serverRequestSizes = new HashMap<>();
+
+    RequestSizeChecker(final long maxHeapSizePerRequest) {
+      this.maxHeapSizePerRequest = maxHeapSizePerRequest;
+    }
+
+    @Override
+    public void reset() {
+      serverRequestSizes.clear();
+    }
+
+    @Override
+    public ReturnCode canTakeOperation(HRegionLocation loc, long rowSize) {
+      // Would adding this row exceed the per-server request size limit?
+      long currentRequestSize = serverRequestSizes.containsKey(loc.getServerName())
+              ? serverRequestSizes.get(loc.getServerName()) : 0L;
+      // accept at least one request
+      if (currentRequestSize == 0 || currentRequestSize + rowSize <= maxHeapSizePerRequest) {
+        return ReturnCode.INCLUDE;
+      }
+      return ReturnCode.SKIP;
+    }
+
+    @Override
+    public void notifyFinal(ReturnCode code, HRegionLocation loc, long rowSize) {
+      if (code == ReturnCode.INCLUDE) {
+        long currentRequestSize = serverRequestSizes.containsKey(loc.getServerName())
+                ? serverRequestSizes.get(loc.getServerName()) : 0L;
+        serverRequestSizes.put(loc.getServerName(), currentRequestSize + rowSize);
+      }
+    }
+  }
+
+  /**
+   * Provides a way to control the flow of the row iteration.
+   */
+  @VisibleForTesting
+  interface RowChecker {
+
+    ReturnCode canTakeOperation(HRegionLocation loc, long rowSize);
+
+    /**
+     * Notifies the checker of the final ReturnCode. This checker's own decision
+     * may have been overridden by another checker, so it needs the final
+     * decision to update its inner state.
+     *
+     * @param code the final decision
+     * @param loc the destination of the data
+     * @param rowSize the data size
+     */
+    void notifyFinal(ReturnCode code, HRegionLocation loc, long rowSize);
+
+    /**
+     * Reset the inner state.
+     */
+    void reset() throws InterruptedIOException;
+  }
+}
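
For readers skimming the new controller above, a rough usage sketch may help (it is
not part of the patch). Assuming RequestController exposes the Checker and
ReturnCode contract that SimpleRequestController implements, a caller such as
AsyncProcess could pick the rows for its next batch roughly as below. The class
name CheckerUsageSketch, the method takeNextBatch and the locator function are
illustrative stand-ins, not HBase APIs.

import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.RequestController;
import org.apache.hadoop.hbase.client.Row;

public class CheckerUsageSketch {
  /**
   * Picks the rows that fit into the next batch. Rows rejected with SKIP stay
   * in 'pending' for a later round; END stops this round early.
   */
  public static List<Row> takeNextBatch(RequestController controller,
      List<Row> pending, Function<Row, HRegionLocation> locator) {
    RequestController.Checker checker = controller.newChecker();
    List<Row> accepted = new ArrayList<>();
    for (Row row : pending) {
      HRegionLocation loc = locator.apply(row);
      RequestController.ReturnCode code = checker.canTakeRow(loc, row);
      if (code == RequestController.ReturnCode.INCLUDE) {
        accepted.add(row);   // fits the per-region, per-server and request-size limits
      } else if (code == RequestController.ReturnCode.END) {
        break;               // submitted-size limit reached; send what we have
      }
      // SKIP: leave the row behind for the next round
    }
    pending.removeAll(accepted);
    return accepted;
  }
}

Between rounds the same checker would be reset() so the per-round bookkeeping starts
fresh, and the caller is expected to bracket each dispatched batch with
incTaskCounters()/decTaskCounters() so TaskCountChecker sees the in-flight work, as
the updated TestAsyncProcess below exercises.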

http://git-wip-us.apache.org/repos/asf/hbase/blob/8cb55c40/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index bb6cbb5..ed7202a 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -33,12 +33,10 @@ import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.BrokenBarrierException;
-import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.RejectedExecutionException;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -59,15 +57,8 @@ import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.AsyncProcess.ListRowAccess;
-import org.apache.hadoop.hbase.client.AsyncProcess.TaskCountChecker;
-import org.apache.hadoop.hbase.client.AsyncProcess.RowChecker.ReturnCode;
-import org.apache.hadoop.hbase.client.AsyncProcess.RowCheckerHost;
-import org.apache.hadoop.hbase.client.AsyncProcess.RequestSizeChecker;
+import org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
-import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
-import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
@@ -78,60 +69,64 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestRule;
 import org.mockito.Mockito;
-import org.apache.hadoop.hbase.client.AsyncProcess.RowChecker;
-import org.apache.hadoop.hbase.client.AsyncProcess.SubmittedSizeChecker;
+import org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
 import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+
 
 @Category({ClientTests.class, MediumTests.class})
 public class TestAsyncProcess {
   @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
       withLookingForStuckThread(true).build();
-  private final static Log LOG = LogFactory.getLog(TestAsyncProcess.class);
+  private static final Log LOG = LogFactory.getLog(TestAsyncProcess.class);
   private static final TableName DUMMY_TABLE =
       TableName.valueOf("DUMMY_TABLE");
   private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes();
   private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes();
   private static final byte[] DUMMY_BYTES_3 = "DUMMY_BYTES_3".getBytes();
   private static final byte[] FAILS = "FAILS".getBytes();
-  private static final Configuration conf = new Configuration();
-
-  private static ServerName sn = ServerName.valueOf("s1:1,1");
-  private static ServerName sn2 = ServerName.valueOf("s2:2,2");
-  private static ServerName sn3 = ServerName.valueOf("s3:3,3");
-  private static HRegionInfo hri1 =
+  private static final Configuration CONF = new Configuration();
+  private static final ConnectionConfiguration CONNECTION_CONFIG = new ConnectionConfiguration(CONF);
+  private static final ServerName sn = ServerName.valueOf("s1:1,1");
+  private static final ServerName sn2 = ServerName.valueOf("s2:2,2");
+  private static final ServerName sn3 = ServerName.valueOf("s3:3,3");
+  private static final HRegionInfo hri1 =
       new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-  private static HRegionInfo hri2 =
+  private static final HRegionInfo hri2 =
       new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-  private static HRegionInfo hri3 =
+  private static final HRegionInfo hri3 =
       new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-  private static HRegionLocation loc1 = new HRegionLocation(hri1, sn);
-  private static HRegionLocation loc2 = new HRegionLocation(hri2, sn);
-  private static HRegionLocation loc3 = new HRegionLocation(hri3, sn2);
+  private static final HRegionLocation loc1 = new HRegionLocation(hri1, sn);
+  private static final HRegionLocation loc2 = new HRegionLocation(hri2, sn);
+  private static final HRegionLocation loc3 = new HRegionLocation(hri3, sn2);
 
   // Replica stuff
-  private static HRegionInfo hri1r1 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1),
+  private static final HRegionInfo hri1r1 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1),
       hri1r2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-  private static HRegionInfo hri2r1 = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-  private static RegionLocations hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
+  private static final HRegionInfo hri2r1 = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
+  private static final RegionLocations hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
       new HRegionLocation(hri1r1, sn2), new HRegionLocation(hri1r2, sn3));
-  private static RegionLocations hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
+  private static final RegionLocations hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
       new HRegionLocation(hri2r1, sn3));
-  private static RegionLocations hrls3 = new RegionLocations(new HRegionLocation(hri3, sn3), null);
+  private static final RegionLocations hrls3 = new RegionLocations(new HRegionLocation(hri3, sn3), null);
 
   private static final String success = "success";
   private static Exception failure = new Exception("failure");
 
-  private static int NB_RETRIES = 3;
+  private static final int NB_RETRIES = 3;
 
+  private static final int RPC_TIMEOUT = CONF.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
+      HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
+  private static final int OPERATION_TIMEOUT = CONF.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
+      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
   @BeforeClass
   public static void beforeClass(){
-    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, NB_RETRIES);
+    CONF.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, NB_RETRIES);
   }
 
   static class CountingThreadFactory implements ThreadFactory {
@@ -153,20 +148,21 @@ public class TestAsyncProcess {
     final AtomicInteger nbActions = new AtomicInteger();
     public List<AsyncRequestFuture> allReqs = new ArrayList<AsyncRequestFuture>();
     public AtomicInteger callsCt = new AtomicInteger();
-    private static int rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-    private static int operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+
     private long previousTimeout = -1;
+    final ExecutorService service;
     @Override
-    protected <Res> AsyncRequestFutureImpl<Res> createAsyncRequestFuture(TableName tableName,
-        List<Action> actions, long nonceGroup, ExecutorService pool,
-        Batch.Callback<Res> callback, Object[] results, boolean needResults,
-        CancellableRegionServerCallable callable, int curTimeout) {
+    protected <Res> AsyncRequestFutureImpl<Res> createAsyncRequestFuture(
+      AsyncProcessTask task, List<Action> actions, long nonceGroup) {
       // Test HTable has tableName of null, so pass DUMMY_TABLE
+      AsyncProcessTask wrap = new AsyncProcessTask(task){
+        @Override
+        public TableName getTableName() {
+          return DUMMY_TABLE;
+        }
+      };
       AsyncRequestFutureImpl<Res> r = new MyAsyncRequestFutureImpl<Res>(
-          DUMMY_TABLE, actions, nonceGroup, getPool(pool), needResults,
-          results, callback, callable, operationTimeout, rpcTimeout, this);
+          wrap, actions, nonceGroup, this);
       allReqs.add(r);
       return r;
     }
@@ -176,49 +172,54 @@ public class TestAsyncProcess {
     }
 
     public MyAsyncProcess(ClusterConnection hc, Configuration conf, AtomicInteger nbThreads) {
-      super(hc, conf, new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
-          new SynchronousQueue<Runnable>(), new CountingThreadFactory(nbThreads)),
-            new RpcRetryingCallerFactory(conf), false, new RpcControllerFactory(conf), rpcTimeout,
-          operationTimeout);
+      super(hc, conf, new RpcRetryingCallerFactory(conf), false, new RpcControllerFactory(conf));
+      service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
+          new SynchronousQueue<>(), new CountingThreadFactory(nbThreads));
     }
 
     public MyAsyncProcess(
         ClusterConnection hc, Configuration conf, boolean useGlobalErrors) {
-      super(hc, conf, new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
-        new SynchronousQueue<Runnable>(), new CountingThreadFactory(new AtomicInteger())),
-          new RpcRetryingCallerFactory(conf), useGlobalErrors, new RpcControllerFactory(conf),
-          rpcTimeout, operationTimeout);
+      super(hc, conf,
+          new RpcRetryingCallerFactory(conf), useGlobalErrors, new RpcControllerFactory(conf));
+      service = Executors.newFixedThreadPool(5);
     }
 
-    public MyAsyncProcess(ClusterConnection hc, Configuration conf, boolean useGlobalErrors,
-        @SuppressWarnings("unused") boolean dummy) {
-      super(hc, conf, new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
-              new SynchronousQueue<Runnable>(), new CountingThreadFactory(new AtomicInteger())) {
-        @Override
-        public void execute(Runnable command) {
-          throw new RejectedExecutionException("test under failure");
-        }
-      },
-          new RpcRetryingCallerFactory(conf), useGlobalErrors, new RpcControllerFactory(conf),
-          rpcTimeout, operationTimeout);
+    public <CResult> AsyncRequestFuture submit(ExecutorService pool, TableName tableName,
+        List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback,
+        boolean needResults) throws InterruptedIOException {
+      AsyncProcessTask task = AsyncProcessTask.newBuilder(callback)
+              .setPool(pool == null ? service : pool)
+              .setTableName(tableName)
+              .setRowAccess(rows)
+              .setSubmittedRows(atLeastOne ? SubmittedRows.AT_LEAST_ONE : SubmittedRows.NORMAL)
+              .setNeedResults(needResults)
+              .setRpcTimeout(RPC_TIMEOUT)
+              .setOperationTimeout(OPERATION_TIMEOUT)
+              .build();
+      return submit(task);
+    }
+
+    public <CResult> AsyncRequestFuture submit(TableName tableName,
+        final List<? extends Row> rows, boolean atLeastOne, Batch.Callback<CResult> callback,
+        boolean needResults) throws InterruptedIOException {
+      return submit(null, tableName, rows, atLeastOne, callback, needResults);
     }
 
     @Override
-    public <Res> AsyncRequestFuture submit(TableName tableName, RowAccess<? extends Row> rows,
-        boolean atLeastOne, Callback<Res> callback, boolean needResults)
+    public <Res> AsyncRequestFuture submit(AsyncProcessTask<Res> task)
             throws InterruptedIOException {
+      previousTimeout = task.getRpcTimeout();
       // We use results in tests to check things, so override to always save them.
-      return super.submit(DUMMY_TABLE, rows, atLeastOne, callback, true);
+      AsyncProcessTask<Res> wrap = new AsyncProcessTask<Res>(task) {
+        @Override
+        public boolean getNeedResults() {
+          return true;
+        }
+      };
+      return super.submit(wrap);
     }
 
     @Override
-    public <CResult> AsyncRequestFuture submitAll(ExecutorService pool, TableName tableName,
-      List<? extends Row> rows, Batch.Callback<CResult> callback, Object[] results,
-      CancellableRegionServerCallable callable, int curTimeout) {
-      previousTimeout = curTimeout;
-      return super.submitAll(pool, tableName, rows, callback, results, callable, curTimeout);
-    }
-    @Override
     protected RpcRetryingCaller<AbstractResponse> createCaller(
         CancellableRegionServerCallable callable, int rpcTimeout) {
       callsCt.incrementAndGet();
@@ -260,12 +261,9 @@ public class TestAsyncProcess {
 
   static class MyAsyncRequestFutureImpl<Res> extends AsyncRequestFutureImpl<Res> {
 
-    public MyAsyncRequestFutureImpl(TableName tableName, List<Action> actions, long nonceGroup,
-        ExecutorService pool, boolean needResults, Object[] results,
-        Batch.Callback callback, CancellableRegionServerCallable callable, int operationTimeout,
-        int rpcTimeout, AsyncProcess asyncProcess) {
-      super(tableName, actions, nonceGroup, pool, needResults,
-          results, callback, callable, operationTimeout, rpcTimeout, asyncProcess);
+    public MyAsyncRequestFutureImpl(AsyncProcessTask task, List<Action> actions,
+      long nonceGroup, AsyncProcess asyncProcess) {
+      super(task, actions, nonceGroup, asyncProcess);
     }
 
     @Override
@@ -483,7 +481,7 @@ public class TestAsyncProcess {
     final boolean usedRegions[];
 
     protected MyConnectionImpl2(List<HRegionLocation> hrl) throws IOException {
-      super(conf);
+      super(CONF);
       this.hrl = hrl;
       this.usedRegions = new boolean[hrl.size()];
     }
@@ -553,19 +551,7 @@ public class TestAsyncProcess {
     long putsHeapSize = writeBuffer;
     doSubmitRequest(writeBuffer, putsHeapSize);
   }
-  @Test
-  public void testIllegalArgument() throws IOException {
-    ClusterConnection conn = createHConnection();
-    final long maxHeapSizePerRequest = conn.getConfiguration().getLong(AsyncProcess.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE,
-      AsyncProcess.DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE);
-    conn.getConfiguration().setLong(AsyncProcess.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, -1);
-    try {
-      MyAsyncProcess ap = new MyAsyncProcess(conn, conf, true);
-      fail("The maxHeapSizePerRequest must be bigger than zero");
-    } catch (IllegalArgumentException e) {
-    }
-    conn.getConfiguration().setLong(AsyncProcess.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, maxHeapSizePerRequest);
-  }
+
   @Test
   public void testSubmitLargeRequestWithUnlimitedSize() throws Exception {
     long maxHeapSizePerRequest = Long.MAX_VALUE;
@@ -601,10 +587,13 @@ public class TestAsyncProcess {
 
   private void doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize) throws Exception {
     ClusterConnection conn = createHConnection();
-    final long defaultHeapSizePerRequest = conn.getConfiguration().getLong(AsyncProcess.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE,
-      AsyncProcess.DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE);
-    conn.getConfiguration().setLong(AsyncProcess.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, maxHeapSizePerRequest);
-    BufferedMutatorParams bufferParam = new BufferedMutatorParams(DUMMY_TABLE);
+    final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
+    final long defaultHeapSizePerRequest = conn.getConfiguration().getLong(
+      SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE,
+      SimpleRequestController.DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE);
+    conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+      SimpleRequestController.class.getName());
+    conn.getConfiguration().setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, maxHeapSizePerRequest);
 
     // sn has two regions
     long putSizeSN = 0;
@@ -630,11 +619,12 @@ public class TestAsyncProcess {
       + ", maxHeapSizePerRequest:" + maxHeapSizePerRequest
       + ", minCountSnRequest:" + minCountSnRequest
       + ", minCountSn2Request:" + minCountSn2Request);
-    try (HTable ht = new HTable(conn, bufferParam)) {
-      MyAsyncProcess ap = new MyAsyncProcess(conn, conf, true);
-      ht.mutator.ap = ap;
 
-      Assert.assertEquals(0L, ht.mutator.currentWriteBufferSize.get());
+    MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, true);
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
+    try (HTable ht = new HTable(conn, mutator)) {
+      Assert.assertEquals(0L, ht.mutator.getCurrentWriteBufferSize());
       ht.put(puts);
       List<AsyncRequestFuture> reqs = ap.allReqs;
 
@@ -680,12 +670,17 @@ public class TestAsyncProcess {
       assertEquals(putSizeSN2, (long) sizePerServers.get(sn2));
     }
     // restore config.
-    conn.getConfiguration().setLong(AsyncProcess.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, defaultHeapSizePerRequest);
+    conn.getConfiguration().setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, defaultHeapSizePerRequest);
+    if (defaultClazz != null) {
+      conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+        defaultClazz);
+    }
   }
+
   @Test
   public void testSubmit() throws Exception {
     ClusterConnection hc = createHConnection();
-    AsyncProcess ap = new MyAsyncProcess(hc, conf);
+    MyAsyncProcess ap = new MyAsyncProcess(hc, CONF);
 
     List<Put> puts = new ArrayList<Put>();
     puts.add(createPut(1, true));
@@ -704,7 +699,7 @@ public class TestAsyncProcess {
         updateCalled.incrementAndGet();
       }
     };
-    AsyncProcess ap = new MyAsyncProcess(hc, conf);
+    MyAsyncProcess ap = new MyAsyncProcess(hc, CONF);
 
     List<Put> puts = new ArrayList<Put>();
     puts.add(createPut(1, true));
@@ -717,13 +712,16 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmitBusyRegion() throws Exception {
-    ClusterConnection hc = createHConnection();
-    AsyncProcess ap = new MyAsyncProcess(hc, conf);
-
+    ClusterConnection conn = createHConnection();
+    final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
+    conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+      SimpleRequestController.class.getName());
+    MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
+    SimpleRequestController controller = (SimpleRequestController) ap.requestController;
     List<Put> puts = new ArrayList<Put>();
     puts.add(createPut(1, true));
 
-    for (int i = 0; i != ap.maxConcurrentTasksPerRegion; ++i) {
+    for (int i = 0; i != controller.maxConcurrentTasksPerRegion; ++i) {
       ap.incTaskCounters(Arrays.asList(hri1.getRegionName()), sn);
     }
     ap.submit(null, DUMMY_TABLE, puts, false, null, false);
@@ -732,15 +730,22 @@ public class TestAsyncProcess {
     ap.decTaskCounters(Arrays.asList(hri1.getRegionName()), sn);
     ap.submit(null, DUMMY_TABLE, puts, false, null, false);
     Assert.assertEquals(0, puts.size());
+    if (defaultClazz != null) {
+      conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+        defaultClazz);
+    }
   }
 
 
   @Test
   public void testSubmitBusyRegionServer() throws Exception {
-    ClusterConnection hc = createHConnection();
-    AsyncProcess ap = new MyAsyncProcess(hc, conf);
-
-    ap.taskCounterPerServer.put(sn2, new AtomicInteger(ap.maxConcurrentTasksPerServer));
+    ClusterConnection conn = createHConnection();
+    MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
+    final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
+    conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+      SimpleRequestController.class.getName());
+    SimpleRequestController controller = (SimpleRequestController) ap.requestController;
+    controller.taskCounterPerServer.put(sn2, new AtomicInteger(controller.maxConcurrentTasksPerServer));
 
     List<Put> puts = new ArrayList<Put>();
     puts.add(createPut(1, true));
@@ -751,14 +756,18 @@ public class TestAsyncProcess {
     ap.submit(null, DUMMY_TABLE, puts, false, null, false);
     Assert.assertEquals(" puts=" + puts, 1, puts.size());
 
-    ap.taskCounterPerServer.put(sn2, new AtomicInteger(ap.maxConcurrentTasksPerServer - 1));
+    controller.taskCounterPerServer.put(sn2, new AtomicInteger(controller.maxConcurrentTasksPerServer - 1));
     ap.submit(null, DUMMY_TABLE, puts, false, null, false);
     Assert.assertTrue(puts.isEmpty());
+    if (defaultClazz != null) {
+      conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+        defaultClazz);
+    }
   }
 
   @Test
   public void testFail() throws Exception {
-    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);
+    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false);
 
     List<Put> puts = new ArrayList<Put>();
     Put p = createPut(1, false);
@@ -784,10 +793,15 @@ public class TestAsyncProcess {
 
   @Test
   public void testSubmitTrue() throws IOException {
-    final AsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);
-    ap.tasksInProgress.incrementAndGet();
-    final AtomicInteger ai = new AtomicInteger(ap.maxConcurrentTasksPerRegion);
-    ap.taskCounterPerRegion.put(hri1.getRegionName(), ai);
+    ClusterConnection conn = createHConnection();
+    final MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, false);
+    final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
+    conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+      SimpleRequestController.class.getName());
+    SimpleRequestController controller = (SimpleRequestController) ap.requestController;
+    controller.tasksInProgress.incrementAndGet();
+    final AtomicInteger ai = new AtomicInteger(controller.maxConcurrentTasksPerRegion);
+    controller.taskCounterPerRegion.put(hri1.getRegionName(), ai);
 
     final AtomicBoolean checkPoint = new AtomicBoolean(false);
     final AtomicBoolean checkPoint2 = new AtomicBoolean(false);
@@ -798,7 +812,7 @@ public class TestAsyncProcess {
         Threads.sleep(1000);
         Assert.assertFalse(checkPoint.get()); // TODO: this is timing-dependent
         ai.decrementAndGet();
-        ap.tasksInProgress.decrementAndGet();
+        controller.tasksInProgress.decrementAndGet();
         checkPoint2.set(true);
       }
     };
@@ -819,11 +833,15 @@ public class TestAsyncProcess {
     while (!checkPoint2.get()){
       Threads.sleep(1);
     }
+    if (defaultClazz != null) {
+      conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+        defaultClazz);
+    }
   }
 
   @Test
   public void testFailAndSuccess() throws Exception {
-    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);
+    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false);
 
     List<Put> puts = new ArrayList<Put>();
     puts.add(createPut(1, false));
@@ -850,7 +868,7 @@ public class TestAsyncProcess {
 
   @Test
   public void testFlush() throws Exception {
-    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);
+    MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false);
 
     List<Put> puts = new ArrayList<Put>();
     puts.add(createPut(1, false));
@@ -868,24 +886,32 @@ public class TestAsyncProcess {
   @Test
   public void testTaskCountWithoutClientBackoffPolicy() throws IOException, InterruptedException {
     ClusterConnection hc = createHConnection();
-    MyAsyncProcess ap = new MyAsyncProcess(hc, conf, false);
+    MyAsyncProcess ap = new MyAsyncProcess(hc, CONF, false);
     testTaskCount(ap);
   }
 
   @Test
   public void testTaskCountWithClientBackoffPolicy() throws IOException, InterruptedException {
-    Configuration copyConf = new Configuration(conf);
+    Configuration copyConf = new Configuration(CONF);
     copyConf.setBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE, true);
     MyClientBackoffPolicy bp = new MyClientBackoffPolicy();
-    ClusterConnection hc = createHConnection();
-    Mockito.when(hc.getConfiguration()).thenReturn(copyConf);
-    Mockito.when(hc.getStatisticsTracker()).thenReturn(ServerStatisticTracker.create(copyConf));
-    Mockito.when(hc.getBackoffPolicy()).thenReturn(bp);
-    MyAsyncProcess ap = new MyAsyncProcess(hc, copyConf, false);
+    ClusterConnection conn = createHConnection();
+    Mockito.when(conn.getConfiguration()).thenReturn(copyConf);
+    Mockito.when(conn.getStatisticsTracker()).thenReturn(ServerStatisticTracker.create(copyConf));
+    Mockito.when(conn.getBackoffPolicy()).thenReturn(bp);
+    final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
+    conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+      SimpleRequestController.class.getName());
+    MyAsyncProcess ap = new MyAsyncProcess(conn, copyConf, false);
     testTaskCount(ap);
+    if (defaultClazz != null) {
+      conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+        defaultClazz);
+    }
   }
 
-  private void testTaskCount(AsyncProcess ap) throws InterruptedIOException, InterruptedException {
+  private void testTaskCount(MyAsyncProcess ap) throws InterruptedIOException, InterruptedException {
+    SimpleRequestController controller = (SimpleRequestController) ap.requestController;
     List<Put> puts = new ArrayList<>();
     for (int i = 0; i != 3; ++i) {
       puts.add(createPut(1, true));
@@ -896,18 +922,24 @@ public class TestAsyncProcess {
     ap.waitForMaximumCurrentTasks(0, null);
     // More time to wait if there are incorrect task count.
     TimeUnit.SECONDS.sleep(1);
-    assertEquals(0, ap.tasksInProgress.get());
-    for (AtomicInteger count : ap.taskCounterPerRegion.values()) {
+    assertEquals(0, controller.tasksInProgress.get());
+    for (AtomicInteger count : controller.taskCounterPerRegion.values()) {
       assertEquals(0, count.get());
     }
-    for (AtomicInteger count : ap.taskCounterPerServer.values()) {
+    for (AtomicInteger count : controller.taskCounterPerServer.values()) {
       assertEquals(0, count.get());
     }
   }
 
   @Test
   public void testMaxTask() throws Exception {
-    final AsyncProcess ap = new MyAsyncProcess(createHConnection(), conf, false);
+    ClusterConnection conn = createHConnection();
+    final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
+    conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+      SimpleRequestController.class.getName());
+    final MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, false);
+    SimpleRequestController controller = (SimpleRequestController) ap.requestController;
+
 
     for (int i = 0; i < 1000; i++) {
       ap.incTaskCounters(Arrays.asList("dummy".getBytes()), sn);
@@ -940,7 +972,7 @@ public class TestAsyncProcess {
       @Override
       public void run() {
         Threads.sleep(sleepTime);
-        while (ap.tasksInProgress.get() > 0) {
+        while (controller.tasksInProgress.get() > 0) {
           ap.decTaskCounters(Arrays.asList("dummy".getBytes()), sn);
         }
       }
@@ -953,6 +985,10 @@ public class TestAsyncProcess {
 
     //Adds 100 to secure us against approximate timing.
     Assert.assertTrue(start + 100L + sleepTime > end);
+    if (defaultClazz != null) {
+      conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
+        defaultClazz);
+    }
   }
 
   private static ClusterConnection createHConnection() throws IOException {
@@ -999,38 +1035,53 @@ public class TestAsyncProcess {
     NonceGenerator ng = Mockito.mock(NonceGenerator.class);
     Mockito.when(ng.getNonceGroup()).thenReturn(HConstants.NO_NONCE);
     Mockito.when(hc.getNonceGenerator()).thenReturn(ng);
-    Mockito.when(hc.getConfiguration()).thenReturn(conf);
+    Mockito.when(hc.getConfiguration()).thenReturn(CONF);
+    Mockito.when(hc.getConnectionConfiguration()).thenReturn(CONNECTION_CONFIG);
     return hc;
   }
 
   @Test
   public void testHTablePutSuccess() throws Exception {
-    BufferedMutatorImpl ht = Mockito.mock(BufferedMutatorImpl.class);
-    ht.ap = new MyAsyncProcess(createHConnection(), conf, true);
+    ClusterConnection conn = createHConnection();
+    MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, true);
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    BufferedMutatorImpl ht = new BufferedMutatorImpl(conn, bufferParam, ap);
 
     Put put = createPut(1, true);
 
-    Assert.assertEquals(0, ht.getWriteBufferSize());
+    Assert.assertEquals(conn.getConnectionConfiguration().getWriteBufferSize(), ht.getWriteBufferSize());
+    Assert.assertEquals(0, ht.getCurrentWriteBufferSize());
     ht.mutate(put);
-    Assert.assertEquals(0, ht.getWriteBufferSize());
+    ht.flush();
+    Assert.assertEquals(0, ht.getCurrentWriteBufferSize());
+  }
+
+  @Test
+  public void testBufferedMutatorImplWithSharedPool() throws Exception {
+    ClusterConnection conn = createHConnection();
+    MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, true);
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    BufferedMutator ht = new BufferedMutatorImpl(conn, bufferParam, ap);
+
+    ht.close();
+    assertFalse(ap.service.isShutdown());
   }
 
   private void doHTableFailedPut(boolean bufferOn) throws Exception {
     ClusterConnection conn = createHConnection();
-    BufferedMutatorParams bufferParam = new BufferedMutatorParams(DUMMY_TABLE);
+    MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, true);
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
     if (bufferOn) {
       bufferParam.writeBufferSize(1024L * 1024L);
     } else {
       bufferParam.writeBufferSize(0L);
     }
-
-    HTable ht = new HTable(conn, bufferParam);
-    MyAsyncProcess ap = new MyAsyncProcess(conn, conf, true);
-    ht.mutator.ap = ap;
+    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
+    HTable ht = new HTable(conn, mutator);
 
     Put put = createPut(1, false);
 
-    Assert.assertEquals(0L, ht.mutator.currentWriteBufferSize.get());
+    Assert.assertEquals(0L, ht.mutator.getCurrentWriteBufferSize());
     try {
       ht.put(put);
       if (bufferOn) {
@@ -1039,7 +1090,7 @@ public class TestAsyncProcess {
       Assert.fail();
     } catch (RetriesExhaustedException expected) {
     }
-    Assert.assertEquals(0L, ht.mutator.currentWriteBufferSize.get());
+    Assert.assertEquals(0L, ht.mutator.getCurrentWriteBufferSize());
     // The table should have sent one request, maybe after multiple attempts
     AsyncRequestFuture ars = null;
     for (AsyncRequestFuture someReqs : ap.allReqs) {
@@ -1067,10 +1118,10 @@ public class TestAsyncProcess {
   @Test
   public void testHTableFailedPutAndNewPut() throws Exception {
     ClusterConnection conn = createHConnection();
-    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, null, null,
-        new BufferedMutatorParams(DUMMY_TABLE).writeBufferSize(0));
-    MyAsyncProcess ap = new MyAsyncProcess(conn, conf, true);
-    mutator.ap = ap;
+    MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, true);
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE)
+            .writeBufferSize(0);
+    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
 
     Put p = createPut(1, false);
     mutator.mutate(p);
@@ -1083,202 +1134,13 @@ public class TestAsyncProcess {
     //  puts, we may raise an exception in the middle of the list. It's then up to the caller to
     //  manage what was inserted, what was tried but failed, and what was not even tried.
     p = createPut(1, true);
-    Assert.assertEquals(0, mutator.writeAsyncBuffer.size());
+    Assert.assertEquals(0, mutator.size());
     try {
       mutator.mutate(p);
       Assert.fail();
     } catch (RetriesExhaustedException expected) {
     }
-    Assert.assertEquals("the put should not been inserted.", 0, mutator.writeAsyncBuffer.size());
-  }
-
-  @Test
-  public void testTaskCheckerHost() throws IOException {
-    final int maxTotalConcurrentTasks = 100;
-    final int maxConcurrentTasksPerServer = 2;
-    final int maxConcurrentTasksPerRegion = 1;
-    final AtomicLong tasksInProgress = new AtomicLong(0);
-    final Map<ServerName, AtomicInteger> taskCounterPerServer = new HashMap<>();
-    final Map<byte[], AtomicInteger> taskCounterPerRegion = new HashMap<>();
-    TaskCountChecker countChecker = new TaskCountChecker(
-      maxTotalConcurrentTasks,
-      maxConcurrentTasksPerServer,
-      maxConcurrentTasksPerRegion,
-      tasksInProgress, taskCounterPerServer, taskCounterPerRegion);
-    final long maxHeapSizePerRequest = 2 * 1024 * 1024;
-    // unlimiited
-    RequestSizeChecker sizeChecker = new RequestSizeChecker(maxHeapSizePerRequest);
-    RowCheckerHost checkerHost = new RowCheckerHost(Arrays.asList(countChecker, sizeChecker));
-
-    ReturnCode loc1Code = checkerHost.canTakeOperation(loc1, maxHeapSizePerRequest);
-    assertEquals(RowChecker.ReturnCode.INCLUDE, loc1Code);
-
-    ReturnCode loc1Code_2 = checkerHost.canTakeOperation(loc1, maxHeapSizePerRequest);
-    // rejected for size
-    assertNotEquals(RowChecker.ReturnCode.INCLUDE, loc1Code_2);
-
-    ReturnCode loc2Code = checkerHost.canTakeOperation(loc2, maxHeapSizePerRequest);
-    // rejected for size
-    assertNotEquals(RowChecker.ReturnCode.INCLUDE, loc2Code);
-
-    // fill the task slots for loc3.
-    taskCounterPerRegion.put(loc3.getRegionInfo().getRegionName(), new AtomicInteger(100));
-    taskCounterPerServer.put(loc3.getServerName(), new AtomicInteger(100));
-
-    ReturnCode loc3Code = checkerHost.canTakeOperation(loc3, 1L);
-    // rejected for count
-    assertNotEquals(RowChecker.ReturnCode.INCLUDE, loc3Code);
-
-    // release the task slots for loc3.
-    taskCounterPerRegion.put(loc3.getRegionInfo().getRegionName(), new AtomicInteger(0));
-    taskCounterPerServer.put(loc3.getServerName(), new AtomicInteger(0));
-
-    ReturnCode loc3Code_2 = checkerHost.canTakeOperation(loc3, 1L);
-    assertEquals(RowChecker.ReturnCode.INCLUDE, loc3Code_2);
-  }
-
-  @Test
-  public void testRequestSizeCheckerr() throws IOException {
-    final long maxHeapSizePerRequest = 2 * 1024 * 1024;
-    final ClusterConnection conn = createHConnection();
-    RequestSizeChecker checker = new RequestSizeChecker(maxHeapSizePerRequest);
-
-    // inner state is unchanged.
-    for (int i = 0; i != 10; ++i) {
-      ReturnCode code = checker.canTakeOperation(loc1, maxHeapSizePerRequest);
-      assertEquals(RowChecker.ReturnCode.INCLUDE, code);
-      code = checker.canTakeOperation(loc2, maxHeapSizePerRequest);
-      assertEquals(RowChecker.ReturnCode.INCLUDE, code);
-    }
-
-    // accept the data located on loc1 region.
-    ReturnCode acceptCode = checker.canTakeOperation(loc1, maxHeapSizePerRequest);
-    assertEquals(RowChecker.ReturnCode.INCLUDE, acceptCode);
-    checker.notifyFinal(acceptCode, loc1, maxHeapSizePerRequest);
-
-    // the sn server reachs the limit.
-    for (int i = 0; i != 10; ++i) {
-      ReturnCode code = checker.canTakeOperation(loc1, maxHeapSizePerRequest);
-      assertNotEquals(RowChecker.ReturnCode.INCLUDE, code);
-      code = checker.canTakeOperation(loc2, maxHeapSizePerRequest);
-      assertNotEquals(RowChecker.ReturnCode.INCLUDE, code);
-    }
-
-    // the request to sn2 server should be accepted.
-    for (int i = 0; i != 10; ++i) {
-      ReturnCode code = checker.canTakeOperation(loc3, maxHeapSizePerRequest);
-      assertEquals(RowChecker.ReturnCode.INCLUDE, code);
-    }
-
-    checker.reset();
-    for (int i = 0; i != 10; ++i) {
-      ReturnCode code = checker.canTakeOperation(loc1, maxHeapSizePerRequest);
-      assertEquals(RowChecker.ReturnCode.INCLUDE, code);
-      code = checker.canTakeOperation(loc2, maxHeapSizePerRequest);
-      assertEquals(RowChecker.ReturnCode.INCLUDE, code);
-    }
-  }
-
-  @Test
-  public void testSubmittedSizeChecker() {
-    final long maxHeapSizeSubmit = 2 * 1024 * 1024;
-    SubmittedSizeChecker checker = new SubmittedSizeChecker(maxHeapSizeSubmit);
-
-    for (int i = 0; i != 10; ++i) {
-      ReturnCode include = checker.canTakeOperation(loc1, 100000);
-      assertEquals(ReturnCode.INCLUDE, include);
-    }
-
-    for (int i = 0; i != 10; ++i) {
-      checker.notifyFinal(ReturnCode.INCLUDE, loc1, maxHeapSizeSubmit);
-    }
-
-    for (int i = 0; i != 10; ++i) {
-      ReturnCode include = checker.canTakeOperation(loc1, 100000);
-      assertEquals(ReturnCode.END, include);
-    }
-    for (int i = 0; i != 10; ++i) {
-      ReturnCode include = checker.canTakeOperation(loc2, 100000);
-      assertEquals(ReturnCode.END, include);
-    }
-    checker.reset();
-    for (int i = 0; i != 10; ++i) {
-      ReturnCode include = checker.canTakeOperation(loc1, 100000);
-      assertEquals(ReturnCode.INCLUDE, include);
-    }
-  }
-  @Test
-  public void testTaskCountChecker() throws InterruptedIOException {
-    long rowSize = 12345;
-    int maxTotalConcurrentTasks = 100;
-    int maxConcurrentTasksPerServer = 2;
-    int maxConcurrentTasksPerRegion = 1;
-    AtomicLong tasksInProgress = new AtomicLong(0);
-    Map<ServerName, AtomicInteger> taskCounterPerServer = new HashMap<>();
-    Map<byte[], AtomicInteger> taskCounterPerRegion = new HashMap<>();
-    TaskCountChecker checker = new TaskCountChecker(
-      maxTotalConcurrentTasks,
-      maxConcurrentTasksPerServer,
-      maxConcurrentTasksPerRegion,
-      tasksInProgress, taskCounterPerServer, taskCounterPerRegion);
-
-    // inner state is unchanged.
-    for (int i = 0; i != 10; ++i) {
-      ReturnCode code = checker.canTakeOperation(loc1, rowSize);
-      assertEquals(RowChecker.ReturnCode.INCLUDE, code);
-    }
-    // add loc1 region.
-    ReturnCode code = checker.canTakeOperation(loc1, rowSize);
-    assertEquals(RowChecker.ReturnCode.INCLUDE, code);
-    checker.notifyFinal(code, loc1, rowSize);
-
-    // fill the task slots for loc1.
-    taskCounterPerRegion.put(loc1.getRegionInfo().getRegionName(), new AtomicInteger(100));
-    taskCounterPerServer.put(loc1.getServerName(), new AtomicInteger(100));
-
-    // the region was previously accepted, so it must be accpted now.
-    for (int i = 0; i != maxConcurrentTasksPerRegion * 5; ++i) {
-      ReturnCode includeCode = checker.canTakeOperation(loc1, rowSize);
-      assertEquals(RowChecker.ReturnCode.INCLUDE, includeCode);
-      checker.notifyFinal(includeCode, loc1, rowSize);
-    }
-
-    // fill the task slots for loc3.
-    taskCounterPerRegion.put(loc3.getRegionInfo().getRegionName(), new AtomicInteger(100));
-    taskCounterPerServer.put(loc3.getServerName(), new AtomicInteger(100));
-
-    // no task slots.
-    for (int i = 0; i != maxConcurrentTasksPerRegion * 5; ++i) {
-      ReturnCode excludeCode = checker.canTakeOperation(loc3, rowSize);
-      assertNotEquals(RowChecker.ReturnCode.INCLUDE, excludeCode);
-      checker.notifyFinal(excludeCode, loc3, rowSize);
-    }
-
-    // release the tasks for loc3.
-    taskCounterPerRegion.put(loc3.getRegionInfo().getRegionName(), new AtomicInteger(0));
-    taskCounterPerServer.put(loc3.getServerName(), new AtomicInteger(0));
-
-    // add loc3 region.
-    ReturnCode code3 = checker.canTakeOperation(loc3, rowSize);
-    assertEquals(RowChecker.ReturnCode.INCLUDE, code3);
-    checker.notifyFinal(code3, loc3, rowSize);
-
-    // the region was previously accepted, so it must be accpted now.
-    for (int i = 0; i != maxConcurrentTasksPerRegion * 5; ++i) {
-      ReturnCode includeCode = checker.canTakeOperation(loc3, rowSize);
-      assertEquals(RowChecker.ReturnCode.INCLUDE, includeCode);
-      checker.notifyFinal(includeCode, loc3, rowSize);
-    }
-
-    checker.reset();
-    // the region was previously accepted,
-    // but the checker has been reset and the task slots for loc1 are full,
-    // so it must be rejected now.
-    for (int i = 0; i != maxConcurrentTasksPerRegion * 5; ++i) {
-      ReturnCode includeCode = checker.canTakeOperation(loc1, rowSize);
-      assertNotEquals(RowChecker.ReturnCode.INCLUDE, includeCode);
-      checker.notifyFinal(includeCode, loc1, rowSize);
-    }
+    Assert.assertEquals("the put should not been inserted.", 0, mutator.size());
   }
 
   @Test
@@ -1302,9 +1164,12 @@ public class TestAsyncProcess {
 
   @Test
   public void testBatch() throws IOException, InterruptedException {
-    ClusterConnection conn = new MyConnectionImpl(conf);
-    HTable ht = new HTable(conn, new BufferedMutatorParams(DUMMY_TABLE));
-    ht.multiAp = new MyAsyncProcess(conn, conf, false);
+    ClusterConnection conn = new MyConnectionImpl(CONF);
+    MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, true);
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
+    HTable ht = new HTable(conn, mutator);
+    ht.multiAp = new MyAsyncProcess(conn, CONF, false);
 
     List<Put> puts = new ArrayList<Put>();
     puts.add(createPut(1, true));
@@ -1332,18 +1197,16 @@ public class TestAsyncProcess {
   }
   @Test
   public void testErrorsServers() throws IOException {
-    Configuration configuration = new Configuration(conf);
+    Configuration configuration = new Configuration(CONF);
     ClusterConnection conn = new MyConnectionImpl(configuration);
-    BufferedMutatorImpl mutator =
-        new BufferedMutatorImpl(conn, null, null, new BufferedMutatorParams(DUMMY_TABLE));
-    configuration.setBoolean(ConnectionImplementation.RETRIES_BY_SERVER_KEY, true);
-
     MyAsyncProcess ap = new MyAsyncProcess(conn, configuration, true);
-    mutator.ap = ap;
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
+    configuration.setBoolean(ConnectionImplementation.RETRIES_BY_SERVER_KEY, true);
 
-    Assert.assertNotNull(mutator.ap.createServerErrorTracker());
-    Assert.assertTrue(mutator.ap.serverTrackerTimeout > 200);
-    mutator.ap.serverTrackerTimeout = 1;
+    Assert.assertNotNull(ap.createServerErrorTracker());
+    Assert.assertTrue(ap.serverTrackerTimeout > 200);
+    ap.serverTrackerTimeout = 1;
 
     Put p = createPut(1, false);
     mutator.mutate(p);
@@ -1361,14 +1224,15 @@ public class TestAsyncProcess {
   public void testReadAndWriteTimeout() throws IOException {
     final long readTimeout = 10 * 1000;
     final long writeTimeout = 20 * 1000;
-    Configuration copyConf = new Configuration(conf);
+    Configuration copyConf = new Configuration(CONF);
     copyConf.setLong(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, readTimeout);
     copyConf.setLong(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, writeTimeout);
     ClusterConnection conn = createHConnection();
     Mockito.when(conn.getConfiguration()).thenReturn(copyConf);
-    BufferedMutatorParams bufferParam = new BufferedMutatorParams(DUMMY_TABLE);
-    try (HTable ht = new HTable(conn, bufferParam)) {
-      MyAsyncProcess ap = new MyAsyncProcess(conn, copyConf, true);
+    MyAsyncProcess ap = new MyAsyncProcess(conn, copyConf, true);
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
+    try (HTable ht = new HTable(conn, mutator)) {
       ht.multiAp = ap;
       List<Get> gets = new LinkedList<>();
       gets.add(new Get(DUMMY_BYTES_1));
@@ -1399,12 +1263,12 @@ public class TestAsyncProcess {
 
   @Test
   public void testGlobalErrors() throws IOException {
-    ClusterConnection conn = new MyConnectionImpl(conf);
-    BufferedMutatorImpl mutator = (BufferedMutatorImpl) conn.getBufferedMutator(DUMMY_TABLE);
-    AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, conf, new IOException("test"));
-    mutator.ap = ap;
+    ClusterConnection conn = new MyConnectionImpl(CONF);
+    AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, CONF, new IOException("test"));
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
 
-    Assert.assertNotNull(mutator.ap.createServerErrorTracker());
+    Assert.assertNotNull(ap.createServerErrorTracker());
 
     Put p = createPut(1, true);
     mutator.mutate(p);
@@ -1421,13 +1285,11 @@ public class TestAsyncProcess {
 
   @Test
   public void testCallQueueTooLarge() throws IOException {
-    ClusterConnection conn = new MyConnectionImpl(conf);
-    BufferedMutatorImpl mutator = (BufferedMutatorImpl) conn.getBufferedMutator(DUMMY_TABLE);
-    AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, conf, new CallQueueTooBigException());
-    mutator.ap = ap;
-
-    Assert.assertNotNull(mutator.ap.createServerErrorTracker());
-
+    ClusterConnection conn = new MyConnectionImpl(CONF);
+    AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, CONF, new CallQueueTooBigException());
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
+    Assert.assertNotNull(ap.createServerErrorTracker());
     Put p = createPut(1, true);
     mutator.mutate(p);
 
@@ -1459,10 +1321,11 @@ public class TestAsyncProcess {
     }
 
     MyConnectionImpl2 con = new MyConnectionImpl2(hrls);
-    HTable ht = new HTable(con, new BufferedMutatorParams(DUMMY_TABLE));
-    MyAsyncProcess ap = new MyAsyncProcess(con, conf, con.nbThreads);
+    MyAsyncProcess ap = new MyAsyncProcess(con, CONF, con.nbThreads);
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    BufferedMutatorImpl mutator = new BufferedMutatorImpl(con, bufferParam, ap);
+    HTable ht = new HTable(con, mutator);
     ht.multiAp = ap;
-
     ht.batch(gets, null);
 
     Assert.assertEquals(ap.nbActions.get(), NB_REGS);
@@ -1482,7 +1345,16 @@ public class TestAsyncProcess {
     // One region has no replica, so the main call succeeds for it.
     MyAsyncProcessWithReplicas ap = createReplicaAp(10, 1000, 0);
     List<Get> rows = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2, DUMMY_BYTES_3);
-    AsyncRequestFuture ars = ap.submitAll(null,DUMMY_TABLE, rows, null, new Object[3]);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(ap.service)
+            .setRpcTimeout(RPC_TIMEOUT)
+            .setOperationTimeout(OPERATION_TIMEOUT)
+            .setTableName(DUMMY_TABLE)
+            .setRowAccess(rows)
+            .setResults(new Object[3])
+            .setSubmittedRows(SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = ap.submit(task);
     verifyReplicaResult(ars, RR.TRUE, RR.TRUE, RR.FALSE);
     Assert.assertEquals(2, ap.getReplicaCallCount());
   }
@@ -1492,7 +1364,16 @@ public class TestAsyncProcess {
     // Main call succeeds before replica calls are kicked off.
     MyAsyncProcessWithReplicas ap = createReplicaAp(1000, 10, 0);
     List<Get> rows = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2, DUMMY_BYTES_3);
-    AsyncRequestFuture ars = ap.submitAll(null, DUMMY_TABLE, rows, null, new Object[3]);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(ap.service)
+            .setRpcTimeout(RPC_TIMEOUT)
+            .setOperationTimeout(OPERATION_TIMEOUT)
+            .setTableName(DUMMY_TABLE)
+            .setRowAccess(rows)
+            .setResults(new Object[3])
+            .setSubmittedRows(SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = ap.submit(task);
     verifyReplicaResult(ars, RR.FALSE, RR.FALSE, RR.FALSE);
     Assert.assertEquals(0, ap.getReplicaCallCount());
   }
@@ -1502,7 +1383,16 @@ public class TestAsyncProcess {
     // Either main or replica can succeed.
     MyAsyncProcessWithReplicas ap = createReplicaAp(0, 0, 0);
     List<Get> rows = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2);
-    AsyncRequestFuture ars = ap.submitAll(null, DUMMY_TABLE, rows, null, new Object[2]);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(ap.service)
+            .setRpcTimeout(RPC_TIMEOUT)
+            .setOperationTimeout(OPERATION_TIMEOUT)
+            .setTableName(DUMMY_TABLE)
+            .setRowAccess(rows)
+            .setResults(new Object[2])
+            .setSubmittedRows(SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = ap.submit(task);
     verifyReplicaResult(ars, RR.DONT_CARE, RR.DONT_CARE);
     long replicaCalls = ap.getReplicaCallCount();
     Assert.assertTrue(replicaCalls >= 0);
@@ -1517,7 +1407,16 @@ public class TestAsyncProcess {
     MyAsyncProcessWithReplicas ap = createReplicaAp(1000, 0, 0);
     ap.setPrimaryCallDelay(sn2, 2000);
     List<Get> rows = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2);
-    AsyncRequestFuture ars = ap.submitAll(null ,DUMMY_TABLE, rows, null, new Object[2]);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(ap.service)
+            .setRpcTimeout(RPC_TIMEOUT)
+            .setOperationTimeout(OPERATION_TIMEOUT)
+            .setTableName(DUMMY_TABLE)
+            .setRowAccess(rows)
+            .setResults(new Object[2])
+            .setSubmittedRows(SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = ap.submit(task);
     verifyReplicaResult(ars, RR.FALSE, RR.TRUE);
     Assert.assertEquals(1, ap.getReplicaCallCount());
   }
@@ -1530,7 +1429,16 @@ public class TestAsyncProcess {
     MyAsyncProcessWithReplicas ap = createReplicaAp(1000, 0, 0, 0);
     ap.addFailures(hri1, hri2);
     List<Get> rows = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2);
-    AsyncRequestFuture ars = ap.submitAll(null, DUMMY_TABLE, rows, null, new Object[2]);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(ap.service)
+            .setRpcTimeout(RPC_TIMEOUT)
+            .setOperationTimeout(OPERATION_TIMEOUT)
+            .setTableName(DUMMY_TABLE)
+            .setRowAccess(rows)
+            .setResults(new Object[2])
+            .setSubmittedRows(SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = ap.submit(task);
     verifyReplicaResult(ars, RR.FAILED, RR.FAILED);
     Assert.assertEquals(0, ap.getReplicaCallCount());
   }
@@ -1542,7 +1450,16 @@ public class TestAsyncProcess {
     MyAsyncProcessWithReplicas ap = createReplicaAp(0, 1000, 1000, 0);
     ap.addFailures(hri1, hri1r2, hri2);
     List<Get> rows = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2);
-    AsyncRequestFuture ars = ap.submitAll(null, DUMMY_TABLE, rows, null, new Object[2]);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(ap.service)
+            .setRpcTimeout(RPC_TIMEOUT)
+            .setOperationTimeout(OPERATION_TIMEOUT)
+            .setTableName(DUMMY_TABLE)
+            .setRowAccess(rows)
+            .setResults(new Object[2])
+            .setSubmittedRows(SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = ap.submit(task);
     verifyReplicaResult(ars, RR.TRUE, RR.TRUE);
     Assert.assertEquals(2, ap.getReplicaCallCount());
   }
@@ -1554,7 +1471,16 @@ public class TestAsyncProcess {
     MyAsyncProcessWithReplicas ap = createReplicaAp(500, 1000, 0, 0);
     ap.addFailures(hri1, hri1r1, hri1r2, hri2r1);
     List<Get> rows = makeTimelineGets(DUMMY_BYTES_1, DUMMY_BYTES_2);
-    AsyncRequestFuture ars = ap.submitAll(null, DUMMY_TABLE, rows, null, new Object[2]);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(ap.service)
+            .setRpcTimeout(RPC_TIMEOUT)
+            .setOperationTimeout(OPERATION_TIMEOUT)
+            .setTableName(DUMMY_TABLE)
+            .setRowAccess(rows)
+            .setResults(new Object[2])
+            .setSubmittedRows(SubmittedRows.ALL)
+            .build();
+    AsyncRequestFuture ars = ap.submit(task);
     verifyReplicaResult(ars, RR.FAILED, RR.FALSE);
     // We should get 3 exceptions, for main + 2 replicas for DUMMY_BYTES_1
     Assert.assertEquals(3, ars.getErrors().getNumExceptions());
@@ -1583,6 +1509,13 @@ public class TestAsyncProcess {
     return ap;
   }
 
+  private static BufferedMutatorParams createBufferedMutatorParams(MyAsyncProcess ap, TableName name) {
+    return new BufferedMutatorParams(name)
+            .pool(ap.service)
+            .rpcTimeout(RPC_TIMEOUT)
+            .opertationTimeout(OPERATION_TIMEOUT);
+  }
+
   private static List<Get> makeTimelineGets(byte[]... rows) {
     List<Get> result = new ArrayList<Get>();
     for (byte[] row : rows) {
@@ -1663,14 +1596,9 @@ public class TestAsyncProcess {
   }
 
   static class AsyncProcessForThrowableCheck extends AsyncProcess {
-    private static int rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
-        HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
-    private static int operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-    public AsyncProcessForThrowableCheck(ClusterConnection hc, Configuration conf,
-        ExecutorService pool) {
-      super(hc, conf, pool, new RpcRetryingCallerFactory(conf), false, new RpcControllerFactory(
-          conf), rpcTimeout, operationTimeout);
+    public AsyncProcessForThrowableCheck(ClusterConnection hc, Configuration conf) {
+      super(hc, conf, new RpcRetryingCallerFactory(conf), false, new RpcControllerFactory(
+          conf));
     }
   }
 
@@ -1681,56 +1609,22 @@ public class TestAsyncProcess {
     MyThreadPoolExecutor myPool =
         new MyThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
             new LinkedBlockingQueue<Runnable>(200));
-    AsyncProcess ap = new AsyncProcessForThrowableCheck(hc, conf, myPool);
+    AsyncProcess ap = new AsyncProcessForThrowableCheck(hc, CONF);
 
     List<Put> puts = new ArrayList<Put>();
     puts.add(createPut(1, true));
-
-    ap.submit(null, DUMMY_TABLE, puts, false, null, false);
+    AsyncProcessTask task = AsyncProcessTask.newBuilder()
+            .setPool(myPool)
+            .setRpcTimeout(RPC_TIMEOUT)
+            .setOperationTimeout(OPERATION_TIMEOUT)
+            .setTableName(DUMMY_TABLE)
+            .setRowAccess(puts)
+            .setSubmittedRows(SubmittedRows.NORMAL)
+            .build();
+    ap.submit(task);
     Assert.assertTrue(puts.isEmpty());
   }
 
-  @Test
-  public void testWaitForMaximumCurrentTasks() throws Exception {
-    final AtomicLong tasks = new AtomicLong(0);
-    final AtomicInteger max = new AtomicInteger(0);
-    final CyclicBarrier barrier = new CyclicBarrier(2);
-    final AsyncProcess ap = new MyAsyncProcess(createHConnection(), conf);
-    Runnable runnable = new Runnable() {
-      @Override
-      public void run() {
-        try {
-          barrier.await();
-          ap.waitForMaximumCurrentTasks(max.get(), tasks, 1, null);
-        } catch (InterruptedIOException e) {
-          Assert.fail(e.getMessage());
-        } catch (InterruptedException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        } catch (BrokenBarrierException e) {
-          // TODO Auto-generated catch block
-          e.printStackTrace();
-        }
-      }
-    };
-    // First test that our runnable thread only exits when tasks is zero.
-    Thread t = new Thread(runnable);
-    t.start();
-    barrier.await();
-    t.join();
-    // Now assert we stay running if max == zero and tasks is > 0.
-    barrier.reset();
-    tasks.set(1000000);
-    t = new Thread(runnable);
-    t.start();
-    barrier.await();
-    while (tasks.get() > 0) {
-      assertTrue(t.isAlive());
-      tasks.set(tasks.get() - 1);
-    }
-    t.join();
-  }
-
   /**
    * Test and make sure we could use a special pause setting when retry with
    * CallQueueTooBigException, see HBASE-17114
@@ -1738,18 +1632,18 @@ public class TestAsyncProcess {
    */
   @Test
   public void testRetryPauseWithCallQueueTooBigException() throws Exception {
-    Configuration myConf = new Configuration(conf);
+    Configuration myConf = new Configuration(CONF);
     final long specialPause = 500L;
     final int retries = 1;
     myConf.setLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, specialPause);
     myConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
     ClusterConnection conn = new MyConnectionImpl(myConf);
-    BufferedMutatorImpl mutator = (BufferedMutatorImpl) conn.getBufferedMutator(DUMMY_TABLE);
     AsyncProcessWithFailure ap =
         new AsyncProcessWithFailure(conn, myConf, new CallQueueTooBigException());
-    mutator.ap = ap;
+    BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
 
-    Assert.assertNotNull(mutator.ap.createServerErrorTracker());
+    Assert.assertNotNull(mutator.getAsyncProcess().createServerErrorTracker());
 
     Put p = createPut(1, true);
     mutator.mutate(p);
@@ -1775,8 +1669,9 @@ public class TestAsyncProcess {
     final long normalPause =
         myConf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
     ap = new AsyncProcessWithFailure(conn, myConf, new IOException());
-    mutator.ap = ap;
-    Assert.assertNotNull(mutator.ap.createServerErrorTracker());
+    bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
+    mutator = new BufferedMutatorImpl(conn, bufferParam, ap);
+    Assert.assertNotNull(mutator.getAsyncProcess().createServerErrorTracker());
     mutator.mutate(p);
     startTime = System.currentTimeMillis();
     try {

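Taken together, the TestAsyncProcess changes above illustrate the new client-side submission path: the AsyncProcess no longer carries its own pool or timeouts, the BufferedMutatorImpl is constructed directly with its BufferedMutatorParams and AsyncProcess instead of having an ap field reassigned after the fact, and batches are submitted as an AsyncProcessTask built through its builder. A condensed sketch of the pattern, using placeholder names (conn, rows) for objects the individual tests set up elsewhere:

    // Sketch only: conn and rows stand in for the ClusterConnection and the
    // List<Get>/List<Put> that each test constructs; CONF, RPC_TIMEOUT and
    // OPERATION_TIMEOUT are the test-class constants introduced by this patch.
    MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, true);
    BufferedMutatorParams bufferParam = new BufferedMutatorParams(DUMMY_TABLE)
        .pool(ap.service)
        .rpcTimeout(RPC_TIMEOUT)
        .opertationTimeout(OPERATION_TIMEOUT);  // method name spelled as in the patch
    BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);

    AsyncProcessTask task = AsyncProcessTask.newBuilder()
        .setPool(ap.service)
        .setRpcTimeout(RPC_TIMEOUT)
        .setOperationTimeout(OPERATION_TIMEOUT)
        .setTableName(DUMMY_TABLE)
        .setRowAccess(rows)
        .setResults(new Object[rows.size()])
        .setSubmittedRows(SubmittedRows.ALL)
        .build();
    AsyncRequestFuture ars = ap.submit(task);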

[02/50] [abbrv] hbase git commit: HBASE-11392 add/remove peer requests should be routed through master

Posted by sy...@apache.org.
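The generated ReplicationProtos classes in the diff below follow the usual protobuf builder pattern. As a minimal, illustrative sketch (the surrounding admin/RPC plumbing is omitted, and buildRequest is a hypothetical helper, not part of the patch), a caller could assemble the new request like this:

    import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;

    public class AddPeerRequestSketch {
      // peer_id and peer_config are both required fields, so build() throws
      // UninitializedMessageException if either is left unset.
      static ReplicationProtos.AddReplicationPeerRequest buildRequest(
          String peerId, ZooKeeperProtos.ReplicationPeer peerConfig) {
        return ReplicationProtos.AddReplicationPeerRequest.newBuilder()
            .setPeerId(peerId)
            .setPeerConfig(peerConfig)
            .build();
      }
    }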
http://git-wip-us.apache.org/repos/asf/hbase/blob/e1f4aaea/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
new file mode 100644
index 0000000..c91796d
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ReplicationProtos.java
@@ -0,0 +1,2158 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: Replication.proto
+
+package org.apache.hadoop.hbase.shaded.protobuf.generated;
+
+public final class ReplicationProtos {
+  private ReplicationProtos() {}
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
+  }
+
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
+    registerAllExtensions(
+        (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
+  }
+  public interface AddReplicationPeerRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.AddReplicationPeerRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    boolean hasPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    java.lang.String getPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes();
+
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    boolean hasPeerConfig();
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig();
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.AddReplicationPeerRequest}
+   */
+  public  static final class AddReplicationPeerRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.AddReplicationPeerRequest)
+      AddReplicationPeerRequestOrBuilder {
+    // Use AddReplicationPeerRequest.newBuilder() to construct.
+    private AddReplicationPeerRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private AddReplicationPeerRequest() {
+      peerId_ = "";
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private AddReplicationPeerRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000001;
+              peerId_ = bs;
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = peerConfig_.toBuilder();
+              }
+              peerConfig_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(peerConfig_);
+                peerConfig_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int PEER_ID_FIELD_NUMBER = 1;
+    private volatile java.lang.Object peerId_;
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public boolean hasPeerId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public java.lang.String getPeerId() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          peerId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        peerId_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    public static final int PEER_CONFIG_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer peerConfig_;
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    public boolean hasPeerConfig() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig() {
+      return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+    }
+    /**
+     * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder() {
+      return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasPeerId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasPeerConfig()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getPeerConfig().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, peerId_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, getPeerConfig());
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, peerId_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, getPeerConfig());
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest) obj;
+
+      boolean result = true;
+      result = result && (hasPeerId() == other.hasPeerId());
+      if (hasPeerId()) {
+        result = result && getPeerId()
+            .equals(other.getPeerId());
+      }
+      result = result && (hasPeerConfig() == other.hasPeerConfig());
+      if (hasPeerConfig()) {
+        result = result && getPeerConfig()
+            .equals(other.getPeerConfig());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasPeerId()) {
+        hash = (37 * hash) + PEER_ID_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerId().hashCode();
+      }
+      if (hasPeerConfig()) {
+        hash = (37 * hash) + PEER_CONFIG_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerConfig().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.AddReplicationPeerRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.AddReplicationPeerRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getPeerConfigFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        peerId_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (peerConfigBuilder_ == null) {
+          peerConfig_ = null;
+        } else {
+          peerConfigBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.peerId_ = peerId_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (peerConfigBuilder_ == null) {
+          result.peerConfig_ = peerConfig_;
+        } else {
+          result.peerConfig_ = peerConfigBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance()) return this;
+        if (other.hasPeerId()) {
+          bitField0_ |= 0x00000001;
+          peerId_ = other.peerId_;
+          onChanged();
+        }
+        if (other.hasPeerConfig()) {
+          mergePeerConfig(other.getPeerConfig());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasPeerId()) {
+          return false;
+        }
+        if (!hasPeerConfig()) {
+          return false;
+        }
+        if (!getPeerConfig().isInitialized()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.lang.Object peerId_ = "";
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public boolean hasPeerId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public java.lang.String getPeerId() {
+        java.lang.Object ref = peerId_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            peerId_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getPeerIdBytes() {
+        java.lang.Object ref = peerId_;
+        if (ref instanceof String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          peerId_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerId(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder clearPeerId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        peerId_ = getDefaultInstance().getPeerId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerIdBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer peerConfig_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder> peerConfigBuilder_;
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public boolean hasPeerConfig() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer getPeerConfig() {
+        if (peerConfigBuilder_ == null) {
+          return peerConfig_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+        } else {
+          return peerConfigBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder setPeerConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer value) {
+        if (peerConfigBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          peerConfig_ = value;
+          onChanged();
+        } else {
+          peerConfigBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder setPeerConfig(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder builderForValue) {
+        if (peerConfigBuilder_ == null) {
+          peerConfig_ = builderForValue.build();
+          onChanged();
+        } else {
+          peerConfigBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder mergePeerConfig(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer value) {
+        if (peerConfigBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              peerConfig_ != null &&
+              peerConfig_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance()) {
+            peerConfig_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.newBuilder(peerConfig_).mergeFrom(value).buildPartial();
+          } else {
+            peerConfig_ = value;
+          }
+          onChanged();
+        } else {
+          peerConfigBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public Builder clearPeerConfig() {
+        if (peerConfigBuilder_ == null) {
+          peerConfig_ = null;
+          onChanged();
+        } else {
+          peerConfigBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder getPeerConfigBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getPeerConfigFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder getPeerConfigOrBuilder() {
+        if (peerConfigBuilder_ != null) {
+          return peerConfigBuilder_.getMessageOrBuilder();
+        } else {
+          return peerConfig_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.getDefaultInstance() : peerConfig_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.ReplicationPeer peer_config = 2;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder> 
+          getPeerConfigFieldBuilder() {
+        if (peerConfigBuilder_ == null) {
+          peerConfigBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeer.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.ReplicationPeerOrBuilder>(
+                  getPeerConfig(),
+                  getParentForChildren(),
+                  isClean());
+          peerConfig_ = null;
+        }
+        return peerConfigBuilder_;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.AddReplicationPeerRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.AddReplicationPeerRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<AddReplicationPeerRequest>() {
+      public AddReplicationPeerRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new AddReplicationPeerRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface AddReplicationPeerResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.AddReplicationPeerResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.AddReplicationPeerResponse}
+   */
+  public  static final class AddReplicationPeerResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.AddReplicationPeerResponse)
+      AddReplicationPeerResponseOrBuilder {
+    // Use AddReplicationPeerResponse.newBuilder() to construct.
+    private AddReplicationPeerResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private AddReplicationPeerResponse() {
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private AddReplicationPeerResponse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerResponse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.Builder.class);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) obj;
+
+      boolean result = true;
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.AddReplicationPeerResponse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.AddReplicationPeerResponse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerResponse_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_AddReplicationPeerResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.AddReplicationPeerResponse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.AddReplicationPeerResponse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerResponse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<AddReplicationPeerResponse>() {
+      public AddReplicationPeerResponse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new AddReplicationPeerResponse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerResponse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<AddReplicationPeerResponse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface RemoveReplicationPeerRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.RemoveReplicationPeerRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    boolean hasPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    java.lang.String getPeerId();
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RemoveReplicationPeerRequest}
+   */
+  public static final class RemoveReplicationPeerRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.RemoveReplicationPeerRequest)
+      RemoveReplicationPeerRequestOrBuilder {
+    // Use RemoveReplicationPeerRequest.newBuilder() to construct.
+    private RemoveReplicationPeerRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private RemoveReplicationPeerRequest() {
+      peerId_ = "";
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RemoveReplicationPeerRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000001;
+              peerId_ = bs;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int PEER_ID_FIELD_NUMBER = 1;
+    private volatile java.lang.Object peerId_;
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public boolean hasPeerId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public java.lang.String getPeerId() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          peerId_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string peer_id = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getPeerIdBytes() {
+      java.lang.Object ref = peerId_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        peerId_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasPeerId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 1, peerId_);
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(1, peerId_);
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest) obj;
+
+      boolean result = true;
+      result = result && (hasPeerId() == other.hasPeerId());
+      if (hasPeerId()) {
+        result = result && getPeerId()
+            .equals(other.getPeerId());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasPeerId()) {
+        hash = (37 * hash) + PEER_ID_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerId().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RemoveReplicationPeerRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.RemoveReplicationPeerRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        peerId_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.peerId_ = peerId_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance()) return this;
+        if (other.hasPeerId()) {
+          bitField0_ |= 0x00000001;
+          peerId_ = other.peerId_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasPeerId()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.lang.Object peerId_ = "";
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public boolean hasPeerId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public java.lang.String getPeerId() {
+        java.lang.Object ref = peerId_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            peerId_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getPeerIdBytes() {
+        java.lang.Object ref = peerId_;
+        if (ref instanceof String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          peerId_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerId(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder clearPeerId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        peerId_ = getDefaultInstance().getPeerId();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peer_id = 1;</code>
+       */
+      public Builder setPeerIdBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        peerId_ = value;
+        onChanged();
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RemoveReplicationPeerRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RemoveReplicationPeerRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RemoveReplicationPeerRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<RemoveReplicationPeerRequest>() {
+      public RemoveReplicationPeerRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new RemoveReplicationPeerRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RemoveReplicationPeerRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RemoveReplicationPeerRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface RemoveReplicationPeerResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.RemoveReplicationPeerResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RemoveReplicationPeerResponse}
+   */
+  public static final class RemoveReplicationPeerResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.RemoveReplicationPeerResponse)
+      RemoveReplicationPeerResponseOrBuilder {
+    // Use RemoveReplicationPeerResponse.newBuilder() to construct.
+    private RemoveReplicationPeerResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private RemoveReplicationPeerResponse() {
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RemoveReplicationPeerResponse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerResponse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static_hbase_pb_RemoveReplicationPeerResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.Builder.class);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse) obj;
+
+      boolean result = true;
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RemoveReplicationPeerResponse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.RemoveReplicationPeerResponse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.internal_static

<TRUNCATED>