Posted to commits@hbase.apache.org by st...@apache.org on 2017/05/26 23:47:16 UTC

[08/59] [abbrv] hbase git commit: Revert "HBASE-14614 Procedure v2 - Core Assignment Manager (Matteo Bertozzi)" Revert a mistaken commit!!!

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index ed19dc9..b3ca94d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -18,6 +18,8 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -25,17 +27,8 @@ import java.net.BindException;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
 import java.util.Map.Entry;
-import java.util.NavigableMap;
-import java.util.Set;
-import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -79,7 +72,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.VersionInfoUtil;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
 import org.apache.hadoop.hbase.exceptions.ScannerResetException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
@@ -127,12 +119,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
@@ -145,8 +137,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerIn
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
@@ -204,6 +194,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos.ScanMet
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaSnapshotsResponse.TableQuotaSnapshot;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescriptor;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -221,8 +212,6 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.zookeeper.KeeperException;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * Implements the regionserver RPC services.
  */
@@ -1476,6 +1465,36 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
   }
 
+  @Override
+  @QosPriority(priority=HConstants.ADMIN_QOS)
+  public CloseRegionForSplitOrMergeResponse closeRegionForSplitOrMerge(
+      final RpcController controller,
+      final CloseRegionForSplitOrMergeRequest request) throws ServiceException {
+    try {
+      checkOpen();
+
+      List<String> encodedRegionNameList = new ArrayList<>();
+      for(int i = 0; i < request.getRegionCount(); i++) {
+        final String encodedRegionName = ProtobufUtil.getRegionEncodedName(request.getRegion(i));
+
+        // Can be null if we're calling close on a region that's not online
+        final Region targetRegion = regionServer.getFromOnlineRegions(encodedRegionName);
+        if ((targetRegion != null) && (targetRegion.getCoprocessorHost() != null)) {
+          targetRegion.getCoprocessorHost().preClose(false);
+          encodedRegionNameList.add(encodedRegionName);
+        }
+      }
+      requestCount.increment();
+      LOG.info("Close and offline " + encodedRegionNameList + " regions.");
+      boolean closed = regionServer.closeAndOfflineRegionForSplitOrMerge(encodedRegionNameList);
+      CloseRegionForSplitOrMergeResponse.Builder builder =
+          CloseRegionForSplitOrMergeResponse.newBuilder().setClosed(closed);
+      return builder.build();
+    } catch (IOException ie) {
+      throw new ServiceException(ie);
+    }
+  }
+
   /**
    * Compact a region on the region server.
    *
@@ -1626,8 +1645,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       if (request.hasCompactionState() && request.getCompactionState()) {
         builder.setCompactionState(region.getCompactionState());
       }
-      builder.setSplittable(region.isSplittable());
-      builder.setMergeable(region.isMergeable());
       builder.setIsRecovering(region.isRecovering());
       return builder.build();
     } catch (IOException ie) {
@@ -1838,11 +1855,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           // The region is already online. This should not happen any more.
           String error = "Received OPEN for the region:"
             + region.getRegionNameAsString() + ", which is already online";
-          LOG.warn(error);
-          //regionServer.abort(error);
-          //throw new IOException(error);
-          builder.addOpeningState(RegionOpeningState.OPENED);
-          continue;
+          regionServer.abort(error);
+          throw new IOException(error);
         }
         LOG.info("Open " + region.getRegionNameAsString());
 
@@ -3382,62 +3396,4 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       throw new ServiceException(e);
     }
   }
-
-  @Override
-  public ExecuteProceduresResponse executeProcedures(RpcController controller,
-       ExecuteProceduresRequest request) throws ServiceException {
-    ExecuteProceduresResponse.Builder builder = ExecuteProceduresResponse.newBuilder();
-    if (request.getOpenRegionCount() > 0) {
-      for (OpenRegionRequest req: request.getOpenRegionList()) {
-        builder.addOpenRegion(openRegion(controller, req));
-      }
-     }
-     if (request.getCloseRegionCount() > 0) {
-       for (CloseRegionRequest req: request.getCloseRegionList()) {
-         builder.addCloseRegion(closeRegion(controller, req));
-       }
-     }
-     return builder.build();
-  }
-
-  /**
-   * Merge regions on the region server.
-   *
-   * @param controller the RPC controller
-   * @param request the request
-   * @return merge regions response
-   * @throws ServiceException
-   */
-  @Override
-  @QosPriority(priority = HConstants.ADMIN_QOS)
-  // UNUSED AS OF AMv2 PURGE!
-  public MergeRegionsResponse mergeRegions(final RpcController controller,
-      final MergeRegionsRequest request) throws ServiceException {
-    try {
-      checkOpen();
-      requestCount.increment();
-      Region regionA = getRegion(request.getRegionA());
-      Region regionB = getRegion(request.getRegionB());
-      boolean forcible = request.getForcible();
-      long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1;
-      regionA.startRegionOperation(Operation.MERGE_REGION);
-      regionB.startRegionOperation(Operation.MERGE_REGION);
-      if (regionA.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
-          regionB.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
-        throw new ServiceException(new MergeRegionException("Can't merge non-default replicas"));
-      }
-      LOG.info("Receiving merging request for  " + regionA + ", " + regionB
-          + ",forcible=" + forcible);
-      regionA.flush(true);
-      regionB.flush(true);
-      regionServer.compactSplitThread.requestRegionsMerge(regionA, regionB, forcible,
-          masterSystemTime, RpcServer.getRequestUser());
-      return MergeRegionsResponse.newBuilder().build();
-    } catch (DroppedSnapshotException ex) {
-      regionServer.abort("Replay of WAL required. Forcing server shutdown", ex);
-      throw new ServiceException(ex);
-    } catch (IOException ie) {
-      throw new ServiceException(ie);
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 6c4eca9..295b825 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -96,14 +96,6 @@ public interface Region extends ConfigurationObserver {
   /** @return True if region is read only */
   boolean isReadOnly();
 
-  /** @return true if region is splittable */
-  boolean isSplittable();
-
-  /**
-   * @return true if region is mergeable
-   */
-  boolean isMergeable();
-
   /**
    * Return the list of Stores managed by this region
    * <p>Use with caution.  Exposed for use of fixup utilities.

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
deleted file mode 100644
index e95932b..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.security.PrivilegedAction;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Handles processing region merges. Put in a queue, owned by HRegionServer.
- */
-// UNUSED: REMOVE!!!
-@InterfaceAudience.Private
-class RegionMergeRequest implements Runnable {
-  private static final Log LOG = LogFactory.getLog(RegionMergeRequest.class);
-  private final HRegionInfo region_a;
-  private final HRegionInfo region_b;
-  private final HRegionServer server;
-  private final boolean forcible;
-  private final User user;
-
-  RegionMergeRequest(Region a, Region b, HRegionServer hrs, boolean forcible,
-      long masterSystemTime, User user) {
-    Preconditions.checkNotNull(hrs);
-    this.region_a = a.getRegionInfo();
-    this.region_b = b.getRegionInfo();
-    this.server = hrs;
-    this.forcible = forcible;
-    this.user = user;
-  }
-
-  @Override
-  public String toString() {
-    return "MergeRequest,regions:" + region_a + ", " + region_b + ", forcible="
-        + forcible;
-  }
-
-  private void doMerge() {
-    boolean success = false;
-    //server.metricsRegionServer.incrMergeRequest();
-
-    if (user != null && user.getUGI() != null) {
-      user.getUGI().doAs (new PrivilegedAction<Void>() {
-        @Override
-        public Void run() {
-          requestRegionMerge();
-          return null;
-        }
-      });
-    } else {
-      requestRegionMerge();
-    }
-  }
-
-  private void requestRegionMerge() {
-    final TableName table = region_a.getTable();
-    if (!table.equals(region_b.getTable())) {
-      LOG.error("Can't merge regions from two different tables: " + region_a + ", " + region_b);
-      return;
-    }
-
-    // TODO: fake merged region for compat with the report protocol
-    final HRegionInfo merged = new HRegionInfo(table);
-
-    // Send the split request to the master. the master will do the validation on the split-key.
-    // The parent region will be unassigned and the two new regions will be assigned.
-    // hri_a and hri_b objects may not reflect the regions that will be created, those objectes
-    // are created just to pass the information to the reportRegionStateTransition().
-    if (!server.reportRegionStateTransition(TransitionCode.READY_TO_MERGE, merged, region_a, region_b)) {
-      LOG.error("Unable to ask master to merge: " + region_a + ", " + region_b);
-    }
-  }
-
-  @Override
-  public void run() {
-    if (this.server.isStopping() || this.server.isStopped()) {
-      LOG.debug("Skipping merge because server is stopping="
-          + this.server.isStopping() + " or stopped=" + this.server.isStopped());
-      return;
-    }
-
-    doMerge();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 5afa652..54aeaa6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -183,6 +183,16 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
   boolean reportRegionStateTransition(TransitionCode code, HRegionInfo... hris);
 
   /**
+   * Notify master that a region wants to be splitted.
+   */
+  long requestRegionSplit(final HRegionInfo regionInfo, final byte[] splitRow);
+
+  /**
+   * Check with master whether a procedure is completed (either succeed or fail)
+   */
+  boolean isProcedureFinished(final long procId) throws IOException;
+
+  /**
    * Returns a reference to the region server's RPC server
    */
   RpcServerInterface getRpcServer();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionUnassigner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionUnassigner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionUnassigner.java
index 8eb78a2..b347b4b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionUnassigner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionUnassigner.java
@@ -48,7 +48,8 @@ class RegionUnassigner {
       return;
     }
     unassigning = true;
-    new Thread("RegionUnassigner." + regionInfo.getEncodedName()) {
+    new Thread("Unassign-" + regionInfo) {
+
       @Override
       public void run() {
         LOG.info("Unassign " + regionInfo.getRegionNameAsString());
@@ -64,4 +65,4 @@ class RegionUnassigner {
       }
     }.start();
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
index bd59c53..eb9811d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitRequest.java
@@ -18,16 +18,16 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.io.IOException;
 import java.security.PrivilegedAction;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.ipc.RemoteException;
 
 import com.google.common.base.Preconditions;
 
@@ -37,14 +37,14 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Private
 class SplitRequest implements Runnable {
   private static final Log LOG = LogFactory.getLog(SplitRequest.class);
-  private final HRegionInfo parent;
+  private final HRegion parent;
   private final byte[] midKey;
   private final HRegionServer server;
   private final User user;
 
   SplitRequest(Region region, byte[] midKey, HRegionServer hrs, User user) {
     Preconditions.checkNotNull(hrs);
-    this.parent = region.getRegionInfo();
+    this.parent = (HRegion)region;
     this.midKey = midKey;
     this.server = hrs;
     this.user = user;
@@ -56,30 +56,67 @@ class SplitRequest implements Runnable {
   }
 
   private void doSplitting() {
+    boolean success = false;
     server.metricsRegionServer.incrSplitRequest();
-    if (user != null && user.getUGI() != null) {
-      user.getUGI().doAs (new PrivilegedAction<Void>() {
-        @Override
-        public Void run() {
-          requestRegionSplit();
-          return null;
+    long startTime = EnvironmentEdgeManager.currentTime();
+
+    try {
+      long procId;
+      if (user != null && user.getUGI() != null) {
+        procId = user.getUGI().doAs (new PrivilegedAction<Long>() {
+          @Override
+          public Long run() {
+            try {
+              return server.requestRegionSplit(parent.getRegionInfo(), midKey);
+            } catch (Exception e) {
+              LOG.error("Failed to complete region split ", e);
+            }
+            return (long)-1;
+          }
+        });
+      } else {
+        procId = server.requestRegionSplit(parent.getRegionInfo(), midKey);
+      }
+
+      if (procId != -1) {
+        // wait for the split to complete or get interrupted.  If the split completes successfully,
+        // the procedure will return true; if the split fails, the procedure would throw exception.
+        //
+        try {
+          while (!(success = server.isProcedureFinished(procId))) {
+            try {
+              Thread.sleep(1000);
+            } catch (InterruptedException e) {
+              LOG.warn("Split region " + parent + " is still in progress.  Not waiting...");
+              break;
+            }
+          }
+        } catch (IOException e) {
+          LOG.error("Split region " + parent + " failed.", e);
         }
-      });
-    } else {
-      requestRegionSplit();
-    }
-  }
+      } else {
+        LOG.error("Fail to split region " + parent);
+      }
+    } finally {
+      if (this.parent.getCoprocessorHost() != null) {
+        try {
+          this.parent.getCoprocessorHost().postCompleteSplit();
+        } catch (IOException io) {
+          LOG.error("Split failed " + this,
+            io instanceof RemoteException ? ((RemoteException) io).unwrapRemoteException() : io);
+        }
+      }
+
+      // Update regionserver metrics with the split transaction total running time
+      server.metricsRegionServer.updateSplitTime(EnvironmentEdgeManager.currentTime() - startTime);
+
+      if (parent.shouldForceSplit()) {
+        parent.clearSplit();
+      }
 
-  private void requestRegionSplit() {
-    final TableName table = parent.getTable();
-    final HRegionInfo hri_a = new HRegionInfo(table, parent.getStartKey(), midKey);
-    final HRegionInfo hri_b = new HRegionInfo(table, midKey, parent.getEndKey());
-    // Send the split request to the master. the master will do the validation on the split-key.
-    // The parent region will be unassigned and the two new regions will be assigned.
-    // hri_a and hri_b objects may not reflect the regions that will be created, those objects
-    // are created just to pass the information to the reportRegionStateTransition().
-    if (!server.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT, parent, hri_a, hri_b)) {
-      LOG.error("Unable to ask master to split " + parent.getRegionNameAsString());
+      if (success) {
+        server.metricsRegionServer.incrSplitSuccess();
+      }
     }
   }
 
@@ -93,4 +130,4 @@ class SplitRequest implements Runnable {
 
     doSplitting();
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
index 3ecc750..5ff7a1e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseRegionHandler.java
@@ -125,4 +125,4 @@ public class CloseRegionHandler extends EventHandler {
         remove(this.regionInfo.getEncodedNameAsBytes(), Boolean.FALSE);
     }
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index f1e42a6..dca02e4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -559,7 +559,7 @@ public class HBaseFsck extends Configured implements Closeable {
     errors.print("Number of requests: " + status.getRequestsCount());
     errors.print("Number of regions: " + status.getRegionsCount());
 
-    List<RegionState> rits = status.getRegionsInTransition();
+    Set<RegionState> rits = status.getRegionsInTransition();
     errors.print("Number of regions in transition: " + rits.size());
     if (details) {
       for (RegionState state: rits) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
index 8ea7012..d7749c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.master.AssignmentManager;
 
 /**
  * Utility methods for interacting with the regions.
@@ -223,7 +223,7 @@ public abstract class ModifyRegionUtils {
   static ThreadPoolExecutor getRegionOpenAndInitThreadPool(final Configuration conf,
       final String threadNamePrefix, int regionNumber) {
     int maxThreads = Math.min(regionNumber, conf.getInt(
-        "hbase.hregion.open.and.init.threads.max", 16));
+        "hbase.hregion.open.and.init.threads.max", 10));
     ThreadPoolExecutor regionOpenAndInitThreadPool = Threads
     .getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS,
         new ThreadFactory() {
@@ -236,4 +236,24 @@ public abstract class ModifyRegionUtils {
         });
     return regionOpenAndInitThreadPool;
   }
+
+  /**
+   * Triggers a bulk assignment of the specified regions
+   *
+   * @param assignmentManager the Assignment Manger
+   * @param regionInfos the list of regions to assign
+   * @throws IOException if an error occurred during the assignment
+   */
+  public static void assignRegions(final AssignmentManager assignmentManager,
+      final List<HRegionInfo> regionInfos) throws IOException {
+    try {
+      assignmentManager.getRegionStates().createRegionStates(regionInfos);
+      assignmentManager.assign(regionInfos);
+    } catch (InterruptedException e) {
+      LOG.error("Caught " + e + " during round-robin assignment");
+      InterruptedIOException ie = new InterruptedIOException(e.getMessage());
+      ie.initCause(e);
+      throw ie;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 517a0cd..e8069ec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -289,8 +289,8 @@ public class WALSplitter {
     this.fileBeingSplit = logfile;
     try {
       long logLength = logfile.getLen();
-      LOG.info("Splitting WAL=" + logPath + ", length=" + logLength +
-          ", distributedLogReplay=" + this.distributedLogReplay);
+      LOG.info("Splitting wal: " + logPath + ", length=" + logLength);
+      LOG.info("DistributedLogReplay = " + this.distributedLogReplay);
       status.setStatus("Opening log file");
       if (reporter != null && !reporter.progress()) {
         progress_failed = true;
@@ -298,7 +298,7 @@ public class WALSplitter {
       }
       in = getReader(logfile, skipErrors, reporter);
       if (in == null) {
-        LOG.warn("Nothing to split in WAL=" + logPath);
+        LOG.warn("Nothing to split in log file " + logPath);
         return true;
       }
       int numOpenedFilesBeforeReporting = conf.getInt("hbase.splitlog.report.openedfiles", 3);
@@ -377,7 +377,7 @@ public class WALSplitter {
       iie.initCause(ie);
       throw iie;
     } catch (CorruptedLogFileException e) {
-      LOG.warn("Could not parse, corrupted WAL=" + logPath, e);
+      LOG.warn("Could not parse, corrupted log file " + logPath, e);
       if (this.csm != null) {
         // Some tests pass in a csm of null.
         this.csm.getSplitLogWorkerCoordination().markCorrupted(rootDir,
@@ -397,7 +397,7 @@ public class WALSplitter {
           in.close();
         }
       } catch (IOException exception) {
-        LOG.warn("Could not close WAL reader: " + exception.getMessage());
+        LOG.warn("Could not close wal reader: " + exception.getMessage());
         LOG.debug("exception details", exception);
       }
       try {
@@ -1595,10 +1595,8 @@ public class WALSplitter {
           if (wap == null) {
             wap = getWriterAndPath(logEntry);
             if (wap == null) {
-              if (LOG.isTraceEnabled()) {
-                // This log spews the full edit. Can be massive in the log. Enable only debugging
-                // WAL lost edit issues.
-                LOG.trace("getWriterAndPath decided we don't need to write edits for " + logEntry);
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("getWriterAndPath decided we don't need to write edits for " + logEntry);
               }
               return;
             }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
index a6a5c17..69cd233 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/RegionServerTracker.java
@@ -88,8 +88,8 @@ public class RegionServerTracker extends ZooKeeperListener {
               int magicLen = ProtobufUtil.lengthOfPBMagic();
               ProtobufUtil.mergeFrom(rsInfoBuilder, data, magicLen, data.length - magicLen);
             }
-            if (LOG.isTraceEnabled()) {
-              LOG.trace("Added tracking of RS " + nodePath);
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("Added tracking of RS " + nodePath);
             }
           } catch (KeeperException e) {
             LOG.warn("Get Rs info port from ephemeral node", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 5c8b29b..afc070d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -86,10 +86,10 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.mapreduce.MapreduceTestingShim;
+import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.ChunkCreator;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -3323,14 +3323,13 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   public void moveRegionAndWait(HRegionInfo destRegion, ServerName destServer)
       throws InterruptedException, IOException {
     HMaster master = getMiniHBaseCluster().getMaster();
-    // TODO: Here we start the move. The move can take a while.
-    getAdmin().move(destRegion.getEncodedNameAsBytes(),
+    getHBaseAdmin().move(destRegion.getEncodedNameAsBytes(),
         Bytes.toBytes(destServer.getServerName()));
     while (true) {
       ServerName serverName = master.getAssignmentManager().getRegionStates()
           .getRegionServerOfRegion(destRegion);
       if (serverName != null && serverName.equals(destServer)) {
-        assertRegionOnServer(destRegion, serverName, 2000);
+        assertRegionOnServer(destRegion, serverName, 200);
         break;
       }
       Thread.sleep(10);
@@ -3995,7 +3994,8 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
         if (master == null) return false;
         AssignmentManager am = master.getAssignmentManager();
         if (am == null) return false;
-        return !am.hasRegionsInTransition();
+        final RegionStates regionStates = am.getRegionStates();
+        return !regionStates.isRegionsInTransition();
       }
     };
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index a99345b..eefde94 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -302,6 +302,16 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
+  public long requestRegionSplit(final HRegionInfo regionInfo, final byte[] splitRow) {
+    return -1;
+  }
+
+  @Override
+  public boolean isProcedureFinished(final long procId) {
+    return false;
+  }
+
+  @Override
   public boolean registerService(Service service) {
     // TODO Auto-generated method stub
     return false;

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
index cff1a8d..283d79d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionRebalancing.java
@@ -21,18 +21,13 @@ package org.apache.hadoop.hbase;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.FlakeyTests;
@@ -42,18 +37,21 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
 /**
  * Test whether region re-balancing works. (HBASE-71)
  */
-@Ignore // This is broken since new RegionServers does proper average of regions
-// and because Master is treated as a regionserver though it hosts two regions only.
 @Category({FlakeyTests.class, LargeTests.class})
 @RunWith(value = Parameterized.class)
 public class TestRegionRebalancing {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index f84d9c2..7b69db4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -42,18 +42,23 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.InvalidFamilyOperationException;
+import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.exceptions.MergeRegionException;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -98,7 +103,7 @@ public class TestAdmin1 {
 
   @Before
   public void setUp() throws Exception {
-    this.admin = TEST_UTIL.getAdmin();
+    this.admin = TEST_UTIL.getHBaseAdmin();
   }
 
   @After
@@ -746,7 +751,7 @@ public class TestAdmin1 {
 
     desc = new HTableDescriptor(TABLE_2);
     desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
-    admin = TEST_UTIL.getAdmin();
+    admin = TEST_UTIL.getHBaseAdmin();
     admin.createTable(desc, startKey, endKey, expectedRegions);
 
     try (RegionLocator l = TEST_UTIL.getConnection().getRegionLocator(TABLE_2)) {
@@ -801,7 +806,7 @@ public class TestAdmin1 {
 
     desc = new HTableDescriptor(TABLE_3);
     desc.addFamily(new HColumnDescriptor(HConstants.CATALOG_FAMILY));
-    admin = TEST_UTIL.getAdmin();
+    admin = TEST_UTIL.getHBaseAdmin();
     admin.createTable(desc, startKey, endKey, expectedRegions);
 
 
@@ -987,7 +992,7 @@ public class TestAdmin1 {
       sb.append("_").append(Integer.toString(rowCounts[i]));
     }
     assertFalse(admin.tableExists(tableName));
-    try (final Table table = TEST_UTIL.createTable(tableName, familyNames,
+    try(final Table table = TEST_UTIL.createTable(tableName, familyNames,
       numVersions, blockSize);
       final RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
 
@@ -1052,7 +1057,8 @@ public class TestAdmin1 {
             } catch (IOException e) {
               e.printStackTrace();
             }
-            if (regions == null) continue;
+            if (regions == null)
+              continue;
             count.set(regions.size());
             if (count.get() >= 2) {
               LOG.info("Found: " + regions);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index 607fc61..0014401 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -54,7 +54,7 @@ import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
-import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -528,6 +528,8 @@ public class TestAdmin2 {
     List<HRegionInfo> tableRegions = localAdmin.getTableRegions(tableName);
     HRegionInfo hri = tableRegions.get(0);
     AssignmentManager am = master.getAssignmentManager();
+    assertTrue("Region " + hri.getRegionNameAsString()
+      + " should be assigned properly", am.waitForAssignment(hri));
     ServerName server = am.getRegionStates().getRegionServerOfRegion(hri);
     localAdmin.move(hri.getEncodedNameAsBytes(), Bytes.toBytes(server.getServerName()));
     assertEquals("Current region server and region server before move should be same.", server,

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
index 074a421..04bd224 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionAdminApi.java
@@ -26,7 +26,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
 
@@ -35,14 +34,14 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.master.NoSuchProcedureException;
 import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-import org.apache.hadoop.hbase.master.assignment.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -53,7 +52,6 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.Assert;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -323,10 +321,17 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
       desc.addFamily(new HColumnDescriptor(FAMILY));
       admin.createTable(desc).get();
 
+      // add region to meta.
+      Table meta = TEST_UTIL.getConnection().getTable(TableName.META_TABLE_NAME);
+      HRegionInfo hri =
+          new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
+      MetaTableAccessor.addRegionToMeta(meta, hri);
+
       // assign region.
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
       AssignmentManager am = master.getAssignmentManager();
-      HRegionInfo hri = am.getRegionStates().getRegionsOfTable(tableName).get(0);
+      admin.assign(hri.getRegionName()).get();
+      am.waitForAssignment(hri);
 
       // assert region on server
       RegionStates regionStates = am.getRegionStates();
@@ -335,32 +340,22 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
       assertTrue(regionStates.getRegionState(hri).isOpened());
 
       // Region is assigned now. Let's assign it again.
-      // Master should not abort, and region should stay assigned.
+      // Master should not abort, and region should be assigned.
       admin.assign(hri.getRegionName()).get();
-      try {
-        am.waitForAssignment(hri);
-        fail("Expected NoSuchProcedureException");
-      } catch (NoSuchProcedureException e) {
-        // Expected
-      }
+      am.waitForAssignment(hri);
       assertTrue(regionStates.getRegionState(hri).isOpened());
 
       // unassign region
       admin.unassign(hri.getRegionName(), true).get();
-      try {
-        am.waitForAssignment(hri);
-        fail("Expected NoSuchProcedureException");
-      } catch (NoSuchProcedureException e) {
-        // Expected
-      }
-      assertTrue(regionStates.getRegionState(hri).isClosed());
+      am.waitForAssignment(hri);
+      assertTrue(regionStates.getRegionState(hri).isOpened());
     } finally {
       TEST_UTIL.deleteTable(tableName);
     }
   }
 
   HRegionInfo createTableAndGetOneRegion(final TableName tableName)
-      throws IOException, InterruptedException, ExecutionException {
+      throws IOException, InterruptedException {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(new HColumnDescriptor(FAMILY));
     admin.createTable(desc, Bytes.toBytes("A"), Bytes.toBytes("Z"), 5);
@@ -382,12 +377,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
     }
   }
 
-  @Ignore @Test
-  // Turning off this tests in AMv2. Doesn't make sense.Offlining means something
-  // different now.
-  // You can't 'offline' a region unless you know what you are doing
-  // Will cause the Master to tell the regionserver to shut itself down because
-  // regionserver is reporting the state as OPEN.
+  @Test
   public void testOfflineRegion() throws Exception {
     final TableName tableName = TableName.valueOf("testOfflineRegion");
     try {
@@ -395,6 +385,8 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
 
       RegionStates regionStates =
           TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().getRegionStates();
+      ServerName serverName = regionStates.getRegionServerOfRegion(hri);
+      TEST_UTIL.assertRegionOnServer(hri, serverName, 200);
       admin.offline(hri.getRegionName()).get();
 
       long timeoutTime = System.currentTimeMillis() + 3000;
@@ -450,7 +442,7 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
         if (now > timeoutTime) {
           fail("Failed to move the region in time: " + regionStates.getRegionState(hri));
         }
-        regionStates.wait(50);
+        regionStates.waitForUpdate(50);
       }
     } finally {
       TEST_UTIL.deleteTable(tableName);
@@ -474,9 +466,6 @@ public class TestAsyncRegionAdminApi extends TestAsyncAdminBase {
             regionServerCount.incrementAndGet();
           });
       Assert.assertEquals(regionServerCount.get(), 2);
-    } catch (Exception e) {
-      LOG.info("Exception", e);
-      throw e;
     } finally {
       TEST_UTIL.deleteTable(tableName);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index 2abc54d..913c2e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -47,14 +47,12 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
  * Will split the table, and move region randomly when testing.
  */
-@Ignore // Can't move hbase:meta off master server in AMv2. TODO.
 @Category({ LargeTests.class, ClientTests.class })
 public class TestAsyncTableGetMultiThreaded {
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java
index b9f11d5..8743266 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithBasicCompaction.java
@@ -17,14 +17,13 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 
-@Ignore // Can't move hbase:meta off master server in AMv2. TODO.
 @Category({ LargeTests.class, ClientTests.class })
 public class TestAsyncTableGetMultiThreadedWithBasicCompaction extends
     TestAsyncTableGetMultiThreaded {
@@ -33,4 +32,5 @@ public class TestAsyncTableGetMultiThreadedWithBasicCompaction extends
   public static void setUp() throws Exception {
     setUp(MemoryCompactionPolicy.BASIC);
   }
-}
\ No newline at end of file
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java
index dd94398..ef75373 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreadedWithEagerCompaction.java
@@ -17,14 +17,13 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.experimental.categories.Category;
 
-@Ignore // Can't move hbase:meta off master server in AMv2. TODO.
 @Category({ LargeTests.class, ClientTests.class })
 public class TestAsyncTableGetMultiThreadedWithEagerCompaction extends
     TestAsyncTableGetMultiThreaded {
@@ -33,4 +32,5 @@ public class TestAsyncTableGetMultiThreadedWithEagerCompaction extends
   public static void setUp() throws Exception {
     setUp(MemoryCompactionPolicy.EAGER);
   }
-}
\ No newline at end of file
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
index 6e50312..023095f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestBlockEvictionFromClient.java
@@ -593,14 +593,12 @@ public class TestBlockEvictionFromClient {
       put.addColumn(FAMILY, QUALIFIER2, data2);
       table.put(put);
       region.flush(true);
-      LOG.info("About to SPLIT on " + Bytes.toString(ROW1));
       TEST_UTIL.getAdmin().split(tableName, ROW1);
       List<HRegionInfo> tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName);
       // Wait for splits
       while (tableRegions.size() != 2) {
         tableRegions = TEST_UTIL.getAdmin().getTableRegions(tableName);
         Thread.sleep(100);
-        LOG.info("Waiting on SPLIT to complete...");
       }
       region.compact(true);
       Iterator<CachedBlock> iterator = cache.iterator();

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java
index e1277fa..80b7208 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestEnableTable.java
@@ -18,15 +18,14 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.io.IOException;
 import java.util.concurrent.CountDownLatch;
 
+import com.google.common.base.Predicate;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -37,11 +36,16 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -53,9 +57,9 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterables;
-import com.google.common.collect.Lists;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 @Category({ MasterTests.class, MediumTests.class })
 public class TestEnableTable {
@@ -101,17 +105,15 @@ public class TestEnableTable {
     rs.getRegionServer().stop("stop");
     cluster.waitForRegionServerToStop(rs.getRegionServer().getServerName(), 10000);
 
-    // We used to enable the table here but AMv2 would hang waiting on a RS to check-in.
-    // Revisit.
+    LOG.debug("Now enabling table " + tableName);
+
+    admin.enableTable(tableName);
+    assertTrue(admin.isTableEnabled(tableName));
 
     JVMClusterUtil.RegionServerThread rs2 = cluster.startRegionServer();
     cluster.waitForRegionServerToStart(rs2.getRegionServer().getServerName().getHostname(),
         rs2.getRegionServer().getServerName().getPort(), 60000);
 
-    LOG.debug("Now enabling table " + tableName);
-    admin.enableTable(tableName);
-    assertTrue(admin.isTableEnabled(tableName));
-
     List<HRegionInfo> regions = TEST_UTIL.getAdmin().getTableRegions(tableName);
     assertEquals(1, regions.size());
     for (HRegionInfo region : regions) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index 9eaa716..7f44a2a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -677,8 +677,6 @@ public class TestFromClientSide3 {
             });
           fail("This cp should fail because the target lock is blocked by previous put");
         } catch (Throwable ex) {
-          // TODO!!!! Is this right? It catches everything including the above fail
-          // if it happens (which it seems too....)
         }
       });
       cpService.shutdown();

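The TODO deleted in the hunk above flags a genuine pitfall in the restored code: org.junit.Assert.fail() throws an AssertionError, so a bare catch (Throwable ex) {} right after it swallows the failure and the test can pass even when the coprocessor call never throws. A minimal sketch of one way to keep the assertion live; doomedCoprocessorCall() is a hypothetical stand-in for the coprocessor service invocation in the test:

    try {
      doomedCoprocessorCall();  // hypothetical stand-in for the coprocessor invocation
      fail("This cp should fail because the target lock is blocked by previous put");
    } catch (AssertionError e) {
      // rethrow so the fail() above still fails the test instead of being swallowed
      throw e;
    } catch (Throwable expected) {
      // expected path: the coprocessor call threw because the row lock was held
    }
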
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
index e99ee07..bfe10b5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestHCM.java
@@ -18,12 +18,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+import com.google.common.collect.Lists;
 
 import java.io.IOException;
 import java.lang.reflect.Field;
@@ -65,6 +60,7 @@ import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.ServerTooBusyException;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -86,7 +82,12 @@ import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
-import com.google.common.collect.Lists;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 /**
  * This class is for testing HBaseConnectionManager features
@@ -230,6 +231,8 @@ public class TestHCM {
     TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, RPC_RETRY);
     // simulate queue blocking in testDropTimeoutRequest
     TEST_UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 1);
+    // Used in testServerBusyException
+    TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD, 3);
     TEST_UTIL.startMiniCluster(2);
   }
 
@@ -967,7 +970,7 @@ public class TestHCM {
    * that we really delete it.
    * @throws Exception
    */
-  @Ignore @Test
+  @Test
   public void testRegionCaching() throws Exception{
     TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAM_NAM).close();
     Configuration conf =  new Configuration(TEST_UTIL.getConfiguration());
@@ -1033,7 +1036,7 @@ public class TestHCM {
     Assert.assertNotNull(curServer.getOnlineRegion(regionName));
     Assert.assertNull(destServer.getOnlineRegion(regionName));
     Assert.assertFalse(TEST_UTIL.getMiniHBaseCluster().getMaster().
-        getAssignmentManager().hasRegionsInTransition());
+        getAssignmentManager().getRegionStates().isRegionsInTransition());
 
     // Moving. It's possible that we don't have all the regions online at this point, so
     //  the test must depends only on the region we're looking at.
@@ -1046,7 +1049,7 @@ public class TestHCM {
     while (destServer.getOnlineRegion(regionName) == null ||
         destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) ||
         curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) ||
-        master.getAssignmentManager().hasRegionsInTransition()) {
+        master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
       // wait for the move to be finished
       Thread.sleep(1);
     }
@@ -1105,7 +1108,7 @@ public class TestHCM {
     while (curServer.getOnlineRegion(regionName) == null ||
         destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) ||
         curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) ||
-        master.getAssignmentManager().hasRegionsInTransition()) {
+        master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
       // wait for the move to be finished
       Thread.sleep(1);
     }
@@ -1290,7 +1293,7 @@ public class TestHCM {
     return prevNumRetriesVal;
   }
 
-  @Ignore @Test
+  @Test
   public void testMulti() throws Exception {
     Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME3, FAM_NAM);
     try {
@@ -1325,8 +1328,6 @@ public class TestHCM {
       HRegionServer destServer = TEST_UTIL.getHBaseCluster().getRegionServer(destServerId);
 
       ServerName destServerName = destServer.getServerName();
-      ServerName metaServerName = TEST_UTIL.getHBaseCluster().getServerHoldingMeta();
-      assertTrue(!destServerName.equals(metaServerName));
 
        //find another row in the cur server that is less than ROW_X
       List<Region> regions = curServer.getOnlineRegions(TABLE_NAME3);
@@ -1352,11 +1353,11 @@ public class TestHCM {
       Assert.assertNotNull(curServer.getOnlineRegion(regionName));
       Assert.assertNull(destServer.getOnlineRegion(regionName));
       Assert.assertFalse(TEST_UTIL.getMiniHBaseCluster().getMaster().
-          getAssignmentManager().hasRegionsInTransition());
+          getAssignmentManager().getRegionStates().isRegionsInTransition());
 
        // Moving. It's possible that we don't have all the regions online at this point, so
-      //  the test depends only on the region we're looking at.
-      LOG.info("Move starting region=" + toMove.getRegionInfo().getRegionNameAsString());
+      //  the test must depends only on the region we're looking at.
+      LOG.info("Move starting region="+toMove.getRegionInfo().getRegionNameAsString());
       TEST_UTIL.getAdmin().move(
           toMove.getRegionInfo().getEncodedNameAsBytes(),
           destServerName.getServerName().getBytes()
@@ -1365,7 +1366,7 @@ public class TestHCM {
       while (destServer.getOnlineRegion(regionName) == null ||
           destServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) ||
           curServer.getRegionsInTransitionInRS().containsKey(encodedRegionNameBytes) ||
-          master.getAssignmentManager().hasRegionsInTransition()) {
+          master.getAssignmentManager().getRegionStates().isRegionsInTransition()) {
         // wait for the move to be finished
         Thread.sleep(1);
       }
@@ -1477,4 +1478,107 @@ public class TestHCM {
     table.close();
     connection.close();
   }
-}
\ No newline at end of file
+
+  private class TestPutThread extends Thread {
+    Table table;
+    int getServerBusyException = 0;
+
+    TestPutThread(Table table){
+      this.table = table;
+    }
+
+    @Override
+    public void run() {
+      try {
+        Put p = new Put(ROW);
+        p.addColumn(FAM_NAM, new byte[]{0}, new byte[]{0});
+        table.put(p);
+      } catch (RetriesExhaustedWithDetailsException e) {
+        if (e.exceptions.get(0) instanceof ServerTooBusyException) {
+          getServerBusyException = 1;
+        }
+      } catch (IOException ignore) {
+      }
+    }
+  }
+
+  private class TestGetThread extends Thread {
+    Table table;
+    int getServerBusyException = 0;
+
+    TestGetThread(Table table){
+      this.table = table;
+    }
+
+    @Override
+    public void run() {
+      try {
+        Get g = new Get(ROW);
+        g.addColumn(FAM_NAM, new byte[] { 0 });
+        table.get(g);
+      } catch (ServerTooBusyException e) {
+        getServerBusyException = 1;
+      } catch (IOException ignore) {
+      }
+    }
+  }
+
+  @Test()
+  public void testServerBusyException() throws Exception {
+    HTableDescriptor hdt = TEST_UTIL.createTableDescriptor(TableName.valueOf(name.getMethodName()));
+    hdt.addCoprocessor(SleepCoprocessor.class.getName());
+    Configuration c = new Configuration(TEST_UTIL.getConfiguration());
+    TEST_UTIL.createTable(hdt, new byte[][] { FAM_NAM }, c);
+
+    TestGetThread tg1 =
+        new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
+    TestGetThread tg2 =
+        new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
+    TestGetThread tg3 =
+        new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
+    TestGetThread tg4 =
+        new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
+    TestGetThread tg5 =
+        new TestGetThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
+    tg1.start();
+    tg2.start();
+    tg3.start();
+    tg4.start();
+    tg5.start();
+    tg1.join();
+    tg2.join();
+    tg3.join();
+    tg4.join();
+    tg5.join();
+    assertEquals(2,
+        tg1.getServerBusyException + tg2.getServerBusyException + tg3.getServerBusyException
+            + tg4.getServerBusyException + tg5.getServerBusyException);
+
+    // Put has its own logic in HTable, test Put alone. We use AsyncProcess for Put (use multi at
+    // RPC level) and it wraps exceptions in RetriesExhaustedWithDetailsException.
+
+    TestPutThread tp1 =
+        new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
+    TestPutThread tp2 =
+        new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
+    TestPutThread tp3 =
+        new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
+    TestPutThread tp4 =
+        new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
+    TestPutThread tp5 =
+        new TestPutThread(TEST_UTIL.getConnection().getTable(hdt.getTableName()));
+    tp1.start();
+    tp2.start();
+    tp3.start();
+    tp4.start();
+    tp5.start();
+    tp1.join();
+    tp2.join();
+    tp3.join();
+    tp4.join();
+    tp5.join();
+    assertEquals(2,
+        tp1.getServerBusyException + tp2.getServerBusyException + tp3.getServerBusyException
+            + tp4.getServerBusyException + tp5.getServerBusyException);
+  }
+}

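For context on the restored testServerBusyException: with HConstants.HBASE_CLIENT_PERSERVER_REQUESTS_THRESHOLD set to 3 in the class setup, only three concurrent requests per region server are admitted, so two of the five threads are rejected. A Get is a single RPC and sees the rejection directly as ServerTooBusyException, while a Put rides AsyncProcess (multi at the RPC level) and gets it back wrapped in RetriesExhaustedWithDetailsException. A minimal sketch of the two client-side handling paths, assuming the same fixtures as the test (TEST_UTIL, hdt, FAM_NAM, ROW) and the org.apache.hadoop.hbase.client package so the exceptions field is visible:

    try (Table table = TEST_UTIL.getConnection().getTable(hdt.getTableName())) {
      try {
        // Gets are single RPCs; the per-server limit surfaces directly.
        table.get(new Get(ROW).addColumn(FAM_NAM, new byte[] { 0 }));
      } catch (ServerTooBusyException e) {
        // too many concurrent requests against this server
      }
      try {
        // Puts go through AsyncProcess, so the same condition comes back
        // wrapped in RetriesExhaustedWithDetailsException.
        table.put(new Put(ROW).addColumn(FAM_NAM, new byte[] { 0 }, new byte[] { 0 }));
      } catch (RetriesExhaustedWithDetailsException e) {
        boolean busy = e.exceptions.get(0) instanceof ServerTooBusyException;
      }
    }
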
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 3847e6e..a700ebe 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -29,6 +29,7 @@ import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.ExecutorService;
 
+import edu.umd.cs.findbugs.annotations.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -44,7 +45,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.master.NoSuchProcedureException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
@@ -52,6 +52,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.HBaseFsck;
 import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter.ERROR_CODE;
 import org.apache.hadoop.hbase.util.HBaseFsckRepair;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil;
 import org.apache.hadoop.hbase.zookeeper.LoadBalancerTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
@@ -59,15 +60,12 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.junit.After;
 import org.junit.Before;
-import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 import org.junit.rules.TestRule;
 
-import edu.umd.cs.findbugs.annotations.Nullable;
-
 /**
  * Tests the scenarios where replicas are enabled for the meta table
  */
@@ -107,11 +105,7 @@ public class TestMetaWithReplicas {
     for (int replicaId = 1; replicaId < 3; replicaId ++) {
       HRegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(HRegionInfo.FIRST_META_REGIONINFO,
         replicaId);
-      try {
-        TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().waitForAssignment(h);
-      } catch (NoSuchProcedureException e) {
-        LOG.info("Presume the procedure has been cleaned up so just proceed: " + e.toString());
-      }
+      TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager().waitForAssignment(h);
     }
     LOG.debug("All meta replicas assigned");
   }
@@ -262,7 +256,7 @@ public class TestMetaWithReplicas {
     }
   }
 
-  @Ignore @Test // Uses FSCK. Needs fixing after HBASE-14614.
+  @Test
   public void testChangingReplicaCount() throws Exception {
     // tests changing the replica count across master restarts
     // reduce the replica count from 3 to 2
@@ -281,9 +275,6 @@ public class TestMetaWithReplicas {
     assert(metaZnodes.size() == originalReplicaCount); //we should have what was configured before
     TEST_UTIL.getHBaseClusterInterface().getConf().setInt(HConstants.META_REPLICAS_NUM,
         newReplicaCount);
-    if (TEST_UTIL.getHBaseCluster().countServedRegions() < newReplicaCount) {
-      TEST_UTIL.getHBaseCluster().startRegionServer();
-    }
     TEST_UTIL.getHBaseClusterInterface().startMaster(sn.getHostname(), 0);
     TEST_UTIL.getHBaseClusterInterface().waitForActiveAndReadyMaster();
     TEST_UTIL.waitFor(10000, predicateMetaHasReplicas(newReplicaCount));
@@ -340,7 +331,7 @@ public class TestMetaWithReplicas {
     HbckTestingUtil.assertNoErrors(hbck);
   }
 
-  @Ignore @Test // Disabled. Relies on FSCK which needs work for AMv2.
+  @Test
   public void testHBaseFsckWithFewerMetaReplicas() throws Exception {
     ClusterConnection c = (ClusterConnection)ConnectionFactory.createConnection(
         TEST_UTIL.getConfiguration());
@@ -358,7 +349,7 @@ public class TestMetaWithReplicas {
     assertErrors(hbck, new ERROR_CODE[]{});
   }
 
-  @Ignore @Test // The close silently doesn't work any more since HBASE-14614. Fix.
+  @Test
   public void testHBaseFsckWithFewerMetaReplicaZnodes() throws Exception {
     ClusterConnection c = (ClusterConnection)ConnectionFactory.createConnection(
         TEST_UTIL.getConfiguration());
@@ -392,7 +383,7 @@ public class TestMetaWithReplicas {
     fail("Expected TableNotFoundException");
   }
 
-  @Ignore @Test // Disabled. Currently can't move hbase:meta in AMv2.
+  @Test
   public void testMetaAddressChange() throws Exception {
     // checks that even when the meta's location changes, the various
     // caches update themselves. Uses the master operations to test
@@ -420,16 +411,13 @@ public class TestMetaWithReplicas {
     TEST_UTIL.getAdmin().move(HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes(),
         Bytes.toBytes(moveToServer.getServerName()));
     int i = 0;
-    assert !moveToServer.equals(currentServer);
-    LOG.info("CurrentServer=" + currentServer + ", moveToServer=" + moveToServer);
-    final int max = 10000;
     do {
       Thread.sleep(10);
       data = ZKUtil.getData(zkw, primaryMetaZnode);
       currentServer = ProtobufUtil.toServerName(data);
       i++;
-    } while (!moveToServer.equals(currentServer) && i < max); //wait for 10 seconds overall
-    assert(i != max);
+    } while (!moveToServer.equals(currentServer) && i < 1000); //wait for 10 seconds overall
+    assert(i != 1000);
     TEST_UTIL.getAdmin().disableTable(tableName);
     assertTrue(TEST_UTIL.getAdmin().isTableDisabled(tableName));
   }
@@ -448,7 +436,7 @@ public class TestMetaWithReplicas {
       int i = 0;
       do {
         LOG.debug("Waiting for the replica " + hrl.getRegionInfo() + " to come up");
-        Thread.sleep(10000); //wait for the detection/recovery
+        Thread.sleep(30000); //wait for the detection/recovery
         rl = conn.locateRegion(TableName.META_TABLE_NAME, Bytes.toBytes(""), false, true);
         hrl = rl.getRegionLocation(1);
         i++;
@@ -457,11 +445,14 @@ public class TestMetaWithReplicas {
     }
   }
 
-  @Ignore @Test // Disabled because fsck and this needs work for AMv2
+  @Test
   public void testHBaseFsckWithExcessMetaReplicas() throws Exception {
     // Create a meta replica (this will be the 4th one) and assign it
     HRegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(
         HRegionInfo.FIRST_META_REGIONINFO, 3);
+    // create in-memory state otherwise master won't assign
+    TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
+             .getRegionStates().createRegionState(h);
     TEST_UTIL.assignRegion(h);
     HBaseFsckRepair.waitUntilAssigned(TEST_UTIL.getAdmin(), h);
     // check that problem exists

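The comment restored in the last hunk above ("create in-memory state otherwise master won't assign") suggests the old assignment manager needs an in-memory RegionState for a replica before it will assign it. Condensed from the restored test, with the steps spelled out:

    // Build the HRegionInfo for a fourth meta replica (replica id 3).
    HRegionInfo h = RegionReplicaUtil.getRegionInfoForReplica(
        HRegionInfo.FIRST_META_REGIONINFO, 3);
    // Register it with the old AssignmentManager's RegionStates first; per the
    // restored comment, the master won't assign a region it has no state for.
    TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
             .getRegionStates().createRegionState(h);
    // Now the assignment goes through and we can wait for it to land.
    TEST_UTIL.assignRegion(h);
    HBaseFsckRepair.waitUntilAssigned(TEST_UTIL.getAdmin(), h);
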
http://git-wip-us.apache.org/repos/asf/hbase/blob/a3c5a744/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index ef00b24..1b18ee2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.client;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -44,7 +43,11 @@ import org.apache.hadoop.hbase.MiniHBaseCluster;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
 import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.RegionState.State;
+import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -616,33 +619,34 @@ public class TestScannersFromClientSide {
     byte[] regionName = hri.getRegionName();
     int i = cluster.getServerWith(regionName);
     HRegionServer rs = cluster.getRegionServer(i);
-    LOG.info("Unassigning " + hri);
-    TEST_UTIL.getAdmin().unassign(hri.getRegionName(), true);
+    ProtobufUtil.closeRegion(null,
+      rs.getRSRpcServices(), rs.getServerName(), regionName);
     long startTime = EnvironmentEdgeManager.currentTime();
-    long timeOut = 10000;
-    boolean offline = false;
+    long timeOut = 300000;
     while (true) {
       if (rs.getOnlineRegion(regionName) == null) {
-        offline = true;
         break;
       }
       assertTrue("Timed out in closing the testing region",
         EnvironmentEdgeManager.currentTime() < startTime + timeOut);
+      Thread.sleep(500);
     }
-    assertTrue(offline);
-    LOG.info("Assigning " + hri);
-    TEST_UTIL.getAdmin().assign(hri.getRegionName());
+
+    // Now open the region again.
+    HMaster master = cluster.getMaster();
+    RegionStates states = master.getAssignmentManager().getRegionStates();
+    states.regionOffline(hri);
+    states.updateRegionState(hri, State.OPENING);
+    ProtobufUtil.openRegion(null, rs.getRSRpcServices(), rs.getServerName(), hri);
     startTime = EnvironmentEdgeManager.currentTime();
     while (true) {
-      rs = cluster.getRegionServer(cluster.getServerWith(regionName));
-      if (rs != null && rs.getOnlineRegion(regionName) != null) {
-        offline = false;
+      if (rs.getOnlineRegion(regionName) != null) {
         break;
       }
       assertTrue("Timed out in open the testing region",
         EnvironmentEdgeManager.currentTime() < startTime + timeOut);
+      Thread.sleep(500);
     }
-    assertFalse(offline);
 
     // c0:0, c1:1
     kvListExp = new ArrayList<>();