Posted to commits@hbase.apache.org by el...@apache.org on 2017/01/23 23:01:34 UTC

[01/50] [abbrv] hbase git commit: HBASE-17372 Make AsyncTable thread safe [Forced Update!]

Repository: hbase
Updated Branches:
  refs/heads/HBASE-16961 774eef1a3 -> e48b7fa4a (forced update)


HBASE-17372 Make AsyncTable thread safe


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4ab95ebb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4ab95ebb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4ab95ebb

Branch: refs/heads/HBASE-16961
Commit: 4ab95ebbceb144d90e03bce45afa52bcb8c62c54
Parents: 4cb09a4
Author: zhangduo <zh...@apache.org>
Authored: Tue Jan 17 09:55:23 2017 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Tue Jan 17 14:33:28 2017 +0800

----------------------------------------------------------------------
 .../client/AsyncBatchRpcRetryingCaller.java     |  54 ++-------
 .../hadoop/hbase/client/AsyncClientScanner.java |  21 +++-
 .../hadoop/hbase/client/AsyncConnection.java    |  39 ++++--
 .../client/AsyncConnectionConfiguration.java    |  18 ++-
 .../hbase/client/AsyncConnectionImpl.java       |  27 +++--
 .../hadoop/hbase/client/AsyncRegionLocator.java |   2 +-
 .../client/AsyncRpcRetryingCallerFactory.java   | 102 ++++++++++++----
 .../AsyncScanSingleRegionRpcRetryingCaller.java |   4 +-
 .../AsyncSingleRequestRpcRetryingCaller.java    |   5 +-
 .../client/AsyncSmallScanRpcRetryingCaller.java |  15 ++-
 .../apache/hadoop/hbase/client/AsyncTable.java  |   6 +-
 .../hadoop/hbase/client/AsyncTableBase.java     | 118 ++++++-------------
 .../hadoop/hbase/client/AsyncTableBuilder.java  | 113 ++++++++++++++++++
 .../hbase/client/AsyncTableBuilderBase.java     | 111 +++++++++++++++++
 .../hadoop/hbase/client/AsyncTableImpl.java     |  42 +++----
 .../hadoop/hbase/client/ConnectionUtils.java    |  15 +--
 .../hadoop/hbase/client/RawAsyncTable.java      |   2 +
 .../hadoop/hbase/client/RawAsyncTableImpl.java  | 113 ++++++++++--------
 .../org/apache/hadoop/hbase/HConstants.java     |   3 -
 ...TestAsyncSingleRequestRpcRetryingCaller.java |  52 +++-----
 .../client/TestAsyncTableGetMultiThreaded.java  |  28 ++---
 .../hbase/client/TestRawAsyncTableScan.java     |   6 +-
 22 files changed, 583 insertions(+), 313 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
index 6f0b8e9..9b362d1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java
@@ -21,7 +21,6 @@ import static org.apache.hadoop.hbase.CellUtil.createCellScanner;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
 import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 
@@ -40,7 +39,6 @@ import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -61,7 +59,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-import org.apache.hadoop.hbase.util.AtomicUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
@@ -102,9 +99,7 @@ class AsyncBatchRpcRetryingCaller<T> {
 
   private final long operationTimeoutNs;
 
-  private final long readRpcTimeoutNs;
-
-  private final long writeRpcTimeoutNs;
+  private final long rpcTimeoutNs;
 
   private final int startLogErrorsCnt;
 
@@ -128,39 +123,22 @@ class AsyncBatchRpcRetryingCaller<T> {
     public final ConcurrentMap<byte[], RegionRequest> actionsByRegion =
         new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
 
-    public final AtomicLong rpcTimeoutNs;
-
-    public ServerRequest(long defaultRpcTimeoutNs) {
-      this.rpcTimeoutNs = new AtomicLong(defaultRpcTimeoutNs);
-    }
-
-    public void addAction(HRegionLocation loc, Action action, long rpcTimeoutNs) {
+    public void addAction(HRegionLocation loc, Action action) {
       computeIfAbsent(actionsByRegion, loc.getRegionInfo().getRegionName(),
         () -> new RegionRequest(loc)).actions.add(action);
-      // try update the timeout to a larger value
-      if (this.rpcTimeoutNs.get() <= 0) {
-        return;
-      }
-      if (rpcTimeoutNs <= 0) {
-        this.rpcTimeoutNs.set(-1L);
-        return;
-      }
-      AtomicUtils.updateMax(this.rpcTimeoutNs, rpcTimeoutNs);
     }
   }
 
   public AsyncBatchRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn,
-      TableName tableName, List<? extends Row> actions, long pauseNs, int maxRetries,
-      long operationTimeoutNs, long readRpcTimeoutNs, long writeRpcTimeoutNs,
-      int startLogErrorsCnt) {
+      TableName tableName, List<? extends Row> actions, long pauseNs, int maxAttempts,
+      long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) {
     this.retryTimer = retryTimer;
     this.conn = conn;
     this.tableName = tableName;
     this.pauseNs = pauseNs;
-    this.maxAttempts = retries2Attempts(maxRetries);
+    this.maxAttempts = maxAttempts;
     this.operationTimeoutNs = operationTimeoutNs;
-    this.readRpcTimeoutNs = readRpcTimeoutNs;
-    this.writeRpcTimeoutNs = writeRpcTimeoutNs;
+    this.rpcTimeoutNs = rpcTimeoutNs;
     this.startLogErrorsCnt = startLogErrorsCnt;
 
     this.actions = new ArrayList<>(actions.size());
@@ -366,7 +344,7 @@ class AsyncBatchRpcRetryingCaller<T> {
         return;
       }
       HBaseRpcController controller = conn.rpcControllerFactory.newController();
-      resetController(controller, Math.min(serverReq.rpcTimeoutNs.get(), remainingNs));
+      resetController(controller, Math.min(rpcTimeoutNs, remainingNs));
       if (!cells.isEmpty()) {
         controller.setCellScanner(createCellScanner(cells));
       }
@@ -416,10 +394,6 @@ class AsyncBatchRpcRetryingCaller<T> {
     retryTimer.newTimeout(t -> groupAndSend(actions, tries + 1), delayNs, TimeUnit.NANOSECONDS);
   }
 
-  private long getRpcTimeoutNs(Action action) {
-    return action.getAction() instanceof Get ? readRpcTimeoutNs : writeRpcTimeoutNs;
-  }
-
   private void groupAndSend(Stream<Action> actions, int tries) {
     long locateTimeoutNs;
     if (operationTimeoutNs > 0) {
@@ -433,15 +407,6 @@ class AsyncBatchRpcRetryingCaller<T> {
     }
     ConcurrentMap<ServerName, ServerRequest> actionsByServer = new ConcurrentHashMap<>();
     ConcurrentLinkedQueue<Action> locateFailed = new ConcurrentLinkedQueue<>();
-    // use the small one as the default timeout value, and increase the timeout value if we have an
-    // action in the group needs a larger timeout value.
-    long defaultRpcTimeoutNs;
-    if (readRpcTimeoutNs > 0) {
-      defaultRpcTimeoutNs =
-          writeRpcTimeoutNs > 0 ? Math.min(readRpcTimeoutNs, writeRpcTimeoutNs) : readRpcTimeoutNs;
-    } else {
-      defaultRpcTimeoutNs = writeRpcTimeoutNs > 0 ? writeRpcTimeoutNs : -1L;
-    }
     CompletableFuture.allOf(actions
         .map(action -> conn.getLocator().getRegionLocation(tableName, action.getAction().getRow(),
           RegionLocateType.CURRENT, locateTimeoutNs).whenComplete((loc, error) -> {
@@ -454,9 +419,8 @@ class AsyncBatchRpcRetryingCaller<T> {
               addError(action, error, null);
               locateFailed.add(action);
             } else {
-              computeIfAbsent(actionsByServer, loc.getServerName(),
-                () -> new ServerRequest(defaultRpcTimeoutNs)).addAction(loc, action,
-                  getRpcTimeoutNs(action));
+              computeIfAbsent(actionsByServer, loc.getServerName(), ServerRequest::new)
+                  .addAction(loc, action);
             }
           }))
         .toArray(CompletableFuture[]::new)).whenComplete((v, r) -> {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
index d7a3ed1..f656a6c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java
@@ -55,14 +55,21 @@ class AsyncClientScanner {
 
   private final AsyncConnectionImpl conn;
 
+  private final long pauseNs;
+
+  private final int maxAttempts;
+
   private final long scanTimeoutNs;
 
   private final long rpcTimeoutNs;
 
+  private final int startLogErrorsCnt;
+
   private final ScanResultCache resultCache;
 
   public AsyncClientScanner(Scan scan, RawScanResultConsumer consumer, TableName tableName,
-      AsyncConnectionImpl conn, long scanTimeoutNs, long rpcTimeoutNs) {
+      AsyncConnectionImpl conn, long pauseNs, int maxAttempts, long scanTimeoutNs,
+      long rpcTimeoutNs, int startLogErrorsCnt) {
     if (scan.getStartRow() == null) {
       scan.withStartRow(EMPTY_START_ROW, scan.includeStartRow());
     }
@@ -73,8 +80,11 @@ class AsyncClientScanner {
     this.consumer = consumer;
     this.tableName = tableName;
     this.conn = conn;
+    this.pauseNs = pauseNs;
+    this.maxAttempts = maxAttempts;
     this.scanTimeoutNs = scanTimeoutNs;
     this.rpcTimeoutNs = rpcTimeoutNs;
+    this.startLogErrorsCnt = startLogErrorsCnt;
     this.resultCache = scan.getAllowPartialResults() || scan.getBatch() > 0
         ? new AllowPartialScanResultCache() : new CompleteScanResultCache();
   }
@@ -117,7 +127,9 @@ class AsyncClientScanner {
     conn.callerFactory.scanSingleRegion().id(resp.scannerId).location(resp.loc).stub(resp.stub)
         .setScan(scan).consumer(consumer).resultCache(resultCache)
         .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-        .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).start().whenComplete((hasMore, error) -> {
+        .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS)
+        .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt).start()
+        .whenComplete((hasMore, error) -> {
           if (error != null) {
             consumer.onError(error);
             return;
@@ -133,8 +145,9 @@ class AsyncClientScanner {
   private void openScanner() {
     conn.callerFactory.<OpenScannerResponse> single().table(tableName).row(scan.getStartRow())
         .locateType(getLocateType(scan)).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-        .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).action(this::callOpenScanner).call()
-        .whenComplete((resp, error) -> {
+        .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS)
+        .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt).action(this::callOpenScanner)
+        .call().whenComplete((resp, error) -> {
           if (error != null) {
             consumer.onError(error);
             return;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
index 7b0f339..9f114ac 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
@@ -50,21 +50,32 @@ public interface AsyncConnection extends Closeable {
   AsyncTableRegionLocator getRegionLocator(TableName tableName);
 
   /**
-   * Retrieve an RawAsyncTable implementation for accessing a table. The returned Table is not
-   * thread safe, a new instance should be created for each using thread. This is a lightweight
-   * operation, pooling or caching of the returned AsyncTable is neither required nor desired.
+   * Retrieve an {@link RawAsyncTable} implementation for accessing a table.
+   * <p>
+   * The returned instance will use default configs. Use {@link #getRawTableBuilder(TableName)} if you
+   * want to customize some configs.
    * <p>
    * This method no longer checks table existence. An exception will be thrown if the table does not
    * exist only when the first operation is attempted.
    * @param tableName the name of the table
    * @return an RawAsyncTable to use for interactions with this table
+   * @see #getRawTableBuilder(TableName)
+   */
+  default RawAsyncTable getRawTable(TableName tableName) {
+    return getRawTableBuilder(tableName).build();
+  }
+
+  /**
+   * Returns an {@link AsyncTableBuilder} for creating {@link RawAsyncTable}.
+   * <p>
+   * This method no longer checks table existence. An exception will be thrown if the table does not
+   * exist only when the first operation is attempted.
+   * @param tableName the name of the table
    */
-  RawAsyncTable getRawTable(TableName tableName);
+  AsyncTableBuilder<RawAsyncTable> getRawTableBuilder(TableName tableName);
 
   /**
-   * Retrieve an AsyncTable implementation for accessing a table. The returned Table is not thread
-   * safe, a new instance should be created for each using thread. This is a lightweight operation,
-   * pooling or caching of the returned AsyncTable is neither required nor desired.
+   * Retrieve an AsyncTable implementation for accessing a table.
    * <p>
    * This method no longer checks table existence. An exception will be thrown if the table does not
    * exist only when the first operation is attempted.
@@ -72,5 +83,17 @@ public interface AsyncConnection extends Closeable {
    * @param pool the thread pool to use for executing callback
    * @return an AsyncTable to use for interactions with this table
    */
-  AsyncTable getTable(TableName tableName, ExecutorService pool);
+  default AsyncTable getTable(TableName tableName, ExecutorService pool) {
+    return getTableBuilder(tableName, pool).build();
+  }
+
+  /**
+   * Returns an {@link AsyncTableBuilder} for creating {@link AsyncTable}.
+   * <p>
+   * This method no longer checks table existence. An exception will be thrown if the table does not
+   * exist only when the first operation is attempted.
+   * @param tableName the name of the table
+   * @param pool the thread pool to use for executing callback
+   */
+  AsyncTableBuilder<AsyncTable> getTableBuilder(TableName tableName, ExecutorService pool);
 }
\ No newline at end of file
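
A short usage sketch of the new connection-level API above: getRawTable still returns a table with default configs, while getRawTableBuilder/getTableBuilder let the caller tune per-table settings before calling build(). Only the table name "test" and the pool variable are placeholders; the methods are the ones declared in this interface.

  // Sketch only: assumes an open AsyncConnection `conn`, a table named "test",
  // and an ExecutorService `pool` supplied by the caller for callbacks.
  AsyncTable table = conn.getTableBuilder(TableName.valueOf("test"), pool)
      .setOperationTimeout(30, TimeUnit.SECONDS)
      .setMaxRetries(5)
      .build();

  // The default-config shortcut is still available:
  RawAsyncTable rawTable = conn.getRawTable(TableName.valueOf("test"));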

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
index 6279d46..585a104 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
@@ -56,6 +56,10 @@ class AsyncConnectionConfiguration {
   // by this value, see scanTimeoutNs.
   private final long operationTimeoutNs;
 
+  // timeout for each rpc request. Can be overridden by a more specific config, such as
+  // readRpcTimeout or writeRpcTimeout.
+  private final long rpcTimeoutNs;
+
   // timeout for each read rpc request
   private final long readRpcTimeoutNs;
 
@@ -85,10 +89,12 @@ class AsyncConnectionConfiguration {
       conf.getLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT));
     this.operationTimeoutNs = TimeUnit.MILLISECONDS.toNanos(
       conf.getLong(HBASE_CLIENT_OPERATION_TIMEOUT, DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT));
-    this.readRpcTimeoutNs = TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_READ_TIMEOUT_KEY,
-      conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT)));
-    this.writeRpcTimeoutNs = TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_WRITE_TIMEOUT_KEY,
-      conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT)));
+    this.rpcTimeoutNs = TimeUnit.MILLISECONDS
+        .toNanos(conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT));
+    this.readRpcTimeoutNs =
+        TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_READ_TIMEOUT_KEY, rpcTimeoutNs));
+    this.writeRpcTimeoutNs =
+        TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_WRITE_TIMEOUT_KEY, rpcTimeoutNs));
     this.pauseNs =
         TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_CLIENT_PAUSE, DEFAULT_HBASE_CLIENT_PAUSE));
     this.maxRetries = conf.getInt(HBASE_CLIENT_RETRIES_NUMBER, DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
@@ -111,6 +117,10 @@ class AsyncConnectionConfiguration {
     return operationTimeoutNs;
   }
 
+  long getRpcTimeoutNs() {
+    return rpcTimeoutNs;
+  }
+
   long getReadRpcTimeoutNs() {
     return readRpcTimeoutNs;
   }
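
The hunk above makes hbase.rpc.timeout the base value that hbase.rpc.read.timeout and hbase.rpc.write.timeout fall back to when they are not set explicitly. A hedged sketch of the resulting configuration behaviour (the numeric values are made up):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  // Sketch only: illustrates the fallback chain, not the exact code path above.
  Configuration conf = HBaseConfiguration.create();
  conf.setLong("hbase.rpc.timeout", 60000);        // base rpc timeout, in ms
  // hbase.rpc.read.timeout is not set, so read rpcs fall back to the 60000 ms base.
  conf.setLong("hbase.rpc.write.timeout", 120000); // writes keep their own, more specific value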

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index d660b02..c58500a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.HConstants.CLUSTER_ID_DEFAULT;
-import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
-import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.NO_NONCE_GENERATOR;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.getStubKey;
 import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLED_KEY;
@@ -90,7 +88,6 @@ class AsyncConnectionImpl implements AsyncConnection {
 
   private final ConcurrentMap<String, ClientService.Interface> rsStubs = new ConcurrentHashMap<>();
 
-  @SuppressWarnings("deprecation")
   public AsyncConnectionImpl(Configuration conf, User user) {
     this.conf = conf;
     this.user = user;
@@ -105,7 +102,8 @@ class AsyncConnectionImpl implements AsyncConnection {
     this.rpcClient = RpcClientFactory.createClient(conf, clusterId);
     this.rpcControllerFactory = RpcControllerFactory.instantiate(conf);
     this.hostnameCanChange = conf.getBoolean(RESOLVE_HOSTNAME_ON_FAIL_KEY, true);
-    this.rpcTimeout = conf.getInt(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT);
+    this.rpcTimeout = (int) Math.min(Integer.MAX_VALUE,
+      TimeUnit.NANOSECONDS.toMillis(connConf.getRpcTimeoutNs()));
     this.locator = new AsyncRegionLocator(this, RETRY_TIMER);
     this.callerFactory = new AsyncRpcRetryingCallerFactory(this, RETRY_TIMER);
     if (conf.getBoolean(CLIENT_NONCES_ENABLED_KEY, true)) {
@@ -152,12 +150,25 @@ class AsyncConnectionImpl implements AsyncConnection {
   }
 
   @Override
-  public RawAsyncTable getRawTable(TableName tableName) {
-    return new RawAsyncTableImpl(this, tableName);
+  public AsyncTableBuilder<RawAsyncTable> getRawTableBuilder(TableName tableName) {
+    return new AsyncTableBuilderBase<RawAsyncTable>(tableName, connConf) {
+
+      @Override
+      public RawAsyncTable build() {
+        return new RawAsyncTableImpl(AsyncConnectionImpl.this, this);
+      }
+    };
   }
 
   @Override
-  public AsyncTable getTable(TableName tableName, ExecutorService pool) {
-    return new AsyncTableImpl(this, tableName, pool);
+  public AsyncTableBuilder<AsyncTable> getTableBuilder(TableName tableName, ExecutorService pool) {
+    return new AsyncTableBuilderBase<AsyncTable>(tableName, connConf) {
+
+      @Override
+      public AsyncTable build() {
+        RawAsyncTableImpl rawTable = new RawAsyncTableImpl(AsyncConnectionImpl.this, this);
+        return new AsyncTableImpl(AsyncConnectionImpl.this, rawTable, pool);
+      }
+    };
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
index 7a45ae3..7030eac 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java
@@ -71,7 +71,7 @@ class AsyncRegionLocator {
       future.completeExceptionally(new TimeoutIOException(timeoutMsg.get()));
     }, timeoutNs, TimeUnit.NANOSECONDS);
     return future.whenComplete((loc, error) -> {
-      if (error.getClass() != TimeoutIOException.class) {
+      if (error != null && error.getClass() != TimeoutIOException.class) {
         // cancel timeout task if we are not completed by it.
         timeoutTask.cancel();
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
index 55c56ab..76b6a33 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.client;
 
 import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.*;
 
 import io.netty.util.HashedWheelTimer;
 
@@ -46,7 +47,16 @@ class AsyncRpcRetryingCallerFactory {
     this.retryTimer = retryTimer;
   }
 
-  public class SingleRequestCallerBuilder<T> {
+  private abstract class BuilderBase {
+
+    protected long pauseNs = conn.connConf.getPauseNs();
+
+    protected int maxAttempts = retries2Attempts(conn.connConf.getMaxRetries());
+
+    protected int startLogErrorsCnt = conn.connConf.getStartLogErrorsCnt();
+  }
+
+  public class SingleRequestCallerBuilder<T> extends BuilderBase {
 
     private TableName tableName;
 
@@ -91,12 +101,26 @@ class AsyncRpcRetryingCallerFactory {
       return this;
     }
 
+    public SingleRequestCallerBuilder<T> pause(long pause, TimeUnit unit) {
+      this.pauseNs = unit.toNanos(pause);
+      return this;
+    }
+
+    public SingleRequestCallerBuilder<T> maxAttempts(int maxAttempts) {
+      this.maxAttempts = maxAttempts;
+      return this;
+    }
+
+    public SingleRequestCallerBuilder<T> startLogErrorsCnt(int startLogErrorsCnt) {
+      this.startLogErrorsCnt = startLogErrorsCnt;
+      return this;
+    }
+
     public AsyncSingleRequestRpcRetryingCaller<T> build() {
       return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn,
           checkNotNull(tableName, "tableName is null"), checkNotNull(row, "row is null"),
           checkNotNull(locateType, "locateType is null"), checkNotNull(callable, "action is null"),
-          conn.connConf.getPauseNs(), conn.connConf.getMaxRetries(), operationTimeoutNs,
-          rpcTimeoutNs, conn.connConf.getStartLogErrorsCnt());
+          pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
     }
 
     /**
@@ -114,7 +138,7 @@ class AsyncRpcRetryingCallerFactory {
     return new SingleRequestCallerBuilder<>();
   }
 
-  public class SmallScanCallerBuilder {
+  public class SmallScanCallerBuilder extends BuilderBase {
 
     private TableName tableName;
 
@@ -151,12 +175,27 @@ class AsyncRpcRetryingCallerFactory {
       return this;
     }
 
+    public SmallScanCallerBuilder pause(long pause, TimeUnit unit) {
+      this.pauseNs = unit.toNanos(pause);
+      return this;
+    }
+
+    public SmallScanCallerBuilder maxAttempts(int maxAttempts) {
+      this.maxAttempts = maxAttempts;
+      return this;
+    }
+
+    public SmallScanCallerBuilder startLogErrorsCnt(int startLogErrorsCnt) {
+      this.startLogErrorsCnt = startLogErrorsCnt;
+      return this;
+    }
+
     public AsyncSmallScanRpcRetryingCaller build() {
       TableName tableName = checkNotNull(this.tableName, "tableName is null");
       Scan scan = checkNotNull(this.scan, "scan is null");
       checkArgument(limit > 0, "invalid limit %d", limit);
-      return new AsyncSmallScanRpcRetryingCaller(conn, tableName, scan, limit, scanTimeoutNs,
-          rpcTimeoutNs);
+      return new AsyncSmallScanRpcRetryingCaller(conn, tableName, scan, limit, pauseNs, maxAttempts,
+          scanTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
     }
 
     /**
@@ -174,7 +213,7 @@ class AsyncRpcRetryingCallerFactory {
     return new SmallScanCallerBuilder();
   }
 
-  public class ScanSingleRegionCallerBuilder {
+  public class ScanSingleRegionCallerBuilder extends BuilderBase {
 
     private long scannerId = -1L;
 
@@ -232,15 +271,29 @@ class AsyncRpcRetryingCallerFactory {
       return this;
     }
 
+    public ScanSingleRegionCallerBuilder pause(long pause, TimeUnit unit) {
+      this.pauseNs = unit.toNanos(pause);
+      return this;
+    }
+
+    public ScanSingleRegionCallerBuilder maxAttempts(int maxAttempts) {
+      this.maxAttempts = maxAttempts;
+      return this;
+    }
+
+    public ScanSingleRegionCallerBuilder startLogErrorsCnt(int startLogErrorsCnt) {
+      this.startLogErrorsCnt = startLogErrorsCnt;
+      return this;
+    }
+
     public AsyncScanSingleRegionRpcRetryingCaller build() {
       checkArgument(scannerId >= 0, "invalid scannerId %d", scannerId);
       return new AsyncScanSingleRegionRpcRetryingCaller(retryTimer, conn,
           checkNotNull(scan, "scan is null"), scannerId,
           checkNotNull(resultCache, "resultCache is null"),
           checkNotNull(consumer, "consumer is null"), checkNotNull(stub, "stub is null"),
-          checkNotNull(loc, "location is null"), conn.connConf.getPauseNs(),
-          conn.connConf.getMaxRetries(), scanTimeoutNs, rpcTimeoutNs,
-          conn.connConf.getStartLogErrorsCnt());
+          checkNotNull(loc, "location is null"), pauseNs, maxAttempts, scanTimeoutNs, rpcTimeoutNs,
+          startLogErrorsCnt);
     }
 
     /**
@@ -258,7 +311,7 @@ class AsyncRpcRetryingCallerFactory {
     return new ScanSingleRegionCallerBuilder();
   }
 
-  public class BatchCallerBuilder {
+  public class BatchCallerBuilder extends BuilderBase {
 
     private TableName tableName;
 
@@ -266,9 +319,7 @@ class AsyncRpcRetryingCallerFactory {
 
     private long operationTimeoutNs = -1L;
 
-    private long readRpcTimeoutNs = -1L;
-
-    private long writeRpcTimeoutNs = -1L;
+    private long rpcTimeoutNs = -1L;
 
     public BatchCallerBuilder table(TableName tableName) {
       this.tableName = tableName;
@@ -285,20 +336,29 @@ class AsyncRpcRetryingCallerFactory {
       return this;
     }
 
-    public BatchCallerBuilder readRpcTimeout(long rpcTimeout, TimeUnit unit) {
-      this.readRpcTimeoutNs = unit.toNanos(rpcTimeout);
+    public BatchCallerBuilder rpcTimeout(long rpcTimeout, TimeUnit unit) {
+      this.rpcTimeoutNs = unit.toNanos(rpcTimeout);
+      return this;
+    }
+
+    public BatchCallerBuilder pause(long pause, TimeUnit unit) {
+      this.pauseNs = unit.toNanos(pause);
+      return this;
+    }
+
+    public BatchCallerBuilder maxAttempts(int maxAttempts) {
+      this.maxAttempts = maxAttempts;
       return this;
     }
 
-    public BatchCallerBuilder writeRpcTimeout(long rpcTimeout, TimeUnit unit) {
-      this.writeRpcTimeoutNs = unit.toNanos(rpcTimeout);
+    public BatchCallerBuilder startLogErrorsCnt(int startLogErrorsCnt) {
+      this.startLogErrorsCnt = startLogErrorsCnt;
       return this;
     }
 
     public <T> AsyncBatchRpcRetryingCaller<T> build() {
-      return new AsyncBatchRpcRetryingCaller<T>(retryTimer, conn, tableName, actions,
-          conn.connConf.getPauseNs(), conn.connConf.getMaxRetries(), operationTimeoutNs,
-          readRpcTimeoutNs, writeRpcTimeoutNs, conn.connConf.getStartLogErrorsCnt());
+      return new AsyncBatchRpcRetryingCaller<T>(retryTimer, conn, tableName, actions, pauseNs,
+          maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt);
     }
 
     public <T> List<CompletableFuture<T>> call() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
index dae88a7..5d3b736 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java
@@ -108,7 +108,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
   public AsyncScanSingleRegionRpcRetryingCaller(HashedWheelTimer retryTimer,
       AsyncConnectionImpl conn, Scan scan, long scannerId, ScanResultCache resultCache,
       RawScanResultConsumer consumer, Interface stub, HRegionLocation loc, long pauseNs,
-      int maxRetries, long scanTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) {
+      int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) {
     this.retryTimer = retryTimer;
     this.scan = scan;
     this.scannerId = scannerId;
@@ -117,7 +117,7 @@ class AsyncScanSingleRegionRpcRetryingCaller {
     this.stub = stub;
     this.loc = loc;
     this.pauseNs = pauseNs;
-    this.maxAttempts = retries2Attempts(maxRetries);
+    this.maxAttempts = maxAttempts;
     this.scanTimeoutNs = scanTimeoutNs;
     this.rpcTimeoutNs = rpcTimeoutNs;
     this.startLogErrorsCnt = startLogErrorsCnt;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
index 04e69af..4ce6a18 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
 
 import io.netty.util.HashedWheelTimer;
@@ -90,7 +89,7 @@ class AsyncSingleRequestRpcRetryingCaller<T> {
 
   public AsyncSingleRequestRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn,
       TableName tableName, byte[] row, RegionLocateType locateType, Callable<T> callable,
-      long pauseNs, int maxRetries, long operationTimeoutNs, long rpcTimeoutNs,
+      long pauseNs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs,
       int startLogErrorsCnt) {
     this.retryTimer = retryTimer;
     this.conn = conn;
@@ -99,7 +98,7 @@ class AsyncSingleRequestRpcRetryingCaller<T> {
     this.locateType = locateType;
     this.callable = callable;
     this.pauseNs = pauseNs;
-    this.maxAttempts = retries2Attempts(maxRetries);
+    this.maxAttempts = maxAttempts;
     this.operationTimeoutNs = operationTimeoutNs;
     this.rpcTimeoutNs = rpcTimeoutNs;
     this.startLogErrorsCnt = startLogErrorsCnt;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
index 6ffa30a..98a276f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSmallScanRpcRetryingCaller.java
@@ -57,6 +57,12 @@ class AsyncSmallScanRpcRetryingCaller {
 
   private final long rpcTimeoutNs;
 
+  private final long pauseNs;
+
+  private final int maxAttempts;
+
+  private final int startLogErrosCnt;
+
   private final Function<HRegionInfo, Boolean> nextScan;
 
   private final List<Result> resultList;
@@ -64,13 +70,17 @@ class AsyncSmallScanRpcRetryingCaller {
   private final CompletableFuture<List<Result>> future;
 
   public AsyncSmallScanRpcRetryingCaller(AsyncConnectionImpl conn, TableName tableName, Scan scan,
-      int limit, long scanTimeoutNs, long rpcTimeoutNs) {
+      int limit, long pauseNs, int maxAttempts, long scanTimeoutNs, long rpcTimeoutNs,
+      int startLogErrosCnt) {
     this.conn = conn;
     this.tableName = tableName;
     this.scan = scan;
     this.limit = limit;
     this.scanTimeoutNs = scanTimeoutNs;
     this.rpcTimeoutNs = rpcTimeoutNs;
+    this.pauseNs = pauseNs;
+    this.maxAttempts = maxAttempts;
+    this.startLogErrosCnt = startLogErrosCnt;
     if (scan.isReversed()) {
       this.nextScan = this::reversedNextScan;
     } else {
@@ -146,7 +156,8 @@ class AsyncSmallScanRpcRetryingCaller {
   private void scan() {
     conn.callerFactory.<SmallScanResponse> single().table(tableName).row(scan.getStartRow())
         .locateType(getLocateType(scan)).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-        .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).action(this::scan).call()
+        .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS)
+        .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrosCnt).action(this::scan).call()
         .whenComplete((resp, error) -> {
           if (error != null) {
             future.completeExceptionally(error);

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
index 893beb9..402ad64 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java
@@ -23,9 +23,11 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 /**
  * The asynchronous table for normal users.
  * <p>
+ * The implementation is required to be thread safe.
+ * <p>
  * The implementation should make sure that user can do everything they want to the returned
- * {@code CompletableFuture} without break anything. Usually the implementation will require user to
- * provide a {@code ExecutorService}.
+ * {@code CompletableFuture} without breaking anything. Usually the implementation will require user
+ * to provide a {@code ExecutorService}.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
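
Since the returned AsyncTable is now required to be thread safe and its CompletableFutures are completed on the ExecutorService the caller supplied, a single instance can be shared across threads and callbacks may do arbitrary work. A minimal sketch under those assumptions (the row key, family "cf" and qualifier "q" are placeholders):

  // Sketch only: `table` is an AsyncTable obtained from getTable(tableName, pool)
  // and may be shared freely between threads.
  table.get(new Get(Bytes.toBytes("row1"))).thenAccept(result ->
      System.out.println(Bytes.toStringBinary(
          result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q")))))
    .exceptionally(error -> {
      error.printStackTrace();
      return null;
    });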

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
index d80627f..d82fa22 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
@@ -18,9 +18,8 @@
 package org.apache.hadoop.hbase.client;
 
 import static java.util.stream.Collectors.toList;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.allOf;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.toCheckExistenceOnly;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.voidBatch;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.voidBatchAll;
 
 import com.google.common.base.Preconditions;
 
@@ -39,10 +38,9 @@ import org.apache.hadoop.hbase.util.Bytes;
  * The base interface for asynchronous version of Table. Obtain an instance from a
  * {@link AsyncConnection}.
  * <p>
- * The implementation is NOT required to be thread safe. Do NOT access it from multiple threads
- * concurrently.
+ * The implementation is required to be thread safe.
  * <p>
- * Usually the implementations will not throw any exception directly, you need to get the exception
+ * Usually the implementation will not throw any exception directly. You need to get the exception
  * from the returned {@link CompletableFuture}.
  */
 @InterfaceAudience.Public
@@ -62,12 +60,12 @@ public interface AsyncTableBase {
   Configuration getConfiguration();
 
   /**
-   * Set timeout of each rpc read request in operations of this Table instance, will override the
-   * value of {@code hbase.rpc.read.timeout} in configuration. If a rpc read request waiting too
-   * long, it will stop waiting and send a new request to retry until retries exhausted or operation
-   * timeout reached.
+   * Get timeout of each rpc request in this Table instance. It will be overridden by a more
+   * specific rpc timeout config such as readRpcTimeout or writeRpcTimeout.
+   * @see #getReadRpcTimeout(TimeUnit)
+   * @see #getWriteRpcTimeout(TimeUnit)
    */
-  void setReadRpcTimeout(long timeout, TimeUnit unit);
+  long getRpcTimeout(TimeUnit unit);
 
   /**
    * Get timeout of each rpc read request in this Table instance.
@@ -75,47 +73,18 @@ public interface AsyncTableBase {
   long getReadRpcTimeout(TimeUnit unit);
 
   /**
-   * Set timeout of each rpc write request in operations of this Table instance, will override the
-   * value of {@code hbase.rpc.write.timeout} in configuration. If a rpc write request waiting too
-   * long, it will stop waiting and send a new request to retry until retries exhausted or operation
-   * timeout reached.
-   */
-  void setWriteRpcTimeout(long timeout, TimeUnit unit);
-
-  /**
    * Get timeout of each rpc write request in this Table instance.
    */
   long getWriteRpcTimeout(TimeUnit unit);
 
   /**
-   * Set timeout of each operation in this Table instance, will override the value of
-   * {@code hbase.client.operation.timeout} in configuration.
-   * <p>
-   * Operation timeout is a top-level restriction that makes sure an operation will not be blocked
-   * more than this. In each operation, if rpc request fails because of timeout or other reason, it
-   * will retry until success or throw a RetriesExhaustedException. But if the total time elapsed
-   * reach the operation timeout before retries exhausted, it will break early and throw
-   * SocketTimeoutException.
-   */
-  void setOperationTimeout(long timeout, TimeUnit unit);
-
-  /**
    * Get timeout of each operation in Table instance.
    */
   long getOperationTimeout(TimeUnit unit);
 
   /**
-   * Set timeout of a single operation in a scan, such as openScanner and next. Will override the
-   * value {@code hbase.client.scanner.timeout.period} in configuration.
-   * <p>
-   * Generally a scan will never timeout after we add heartbeat support unless the region is
-   * crashed. The {@code scanTimeout} works like the {@code operationTimeout} for each single
-   * operation in a scan.
-   */
-  void setScanTimeout(long timeout, TimeUnit unit);
-
-  /**
-   * Get the timeout of a single operation in a scan.
+   * Get the timeout of a single operation in a scan. It works like operation timeout for other
+   * operations.
    */
   long getScanTimeout(TimeUnit unit);
 
@@ -353,29 +322,6 @@ public interface AsyncTableBase {
   CompletableFuture<List<Result>> smallScan(Scan scan, int limit);
 
   /**
-   * Extracts certain cells from the given rows, in batch.
-   * <p>
-   * Notice that you may not get all the results with this function, which means some of the
-   * returned {@link CompletableFuture}s may succeed while some of the other returned
-   * {@link CompletableFuture}s may fail.
-   * @param gets The objects that specify what data to fetch and from which rows.
-   * @return A list of {@link CompletableFuture}s that represent the result for each get.
-   */
-  default List<CompletableFuture<Result>> get(List<Get> gets) {
-    return batch(gets);
-  }
-
-  /**
-   * A simple version for batch get. It will fail if there are any failures and you will get the
-   * whole result list at once if the operation is succeeded.
-   * @param gets The objects that specify what data to fetch and from which rows.
-   * @return A {@link CompletableFuture} that wrapper the result list.
-   */
-  default CompletableFuture<List<Result>> getAll(List<Get> gets) {
-    return batchAll(gets);
-  }
-
-  /**
    * Test for the existence of columns in the table, as specified by the Gets.
    * <p>
    * This will return a list of booleans. Each value will be true if the related Get matches one or
@@ -386,8 +332,8 @@ public interface AsyncTableBase {
    * @return A list of {@link CompletableFuture}s that represent the existence for each get.
    */
   default List<CompletableFuture<Boolean>> exists(List<Get> gets) {
-    return get(toCheckExistenceOnly(gets)).stream().
-        <CompletableFuture<Boolean>>map(f -> f.thenApply(r -> r.getExists())).collect(toList());
+    return get(toCheckExistenceOnly(gets)).stream()
+        .<CompletableFuture<Boolean>> map(f -> f.thenApply(r -> r.getExists())).collect(toList());
   }
 
   /**
@@ -397,8 +343,28 @@ public interface AsyncTableBase {
    * @return A {@link CompletableFuture} that wrapper the result boolean list.
    */
   default CompletableFuture<List<Boolean>> existsAll(List<Get> gets) {
-    return getAll(toCheckExistenceOnly(gets))
-        .thenApply(l -> l.stream().map(r -> r.getExists()).collect(toList()));
+    return allOf(exists(gets));
+  }
+
+  /**
+   * Extracts certain cells from the given rows, in batch.
+   * <p>
+   * Notice that you may not get all the results with this function, which means some of the
+   * returned {@link CompletableFuture}s may succeed while some of the other returned
+   * {@link CompletableFuture}s may fail.
+   * @param gets The objects that specify what data to fetch and from which rows.
+   * @return A list of {@link CompletableFuture}s that represent the result for each get.
+   */
+  List<CompletableFuture<Result>> get(List<Get> gets);
+
+  /**
+   * A simple version for batch get. It will fail if there are any failures and you will get the
+   * whole result list at once if the operation is succeeded.
+   * @param gets The objects that specify what data to fetch and from which rows.
+   * @return A {@link CompletableFuture} that wrapper the result list.
+   */
+  default CompletableFuture<List<Result>> getAll(List<Get> gets) {
+    return allOf(get(gets));
   }
 
   /**
@@ -406,9 +372,7 @@ public interface AsyncTableBase {
    * @param puts The list of mutations to apply.
    * @return A list of {@link CompletableFuture}s that represent the result for each put.
    */
-  default List<CompletableFuture<Void>> put(List<Put> puts) {
-    return voidBatch(this, puts);
-  }
+  List<CompletableFuture<Void>> put(List<Put> puts);
 
   /**
    * A simple version of batch put. It will fail if there are any failures.
@@ -416,7 +380,7 @@ public interface AsyncTableBase {
    * @return A {@link CompletableFuture} that always returns null when complete normally.
    */
   default CompletableFuture<Void> putAll(List<Put> puts) {
-    return voidBatchAll(this, puts);
+    return allOf(put(puts)).thenApply(r -> null);
   }
 
   /**
@@ -424,9 +388,7 @@ public interface AsyncTableBase {
    * @param deletes list of things to delete.
    * @return A list of {@link CompletableFuture}s that represent the result for each delete.
    */
-  default List<CompletableFuture<Void>> delete(List<Delete> deletes) {
-    return voidBatch(this, deletes);
-  }
+  List<CompletableFuture<Void>> delete(List<Delete> deletes);
 
   /**
    * A simple version of batch delete. It will fail if there are any failures.
@@ -434,7 +396,7 @@ public interface AsyncTableBase {
    * @return A {@link CompletableFuture} that always returns null when complete normally.
    */
   default CompletableFuture<Void> deleteAll(List<Delete> deletes) {
-    return voidBatchAll(this, deletes);
+    return allOf(delete(deletes)).thenApply(r -> null);
   }
 
   /**
@@ -454,8 +416,6 @@ public interface AsyncTableBase {
    * @return A list of the result for the actions. Wrapped by a {@link CompletableFuture}.
    */
   default <T> CompletableFuture<List<T>> batchAll(List<? extends Row> actions) {
-    List<CompletableFuture<T>> futures = batch(actions);
-    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
-        .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList()));
+    return allOf(batch(actions));
   }
 }
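
The interface now declares the per-future list methods get(List), put(List) and delete(List) directly, and derives getAll/putAll/deleteAll from them through ConnectionUtils.allOf, so a failure of any single future fails the combined one. A hedged usage sketch (gets is a hypothetical List<Get>):

  // Sketch only: per-row futures let the caller react to partial failures...
  List<CompletableFuture<Result>> perRow = table.get(gets);
  perRow.forEach(f -> f.whenComplete((result, error) -> {
    if (error != null) {
      // this row failed; the other futures may still succeed
    }
  }));

  // ...while getAll fails as a whole if any single get fails.
  CompletableFuture<List<Result>> all = table.getAll(gets);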

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java
new file mode 100644
index 0000000..2330855
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * For creating {@link AsyncTable} or {@link RawAsyncTable}.
+ * <p>
+ * The implementation should have default configurations set before returning the builder to user.
+ * So users are free to only set the configs they care about to create a new
+ * AsyncTable/RawAsyncTable instance.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public interface AsyncTableBuilder<T extends AsyncTableBase> {
+
+  /**
+   * Set timeout for a whole operation such as get, put or delete. Notice that scan will not be
+   * effected by this value, see scanTimeoutNs.
+   * <p>
+   * Operation timeout and max attempt times(or max retry times) are both limitations for retrying,
+   * we will stop retrying when we reach any of the limitations.
+   * @see #setMaxAttempts(int)
+   * @see #setMaxRetries(int)
+   * @see #setScanTimeout(long, TimeUnit)
+   */
+  AsyncTableBuilder<T> setOperationTimeout(long timeout, TimeUnit unit);
+
+  /**
+   * As now we have heartbeat support for scan, ideally a scan will never timeout unless the RS is
+   * crash. The RS will always return something before the rpc timed out or scan timed out to tell
+   * the client that it is still alive. The scan timeout is used as operation timeout for every
+   * operation in a scan, such as openScanner or next.
+   * @see #setScanTimeout(long, TimeUnit)
+   */
+  AsyncTableBuilder<T> setScanTimeout(long timeout, TimeUnit unit);
+
+  /**
+   * Set timeout for each rpc request.
+   * <p>
+   * Notice that this will <strong>NOT</strong> change the rpc timeout for read(get, scan) request
+   * and write request(put, delete).
+   */
+  AsyncTableBuilder<T> setRpcTimeout(long timeout, TimeUnit unit);
+
+  /**
+   * Set timeout for each read(get, scan) rpc request.
+   */
+  AsyncTableBuilder<T> setReadRpcTimeout(long timeout, TimeUnit unit);
+
+  /**
+   * Set timeout for each write(put, delete) rpc request.
+   */
+  AsyncTableBuilder<T> setWriteRpcTimeout(long timeout, TimeUnit unit);
+
+  /**
+   * Set the base pause time for retrying. We use an exponential policy to generate sleep time when
+   * retrying.
+   */
+  AsyncTableBuilder<T> setRetryPause(long pause, TimeUnit unit);
+
+  /**
+   * Set the max retry times for an operation. Usually it is the max attempt times minus 1.
+   * <p>
+   * Operation timeout and max attempt times(or max retry times) are both limitations for retrying,
+   * we will stop retrying when we reach any of the limitations.
+   * @see #setMaxAttempts(int)
+   * @see #setOperationTimeout(long, TimeUnit)
+   */
+  default AsyncTableBuilder<T> setMaxRetries(int maxRetries) {
+    return setMaxAttempts(retries2Attempts(maxRetries));
+  }
+
+  /**
+   * Set the max attempt times for an operation. Usually it is the max retry times plus 1. Operation
+   * timeout and max attempt times(or max retry times) are both limitations for retrying, we will
+   * stop retrying when we reach any of the limitations.
+   * @see #setMaxRetries(int)
+   * @see #setOperationTimeout(long, TimeUnit)
+   */
+  AsyncTableBuilder<T> setMaxAttempts(int maxAttempts);
+
+  /**
+   * Set the number of retries that are allowed before we start to log.
+   */
+  AsyncTableBuilder<T> setStartLogErrorsCnt(int startLogErrorsCnt);
+
+  /**
+   * Create the {@link AsyncTable} or {@link RawAsyncTable} instance.
+   */
+  T build();
+}
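
A sketch of how the builder above might be used to override only the settings a caller cares about; the table name and the timeout values are placeholders, the setter names are the ones defined in this interface:

  // Sketch only: every option left unset keeps the connection-level default.
  RawAsyncTable table = conn.getRawTableBuilder(TableName.valueOf("test"))
      .setReadRpcTimeout(2, TimeUnit.SECONDS)
      .setWriteRpcTimeout(5, TimeUnit.SECONDS)
      .setRetryPause(100, TimeUnit.MILLISECONDS)
      .setMaxAttempts(10)
      .setStartLogErrorsCnt(3)
      .build();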

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java
new file mode 100644
index 0000000..766895e
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java
@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts;
+
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Base class for all asynchronous table builders.
+ */
+@InterfaceAudience.Private
+abstract class AsyncTableBuilderBase<T extends AsyncTableBase> implements AsyncTableBuilder<T> {
+
+  protected TableName tableName;
+
+  protected long operationTimeoutNs;
+
+  protected long scanTimeoutNs;
+
+  protected long rpcTimeoutNs;
+
+  protected long readRpcTimeoutNs;
+
+  protected long writeRpcTimeoutNs;
+
+  protected long pauseNs;
+
+  protected int maxAttempts;
+
+  protected int startLogErrorsCnt;
+
+  AsyncTableBuilderBase(TableName tableName, AsyncConnectionConfiguration connConf) {
+    this.tableName = tableName;
+    this.operationTimeoutNs = tableName.isSystemTable() ? connConf.getMetaOperationTimeoutNs()
+        : connConf.getOperationTimeoutNs();
+    this.scanTimeoutNs = connConf.getScanTimeoutNs();
+    this.rpcTimeoutNs = connConf.getRpcTimeoutNs();
+    this.readRpcTimeoutNs = connConf.getReadRpcTimeoutNs();
+    this.writeRpcTimeoutNs = connConf.getWriteRpcTimeoutNs();
+    this.pauseNs = connConf.getPauseNs();
+    this.maxAttempts = retries2Attempts(connConf.getMaxRetries());
+    this.startLogErrorsCnt = connConf.getStartLogErrorsCnt();
+  }
+
+  @Override
+  public AsyncTableBuilderBase<T> setOperationTimeout(long timeout, TimeUnit unit) {
+    this.operationTimeoutNs = unit.toNanos(timeout);
+    return this;
+  }
+
+  @Override
+  public AsyncTableBuilderBase<T> setScanTimeout(long timeout, TimeUnit unit) {
+    this.scanTimeoutNs = unit.toNanos(timeout);
+    return this;
+  }
+
+  @Override
+  public AsyncTableBuilderBase<T> setRpcTimeout(long timeout, TimeUnit unit) {
+    this.rpcTimeoutNs = unit.toNanos(timeout);
+    return this;
+  }
+
+  @Override
+  public AsyncTableBuilderBase<T> setReadRpcTimeout(long timeout, TimeUnit unit) {
+    this.readRpcTimeoutNs = unit.toNanos(timeout);
+    return this;
+  }
+
+  @Override
+  public AsyncTableBuilderBase<T> setWriteRpcTimeout(long timeout, TimeUnit unit) {
+    this.writeRpcTimeoutNs = unit.toNanos(timeout);
+    return this;
+  }
+
+  @Override
+  public AsyncTableBuilderBase<T> setRetryPause(long pause, TimeUnit unit) {
+    this.pauseNs = unit.toNanos(pause);
+    return this;
+  }
+
+  @Override
+  public AsyncTableBuilderBase<T> setMaxAttempts(int maxAttempts) {
+    this.maxAttempts = maxAttempts;
+    return this;
+  }
+
+  @Override
+  public AsyncTableBuilderBase<T> setStartLogErrorsCnt(int startLogErrorsCnt) {
+    this.startLogErrorsCnt = startLogErrorsCnt;
+    return this;
+  }
+}

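For readers following the API change above: a minimal usage sketch of the new builder, pieced together from the tests later in this patch. The class name, table name and values are illustrative only, not part of the patch.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RawAsyncTable;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncTableBuilderExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf)) {
      // Per-table tuning now happens once, at build time; the built table is immutable.
      RawAsyncTable table = conn.getRawTableBuilder(TableName.valueOf("test"))
          .setReadRpcTimeout(1, TimeUnit.SECONDS)
          .setRetryPause(100, TimeUnit.MILLISECONDS)
          .setMaxRetries(30)
          .build();
      // The future completes on the rpc framework's callback thread, so keep the callback cheap.
      table.get(new Get(Bytes.toBytes("row"))).thenAccept(r -> System.out.println(r)).join();
    }
  }
}
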
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
index 7281185..7cd257c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java
@@ -22,7 +22,7 @@ import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
+import static java.util.stream.Collectors.toList;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
@@ -42,8 +42,8 @@ class AsyncTableImpl implements AsyncTable {
 
   private final long defaultScannerMaxResultSize;
 
-  public AsyncTableImpl(AsyncConnectionImpl conn, TableName tableName, ExecutorService pool) {
-    this.rawTable = conn.getRawTable(tableName);
+  AsyncTableImpl(AsyncConnectionImpl conn, RawAsyncTable rawTable, ExecutorService pool) {
+    this.rawTable = rawTable;
     this.pool = pool;
     this.defaultScannerMaxResultSize = conn.connConf.getScannerMaxResultSize();
   }
@@ -59,8 +59,8 @@ class AsyncTableImpl implements AsyncTable {
   }
 
   @Override
-  public void setReadRpcTimeout(long timeout, TimeUnit unit) {
-    rawTable.setReadRpcTimeout(timeout, unit);
+  public long getRpcTimeout(TimeUnit unit) {
+    return rawTable.getRpcTimeout(unit);
   }
 
   @Override
@@ -69,31 +69,16 @@ class AsyncTableImpl implements AsyncTable {
   }
 
   @Override
-  public void setWriteRpcTimeout(long timeout, TimeUnit unit) {
-    rawTable.setWriteRpcTimeout(timeout, unit);
-  }
-
-  @Override
   public long getWriteRpcTimeout(TimeUnit unit) {
     return rawTable.getWriteRpcTimeout(unit);
   }
 
   @Override
-  public void setOperationTimeout(long timeout, TimeUnit unit) {
-    rawTable.setOperationTimeout(timeout, unit);
-  }
-
-  @Override
   public long getOperationTimeout(TimeUnit unit) {
     return rawTable.getOperationTimeout(unit);
   }
 
   @Override
-  public void setScanTimeout(long timeout, TimeUnit unit) {
-    rawTable.setScanTimeout(timeout, unit);
-  }
-
-  @Override
   public long getScanTimeout(TimeUnit unit) {
     return rawTable.getScanTimeout(unit);
   }
@@ -194,7 +179,22 @@ class AsyncTableImpl implements AsyncTable {
   }
 
   @Override
+  public List<CompletableFuture<Result>> get(List<Get> gets) {
+    return rawTable.get(gets).stream().map(this::wrap).collect(toList());
+  }
+
+  @Override
+  public List<CompletableFuture<Void>> put(List<Put> puts) {
+    return rawTable.put(puts).stream().map(this::wrap).collect(toList());
+  }
+
+  @Override
+  public List<CompletableFuture<Void>> delete(List<Delete> deletes) {
+    return rawTable.delete(deletes).stream().map(this::wrap).collect(toList());
+  }
+
+  @Override
   public <T> List<CompletableFuture<T>> batch(List<? extends Row> actions) {
-    return rawTable.<T> batch(actions).stream().map(this::wrap).collect(Collectors.toList());
+    return rawTable.<T> batch(actions).stream().map(this::wrap).collect(toList());
   }
 }

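A hedged sketch of the new list variants on the thread-pool backed AsyncTable; get, put and delete over lists return one future per action. The getTableBuilder overload with an ExecutorService appears in the AsyncConnection diff later in this series; everything else here is illustrative.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiGetExample {
  static void multiGet(AsyncConnection conn) {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    AsyncTable table = conn.getTableBuilder(TableName.valueOf("test"), pool).build();
    List<Get> gets = Arrays.asList(new Get(Bytes.toBytes("r1")), new Get(Bytes.toBytes("r2")));
    // One future per Get; with the pool-backed AsyncTable the callbacks run on 'pool',
    // not on the rpc framework's threads.
    List<CompletableFuture<Result>> futures = table.get(gets);
    futures.forEach(f -> f.thenAccept(r -> System.out.println(Bytes.toString(r.getRow()))));
  }
}
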
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 6f4a844..1abf3f2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -342,16 +342,6 @@ public final class ConnectionUtils {
     return gets.stream().map(ConnectionUtils::toCheckExistenceOnly).collect(toList());
   }
 
-  static List<CompletableFuture<Void>> voidBatch(AsyncTableBase table,
-      List<? extends Row> actions) {
-    return table.<Object> batch(actions).stream().map(f -> f.<Void> thenApply(r -> null))
-        .collect(toList());
-  }
-
-  static CompletableFuture<Void> voidBatchAll(AsyncTableBase table, List<? extends Row> actions) {
-    return table.<Object> batchAll(actions).thenApply(r -> null);
-  }
-
   static RegionLocateType getLocateType(Scan scan) {
     if (scan.isReversed()) {
       if (isEmptyStartRow(scan.getStartRow())) {
@@ -389,4 +379,9 @@ public final class ConnectionUtils {
     // the region.
     return Bytes.compareTo(info.getStartKey(), scan.getStopRow()) <= 0;
   }
+
+  static <T> CompletableFuture<List<T>> allOf(List<CompletableFuture<T>> futures) {
+    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
+        .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList()));
+  }
 }

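The new allOf helper is package-private, but the same composition works at the call site when a single future over all results is wanted. A sketch, with the helper name getAll being illustrative:

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.stream.Collectors;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RawAsyncTable;
import org.apache.hadoop.hbase.client.Result;

public class AllOfExample {
  // Mirrors ConnectionUtils.allOf: fold the per-Get futures into one future over all results.
  static CompletableFuture<List<Result>> getAll(RawAsyncTable table, List<Get> gets) {
    List<CompletableFuture<Result>> futures = table.get(gets);
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]))
        .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(Collectors.toList()));
  }
}
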
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTable.java
index 0c292a6..67099e8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTable.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 /**
  * A low level asynchronous table.
  * <p>
+ * The implementation is required to be thread safe.
+ * <p>
  * The returned {@code CompletableFuture} will be finished directly in the rpc framework's callback
  * thread, so typically you should not do any time consuming work inside these methods, otherwise
  * you will be likely to block at least one connection to RS(even more if the rpc framework uses

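Given the warning above about the callback thread, a small sketch of the usual hand-off to an application pool; class and method names are illustrative:

import java.util.concurrent.ExecutorService;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.RawAsyncTable;
import org.apache.hadoop.hbase.util.Bytes;

public class CallbackHandoffExample {
  // Keep the rpc callback thread free: hop onto an application pool before doing heavy work.
  static void getAndProcess(RawAsyncTable table, ExecutorService appPool) {
    table.get(new Get(Bytes.toBytes("row")))
        .thenAcceptAsync(result -> {
          // Anything slow (parsing, writing to another store, ...) runs on appPool,
          // not on the connection's event loop.
          System.out.println(result);
        }, appPool);
  }
}
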
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
index 347c85b..d9d2d35 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncTableImpl.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static java.util.stream.Collectors.toList;
 import static org.apache.hadoop.hbase.client.ConnectionUtils.checkHasFamilies;
 
 import java.io.IOException;
@@ -67,24 +68,35 @@ class RawAsyncTableImpl implements RawAsyncTable {
 
   private final long defaultScannerMaxResultSize;
 
-  private long readRpcTimeoutNs;
+  private final long rpcTimeoutNs;
 
-  private long writeRpcTimeoutNs;
+  private final long readRpcTimeoutNs;
 
-  private long operationTimeoutNs;
+  private final long writeRpcTimeoutNs;
 
-  private long scanTimeoutNs;
+  private final long operationTimeoutNs;
 
-  public RawAsyncTableImpl(AsyncConnectionImpl conn, TableName tableName) {
+  private final long scanTimeoutNs;
+
+  private final long pauseNs;
+
+  private final int maxAttempts;
+
+  private final int startLogErrorsCnt;
+
+  RawAsyncTableImpl(AsyncConnectionImpl conn, AsyncTableBuilderBase<?> builder) {
     this.conn = conn;
-    this.tableName = tableName;
-    this.readRpcTimeoutNs = conn.connConf.getReadRpcTimeoutNs();
-    this.writeRpcTimeoutNs = conn.connConf.getWriteRpcTimeoutNs();
-    this.operationTimeoutNs = tableName.isSystemTable() ? conn.connConf.getMetaOperationTimeoutNs()
-        : conn.connConf.getOperationTimeoutNs();
+    this.tableName = builder.tableName;
+    this.rpcTimeoutNs = builder.rpcTimeoutNs;
+    this.readRpcTimeoutNs = builder.readRpcTimeoutNs;
+    this.writeRpcTimeoutNs = builder.writeRpcTimeoutNs;
+    this.operationTimeoutNs = builder.operationTimeoutNs;
+    this.scanTimeoutNs = builder.scanTimeoutNs;
+    this.pauseNs = builder.pauseNs;
+    this.maxAttempts = builder.maxAttempts;
+    this.startLogErrorsCnt = builder.startLogErrorsCnt;
     this.defaultScannerCaching = conn.connConf.getScannerCaching();
     this.defaultScannerMaxResultSize = conn.connConf.getScannerMaxResultSize();
-    this.scanTimeoutNs = conn.connConf.getScanTimeoutNs();
   }
 
   @Override
@@ -178,7 +190,9 @@ class RawAsyncTableImpl implements RawAsyncTable {
   private <T> SingleRequestCallerBuilder<T> newCaller(byte[] row, long rpcTimeoutNs) {
     return conn.callerFactory.<T> single().table(tableName).row(row)
         .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
-        .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS);
+        .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
+        .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts)
+        .startLogErrorsCnt(startLogErrorsCnt);
   }
 
   private <T> SingleRequestCallerBuilder<T> newCaller(Row row, long rpcTimeoutNs) {
@@ -214,7 +228,7 @@ class RawAsyncTableImpl implements RawAsyncTable {
   @Override
   public CompletableFuture<Result> append(Append append) {
     checkHasFamilies(append);
-    return this.<Result> newCaller(append, writeRpcTimeoutNs)
+    return this.<Result> newCaller(append, rpcTimeoutNs)
         .action((controller, loc, stub) -> this.<Append, Result> noncedMutate(controller, loc, stub,
           append, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
         .call();
@@ -223,7 +237,7 @@ class RawAsyncTableImpl implements RawAsyncTable {
   @Override
   public CompletableFuture<Result> increment(Increment increment) {
     checkHasFamilies(increment);
-    return this.<Result> newCaller(increment, writeRpcTimeoutNs)
+    return this.<Result> newCaller(increment, rpcTimeoutNs)
         .action((controller, loc, stub) -> this.<Increment, Result> noncedMutate(controller, loc,
           stub, increment, RequestConverter::buildMutateRequest, RawAsyncTableImpl::toResult))
         .call();
@@ -232,7 +246,7 @@ class RawAsyncTableImpl implements RawAsyncTable {
   @Override
   public CompletableFuture<Boolean> checkAndPut(byte[] row, byte[] family, byte[] qualifier,
       CompareOp compareOp, byte[] value, Put put) {
-    return this.<Boolean> newCaller(row, writeRpcTimeoutNs)
+    return this.<Boolean> newCaller(row, rpcTimeoutNs)
         .action((controller, loc, stub) -> RawAsyncTableImpl.<Put, Boolean> mutate(controller, loc,
           stub, put,
           (rn, p) -> RequestConverter.buildMutateRequest(rn, row, family, qualifier,
@@ -244,7 +258,7 @@ class RawAsyncTableImpl implements RawAsyncTable {
   @Override
   public CompletableFuture<Boolean> checkAndDelete(byte[] row, byte[] family, byte[] qualifier,
       CompareOp compareOp, byte[] value, Delete delete) {
-    return this.<Boolean> newCaller(row, writeRpcTimeoutNs)
+    return this.<Boolean> newCaller(row, rpcTimeoutNs)
         .action((controller, loc, stub) -> RawAsyncTableImpl.<Delete, Boolean> mutate(controller,
           loc, stub, delete,
           (rn, d) -> RequestConverter.buildMutateRequest(rn, row, family, qualifier,
@@ -303,20 +317,18 @@ class RawAsyncTableImpl implements RawAsyncTable {
           RegionAction.Builder regionMutationBuilder = RequestConverter.buildRegionAction(rn, rm);
           regionMutationBuilder.setAtomic(true);
           return MultiRequest.newBuilder().addRegionAction(regionMutationBuilder.build()).build();
-        }, (resp) -> {
-          return null;
-        })).call();
+        }, resp -> null)).call();
   }
 
   @Override
   public CompletableFuture<Boolean> checkAndMutate(byte[] row, byte[] family, byte[] qualifier,
       CompareOp compareOp, byte[] value, RowMutations mutation) {
-    return this.<Boolean> newCaller(mutation, writeRpcTimeoutNs)
+    return this.<Boolean> newCaller(mutation, rpcTimeoutNs)
         .action((controller, loc, stub) -> RawAsyncTableImpl.<Boolean> mutateRow(controller, loc,
           stub, mutation,
           (rn, rm) -> RequestConverter.buildMutateRequest(rn, row, family, qualifier,
             new BinaryComparator(value), CompareType.valueOf(compareOp.name()), rm),
-          (resp) -> resp.getExists()))
+          resp -> resp.getExists()))
         .call();
   }
 
@@ -349,7 +361,8 @@ class RawAsyncTableImpl implements RawAsyncTable {
     }
     return conn.callerFactory.smallScan().table(tableName).setScan(setDefaultScanConfig(scan))
         .limit(limit).scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS)
-        .rpcTimeout(readRpcTimeoutNs, TimeUnit.NANOSECONDS).call();
+        .rpcTimeout(readRpcTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS)
+        .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt).call();
   }
 
   public void scan(Scan scan, RawScanResultConsumer consumer) {
@@ -362,55 +375,63 @@ class RawAsyncTableImpl implements RawAsyncTable {
       }
     }
     scan = setDefaultScanConfig(scan);
-    new AsyncClientScanner(scan, consumer, tableName, conn, scanTimeoutNs, readRpcTimeoutNs)
-        .start();
+    new AsyncClientScanner(scan, consumer, tableName, conn, pauseNs, maxAttempts, scanTimeoutNs,
+        readRpcTimeoutNs, startLogErrorsCnt).start();
   }
 
   @Override
-  public void setReadRpcTimeout(long timeout, TimeUnit unit) {
-    this.readRpcTimeoutNs = unit.toNanos(timeout);
+  public List<CompletableFuture<Result>> get(List<Get> gets) {
+    return batch(gets, readRpcTimeoutNs);
   }
 
   @Override
-  public long getReadRpcTimeout(TimeUnit unit) {
-    return unit.convert(readRpcTimeoutNs, TimeUnit.NANOSECONDS);
+  public List<CompletableFuture<Void>> put(List<Put> puts) {
+    return voidMutate(puts);
   }
-
   @Override
-  public void setWriteRpcTimeout(long timeout, TimeUnit unit) {
-    this.writeRpcTimeoutNs = unit.toNanos(timeout);
+  public List<CompletableFuture<Void>> delete(List<Delete> deletes) {
+    return voidMutate(deletes);
   }
 
   @Override
-  public long getWriteRpcTimeout(TimeUnit unit) {
-    return unit.convert(writeRpcTimeoutNs, TimeUnit.NANOSECONDS);
+  public <T> List<CompletableFuture<T>> batch(List<? extends Row> actions) {
+    return batch(actions, rpcTimeoutNs);
+  }
+
+  private List<CompletableFuture<Void>> voidMutate(List<? extends Row> actions) {
+    return this.<Object> batch(actions, writeRpcTimeoutNs).stream()
+        .map(f -> f.<Void> thenApply(r -> null)).collect(toList());
+  }
+
+  private <T> List<CompletableFuture<T>> batch(List<? extends Row> actions, long rpcTimeoutNs) {
+    return conn.callerFactory.batch().table(tableName).actions(actions)
+        .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
+        .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS)
+        .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt).call();
   }
 
   @Override
-  public void setOperationTimeout(long timeout, TimeUnit unit) {
-    this.operationTimeoutNs = unit.toNanos(timeout);
+  public long getRpcTimeout(TimeUnit unit) {
+    return unit.convert(rpcTimeoutNs, TimeUnit.NANOSECONDS);
   }
 
   @Override
-  public long getOperationTimeout(TimeUnit unit) {
-    return unit.convert(operationTimeoutNs, TimeUnit.NANOSECONDS);
+  public long getReadRpcTimeout(TimeUnit unit) {
+    return unit.convert(readRpcTimeoutNs, TimeUnit.NANOSECONDS);
   }
 
   @Override
-  public void setScanTimeout(long timeout, TimeUnit unit) {
-    this.scanTimeoutNs = unit.toNanos(timeout);
+  public long getWriteRpcTimeout(TimeUnit unit) {
+    return unit.convert(writeRpcTimeoutNs, TimeUnit.NANOSECONDS);
   }
 
   @Override
-  public long getScanTimeout(TimeUnit unit) {
-    return TimeUnit.NANOSECONDS.convert(scanTimeoutNs, unit);
+  public long getOperationTimeout(TimeUnit unit) {
+    return unit.convert(operationTimeoutNs, TimeUnit.NANOSECONDS);
   }
 
   @Override
-  public <T> List<CompletableFuture<T>> batch(List<? extends Row> actions) {
-    return conn.callerFactory.batch().table(tableName).actions(actions)
-        .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
-        .readRpcTimeout(readRpcTimeoutNs, TimeUnit.NANOSECONDS)
-        .writeRpcTimeout(writeRpcTimeoutNs, TimeUnit.NANOSECONDS).call();
+  public long getScanTimeout(TimeUnit unit) {
+    return unit.convert(scanTimeoutNs, TimeUnit.NANOSECONDS);
   }
 }
\ No newline at end of file

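With the setters removed, each timeout is fixed when the table is built. A sketch of how the builder values map onto operations in this implementation, following the diff above; table name and values are illustrative:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.RawAsyncTable;

public class TimeoutMappingExample {
  static RawAsyncTable build(AsyncConnection conn) {
    return conn.getRawTableBuilder(TableName.valueOf("test"))
        .setRpcTimeout(2, TimeUnit.SECONDS)       // append/increment/checkAnd*/batch
        .setReadRpcTimeout(1, TimeUnit.SECONDS)   // get, small scan and scan
        .setWriteRpcTimeout(5, TimeUnit.SECONDS)  // put/delete (voidMutate)
        .setOperationTimeout(30, TimeUnit.SECONDS)
        .build();
  }
}
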
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 1eec691..609e9a5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -875,10 +875,7 @@ public final class HConstants {
 
   /**
    * timeout for each RPC
-   * @deprecated Use {@link #HBASE_RPC_READ_TIMEOUT_KEY} or {@link #HBASE_RPC_WRITE_TIMEOUT_KEY}
-   * instead.
    */
-  @Deprecated
   public static final String HBASE_RPC_TIMEOUT_KEY = "hbase.rpc.timeout";
 
   /**

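Since hbase.rpc.timeout is the general default again, connection-wide values still come from the Configuration while per-table overrides go through the builder. A small configuration sketch; the values are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class RpcTimeoutConfigExample {
  static Configuration withTimeouts() {
    Configuration conf = HBaseConfiguration.create();
    // General per-RPC default, un-deprecated by this change.
    conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 60000);
    // The read-specific key still exists for callers that want a tighter read timeout.
    conf.setInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, 10000);
    return conf;
  }
}
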
http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
index 4a391e0..7f54449 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncSingleRequestRpcRetryingCaller.java
@@ -30,16 +30,14 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -60,7 +58,7 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
 
   private static byte[] VALUE = Bytes.toBytes("value");
 
-  private AsyncConnectionImpl asyncConn;
+  private static AsyncConnectionImpl CONN;
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -68,38 +66,24 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
     TEST_UTIL.getAdmin().setBalancerRunning(false, true);
     TEST_UTIL.createTable(TABLE_NAME, FAMILY);
     TEST_UTIL.waitTableAvailable(TABLE_NAME);
+    CONN = new AsyncConnectionImpl(TEST_UTIL.getConfiguration(), User.getCurrent());
   }
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
+    IOUtils.closeQuietly(CONN);
     TEST_UTIL.shutdownMiniCluster();
   }
 
-  @After
-  public void tearDown() {
-    if (asyncConn != null) {
-      asyncConn.close();
-      asyncConn = null;
-    }
-  }
-
-  private void initConn(int startLogErrorsCnt, long pauseMs, int maxRetires) throws IOException {
-    Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
-    conf.setInt(AsyncProcess.START_LOG_ERRORS_AFTER_COUNT_KEY, startLogErrorsCnt);
-    conf.setLong(HConstants.HBASE_CLIENT_PAUSE, pauseMs);
-    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, maxRetires);
-    asyncConn = new AsyncConnectionImpl(conf, User.getCurrent());
-  }
-
   @Test
   public void testRegionMove() throws InterruptedException, ExecutionException, IOException {
-    initConn(0, 100, 30);
     // This will leave a cached entry in location cache
-    HRegionLocation loc = asyncConn.getRegionLocator(TABLE_NAME).getRegionLocation(ROW).get();
+    HRegionLocation loc = CONN.getRegionLocator(TABLE_NAME).getRegionLocation(ROW).get();
     int index = TEST_UTIL.getHBaseCluster().getServerWith(loc.getRegionInfo().getRegionName());
     TEST_UTIL.getAdmin().move(loc.getRegionInfo().getEncodedNameAsBytes(), Bytes.toBytes(
       TEST_UTIL.getHBaseCluster().getRegionServer(1 - index).getServerName().getServerName()));
-    RawAsyncTable table = asyncConn.getRawTable(TABLE_NAME);
+    RawAsyncTable table = CONN.getRawTableBuilder(TABLE_NAME).setRetryPause(100, TimeUnit.MILLISECONDS)
+        .setMaxRetries(30).build();
     table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)).get();
 
     // move back
@@ -117,9 +101,9 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
 
   @Test
   public void testMaxRetries() throws IOException, InterruptedException {
-    initConn(0, 10, 2);
     try {
-      asyncConn.callerFactory.single().table(TABLE_NAME).row(ROW).operationTimeout(1, TimeUnit.DAYS)
+      CONN.callerFactory.single().table(TABLE_NAME).row(ROW).operationTimeout(1, TimeUnit.DAYS)
+          .maxAttempts(3).pause(10, TimeUnit.MILLISECONDS)
           .action((controller, loc, stub) -> failedFuture()).call().get();
       fail();
     } catch (ExecutionException e) {
@@ -129,14 +113,14 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
 
   @Test
   public void testOperationTimeout() throws IOException, InterruptedException {
-    initConn(0, 100, Integer.MAX_VALUE);
     long startNs = System.nanoTime();
     try {
-      asyncConn.callerFactory.single().table(TABLE_NAME).row(ROW)
-          .operationTimeout(1, TimeUnit.SECONDS).action((controller, loc, stub) -> failedFuture())
-          .call().get();
+      CONN.callerFactory.single().table(TABLE_NAME).row(ROW).operationTimeout(1, TimeUnit.SECONDS)
+          .pause(100, TimeUnit.MILLISECONDS).maxAttempts(Integer.MAX_VALUE)
+          .action((controller, loc, stub) -> failedFuture()).call().get();
       fail();
     } catch (ExecutionException e) {
+      e.printStackTrace();
       assertThat(e.getCause(), instanceOf(RetriesExhaustedException.class));
     }
     long costNs = System.nanoTime() - startNs;
@@ -146,12 +130,11 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
 
   @Test
   public void testLocateError() throws IOException, InterruptedException, ExecutionException {
-    initConn(0, 100, 5);
     AtomicBoolean errorTriggered = new AtomicBoolean(false);
     AtomicInteger count = new AtomicInteger(0);
-    HRegionLocation loc = asyncConn.getRegionLocator(TABLE_NAME).getRegionLocation(ROW).get();
+    HRegionLocation loc = CONN.getRegionLocator(TABLE_NAME).getRegionLocation(ROW).get();
     AsyncRegionLocator mockedLocator =
-        new AsyncRegionLocator(asyncConn, AsyncConnectionImpl.RETRY_TIMER) {
+        new AsyncRegionLocator(CONN, AsyncConnectionImpl.RETRY_TIMER) {
           @Override
           CompletableFuture<HRegionLocation> getRegionLocation(TableName tableName, byte[] row,
               RegionLocateType locateType, long timeoutNs) {
@@ -174,14 +157,15 @@ public class TestAsyncSingleRequestRpcRetryingCaller {
           }
         };
     try (AsyncConnectionImpl mockedConn =
-        new AsyncConnectionImpl(asyncConn.getConfiguration(), User.getCurrent()) {
+        new AsyncConnectionImpl(CONN.getConfiguration(), User.getCurrent()) {
 
           @Override
           AsyncRegionLocator getLocator() {
             return mockedLocator;
           }
         }) {
-      RawAsyncTable table = new RawAsyncTableImpl(mockedConn, TABLE_NAME);
+      RawAsyncTable table = mockedConn.getRawTableBuilder(TABLE_NAME)
+          .setRetryPause(100, TimeUnit.MILLISECONDS).setMaxRetries(5).build();
       table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, VALUE)).get();
       assertTrue(errorTriggered.get());
       errorTriggered.set(false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index 82fe3cd..880114a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT;
-import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_RETRIES_NUMBER;
-import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_READ_TIMEOUT_KEY;
 import static org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.TABLES_ON_MASTER;
 import static org.junit.Assert.assertEquals;
 
@@ -33,6 +31,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
@@ -72,6 +71,8 @@ public class TestAsyncTableGetMultiThreaded {
 
   private static AsyncConnection CONN;
 
+  private static RawAsyncTable TABLE;
+
   private static byte[][] SPLIT_KEYS;
 
   @BeforeClass
@@ -79,14 +80,13 @@ public class TestAsyncTableGetMultiThreaded {
     setUp(HColumnDescriptor.MemoryCompaction.NONE);
   }
 
-  protected static void setUp(HColumnDescriptor.MemoryCompaction memoryCompaction) throws Exception {
+  protected static void setUp(HColumnDescriptor.MemoryCompaction memoryCompaction)
+      throws Exception {
     TEST_UTIL.getConfiguration().set(TABLES_ON_MASTER, "none");
     TEST_UTIL.getConfiguration().setLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 60000L);
-    TEST_UTIL.getConfiguration().setLong(HBASE_RPC_READ_TIMEOUT_KEY, 1000L);
-    TEST_UTIL.getConfiguration().setInt(HBASE_CLIENT_RETRIES_NUMBER, 1000);
     TEST_UTIL.getConfiguration().setInt(ByteBufferPool.MAX_POOL_SIZE_KEY, 100);
     TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(memoryCompaction));
+      String.valueOf(memoryCompaction));
 
     TEST_UTIL.startMiniCluster(5);
     SPLIT_KEYS = new byte[8][];
@@ -96,10 +96,11 @@ public class TestAsyncTableGetMultiThreaded {
     TEST_UTIL.createTable(TABLE_NAME, FAMILY);
     TEST_UTIL.waitTableAvailable(TABLE_NAME);
     CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration());
-    CONN.getRawTable(TABLE_NAME)
-        .putAll(
-          IntStream.range(0, COUNT).mapToObj(i -> new Put(Bytes.toBytes(String.format("%03d", i)))
-              .addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i))).collect(Collectors.toList()))
+    TABLE = CONN.getRawTableBuilder(TABLE_NAME).setReadRpcTimeout(1, TimeUnit.SECONDS)
+        .setMaxRetries(1000).build();
+    TABLE.putAll(
+      IntStream.range(0, COUNT).mapToObj(i -> new Put(Bytes.toBytes(String.format("%03d", i)))
+          .addColumn(FAMILY, QUALIFIER, Bytes.toBytes(i))).collect(Collectors.toList()))
         .get();
   }
 
@@ -112,11 +113,8 @@ public class TestAsyncTableGetMultiThreaded {
   private void run(AtomicBoolean stop) throws InterruptedException, ExecutionException {
     while (!stop.get()) {
       for (int i = 0; i < COUNT; i++) {
-        assertEquals(i,
-            Bytes.toInt(
-                CONN.getRawTable(TABLE_NAME).get(new Get(Bytes.toBytes(String.format("%03d", i))))
-                    .get()
-                    .getValue(FAMILY, QUALIFIER)));
+        assertEquals(i, Bytes.toInt(TABLE.get(new Get(Bytes.toBytes(String.format("%03d", i))))
+            .get().getValue(FAMILY, QUALIFIER)));
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4ab95ebb/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java
index 270e3e1..9f3970b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRawAsyncTableScan.java
@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Queue;
-import java.util.concurrent.TimeUnit;
 import java.util.function.Supplier;
 
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -122,10 +121,7 @@ public class TestRawAsyncTableScan extends AbstractTestAsyncTableScan {
   @Override
   protected List<Result> doScan(Scan scan) throws Exception {
     SimpleRawScanResultConsumer scanConsumer = new SimpleRawScanResultConsumer();
-    RawAsyncTable table = ASYNC_CONN.getRawTable(TABLE_NAME);
-    table.setScanTimeout(1, TimeUnit.HOURS);
-    table.setReadRpcTimeout(1, TimeUnit.HOURS);
-    table.scan(scan, scanConsumer);
+    ASYNC_CONN.getRawTable(TABLE_NAME).scan(scan, scanConsumer);
     List<Result> results = new ArrayList<>();
     for (Result result; (result = scanConsumer.take()) != null;) {
       results.add(result);


[16/50] [abbrv] hbase git commit: HBASE-17396 Add first async admin impl and implement balance methods

Posted by el...@apache.org.
HBASE-17396 Add first async admin impl and implement balance methods


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cb9ce2ce
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cb9ce2ce
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cb9ce2ce

Branch: refs/heads/HBASE-16961
Commit: cb9ce2ceafb5467522b1b380956446e40b8250d5
Parents: 8f1d0a2
Author: Guanghao Zhang <zg...@apache.org>
Authored: Thu Jan 19 10:15:12 2017 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Thu Jan 19 10:15:12 2017 +0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  64 +++++++
 .../hadoop/hbase/client/AsyncConnection.java    |   9 +
 .../hbase/client/AsyncConnectionImpl.java       | 106 ++++++++++++
 .../hadoop/hbase/client/AsyncHBaseAdmin.java    | 144 ++++++++++++++++
 .../AsyncMasterRequestRpcRetryingCaller.java    |  73 ++++++++
 .../hbase/client/AsyncRpcRetryingCaller.java    | 151 +++++++++++++++++
 .../client/AsyncRpcRetryingCallerFactory.java   |  55 ++++++
 .../AsyncSingleRequestRpcRetryingCaller.java    | 169 ++++---------------
 .../hadoop/hbase/client/TestAsyncAdmin.java     |  87 ++++++++++
 9 files changed, 720 insertions(+), 138 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/cb9ce2ce/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
new file mode 100644
index 0000000..fadeebe
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ *  The asynchronous administrative API for HBase.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public interface AsyncAdmin {
+
+  /**
+   * Turn the load balancer on or off.
+   * @param on true to enable the balancer, false to disable it
+   * @return Previous balancer value wrapped by a {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean> setBalancerRunning(final boolean on) throws IOException;
+
+  /**
+   * Invoke the balancer. Will run the balancer and, if there are regions to move, it will go
+   * ahead and do the reassignments. Can NOT run for various reasons. Check logs.
+   * @return True if balancer ran, false otherwise. The return value will be wrapped by a
+   *         {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean> balancer() throws IOException;
+
+  /**
+   * Invoke the balancer. Will run the balancer and, if there are regions to move, it will go
+   * ahead and do the reassignments. If regions are in transition, a force value of true will
+   * still run the balancer. Can NOT run for other reasons. Check logs.
+   * @param force whether we should force balance even if there is region in transition.
+   * @return True if balancer ran, false otherwise. The return value will be wrapped by a
+   *         {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean> balancer(boolean force) throws IOException;
+
+  /**
+   * Query the current state of the balancer.
+   * @return true if the balancer is enabled, false otherwise.
+   *         The return value will be wrapped by a {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean> isBalancerEnabled() throws IOException;
+}

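A hedged usage sketch of the new AsyncAdmin; getAdmin comes from the AsyncConnection change below, and the class name and call ordering are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncAdminBalancerExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf)) {
      AsyncAdmin admin = conn.getAdmin();
      // Every call returns a CompletableFuture; get() is only used here to keep the sketch linear.
      System.out.println("balancer enabled: " + admin.isBalancerEnabled().get());
      boolean previouslyEnabled = admin.setBalancerRunning(true).get();
      System.out.println("previously enabled: " + previouslyEnabled);
      System.out.println("balancer ran: " + admin.balancer().get());
    }
  }
}
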
http://git-wip-us.apache.org/repos/asf/hbase/blob/cb9ce2ce/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
index 9f114ac..dbe32ca 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java
@@ -96,4 +96,13 @@ public interface AsyncConnection extends Closeable {
    * @param pool the thread pool to use for executing callback
    */
   AsyncTableBuilder<AsyncTable> getTableBuilder(TableName tableName, ExecutorService pool);
+
+  /**
+   * Retrieve an AsyncAdmin implementation to administer an HBase cluster. The returned AsyncAdmin
+   * is not guaranteed to be thread-safe. A new instance should be created for each thread that uses it.
+   * This is a lightweight operation. Pooling or caching of the returned AsyncAdmin is not
+   * recommended.
+   * @return an AsyncAdmin instance for cluster administration
+   */
+  AsyncAdmin getAdmin();
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb9ce2ce/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
index c58500a..bc6a3b2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java
@@ -28,23 +28,32 @@ import io.netty.util.HashedWheelTimer;
 
 import java.io.IOException;
 import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcClient;
 import org.apache.hadoop.hbase.ipc.RpcClientFactory;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
 import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.Threads;
 
@@ -88,6 +97,11 @@ class AsyncConnectionImpl implements AsyncConnection {
 
   private final ConcurrentMap<String, ClientService.Interface> rsStubs = new ConcurrentHashMap<>();
 
+  private final AtomicReference<MasterService.Interface> masterStub = new AtomicReference<>();
+
+  private final AtomicReference<CompletableFuture<MasterService.Interface>> masterStubMakeFuture =
+      new AtomicReference<>();
+
   public AsyncConnectionImpl(Configuration conf, User user) {
     this.conf = conf;
     this.user = user;
@@ -149,6 +163,93 @@ class AsyncConnectionImpl implements AsyncConnection {
       () -> createRegionServerStub(serverName));
   }
 
+  private MasterService.Interface createMasterStub(ServerName serverName) throws IOException {
+    return MasterService.newStub(rpcClient.createRpcChannel(serverName, user, rpcTimeout));
+  }
+
+  private void makeMasterStub(CompletableFuture<MasterService.Interface> future) {
+    registry.getMasterAddress().whenComplete(
+      (sn, error) -> {
+        if (sn == null) {
+          String msg = "ZooKeeper available but no active master location found";
+          LOG.info(msg);
+          this.masterStubMakeFuture.getAndSet(null).completeExceptionally(
+            new MasterNotRunningException(msg));
+          return;
+        }
+        try {
+          MasterService.Interface stub = createMasterStub(sn);
+          HBaseRpcController controller = getRpcController();
+          stub.isMasterRunning(controller, RequestConverter.buildIsMasterRunningRequest(),
+            new RpcCallback<IsMasterRunningResponse>() {
+              @Override
+              public void run(IsMasterRunningResponse resp) {
+                if (controller.failed() || resp == null
+                    || !resp.getIsMasterRunning()) {
+                  masterStubMakeFuture.getAndSet(null).completeExceptionally(
+                    new MasterNotRunningException("Master connection is not running anymore"));
+                } else {
+                  masterStub.set(stub);
+                  masterStubMakeFuture.set(null);
+                  future.complete(stub);
+                }
+              }
+            });
+        } catch (IOException e) {
+          this.masterStubMakeFuture.getAndSet(null).completeExceptionally(
+            new IOException("Failed to create async master stub", e));
+        }
+      });
+  }
+
+  CompletableFuture<MasterService.Interface> getMasterStub() {
+    MasterService.Interface masterStub = this.masterStub.get();
+
+    if (masterStub == null) {
+      for (;;) {
+        if (this.masterStubMakeFuture.compareAndSet(null, new CompletableFuture<>())) {
+          CompletableFuture<MasterService.Interface> future = this.masterStubMakeFuture.get();
+          makeMasterStub(future);
+        } else {
+          CompletableFuture<MasterService.Interface> future = this.masterStubMakeFuture.get();
+          if (future != null) {
+            return future;
+          }
+        }
+      }
+    }
+
+    for (;;) {
+      if (masterStubMakeFuture.compareAndSet(null, new CompletableFuture<>())) {
+        CompletableFuture<MasterService.Interface> future = masterStubMakeFuture.get();
+        HBaseRpcController controller = getRpcController();
+        masterStub.isMasterRunning(controller, RequestConverter.buildIsMasterRunningRequest(),
+          new RpcCallback<IsMasterRunningResponse>() {
+            @Override
+            public void run(IsMasterRunningResponse resp) {
+              if (controller.failed() || resp == null
+                  || !resp.getIsMasterRunning()) {
+                makeMasterStub(future);
+              } else {
+                future.complete(masterStub);
+              }
+            }
+          });
+      } else {
+        CompletableFuture<MasterService.Interface> future = masterStubMakeFuture.get();
+        if (future != null) {
+          return future;
+        }
+      }
+    }
+  }
+
+  private HBaseRpcController getRpcController() {
+    HBaseRpcController controller = this.rpcControllerFactory.newController();
+    controller.setCallTimeout((int) TimeUnit.NANOSECONDS.toMillis(connConf.getRpcTimeoutNs()));
+    return controller;
+  }
+
   @Override
   public AsyncTableBuilder<RawAsyncTable> getRawTableBuilder(TableName tableName) {
     return new AsyncTableBuilderBase<RawAsyncTable>(tableName, connConf) {
@@ -171,4 +272,9 @@ class AsyncConnectionImpl implements AsyncConnection {
       }
     };
   }
+
+  @Override
+  public AsyncAdmin getAdmin() {
+    return new AsyncHBaseAdmin(this);
+  }
 }
\ No newline at end of file

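A simplified, generic sketch of the stub-caching scheme behind getMasterStub above. This is not the HBase code itself (the real implementation also re-validates a cached stub with isMasterRunning), but it shows the AtomicReference/CompletableFuture pattern: at most one creation attempt is in flight at a time, and concurrent callers share its future instead of each opening their own connection.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

final class CachedStub<S> {

  private final AtomicReference<S> cached = new AtomicReference<>();
  private final AtomicReference<CompletableFuture<S>> inFlight = new AtomicReference<>();
  private final Supplier<CompletableFuture<S>> maker;

  CachedStub(Supplier<CompletableFuture<S>> maker) {
    this.maker = maker;
  }

  CompletableFuture<S> get() {
    S stub = cached.get();
    if (stub != null) {
      return CompletableFuture.completedFuture(stub);
    }
    for (;;) {
      CompletableFuture<S> future = new CompletableFuture<>();
      if (inFlight.compareAndSet(null, future)) {
        // We won the race: create the stub, publish it, then clear the in-flight marker.
        maker.get().whenComplete((s, error) -> {
          if (error != null) {
            inFlight.set(null);
            future.completeExceptionally(error);
          } else {
            cached.set(s);
            inFlight.set(null);
            future.complete(s);
          }
        });
        return future;
      }
      CompletableFuture<S> other = inFlight.get();
      if (other != null) {
        return other; // Another caller is already creating the stub; share its future.
      }
      // The attempt finished between our CAS and the read; loop and try again.
    }
  }
}
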
http://git-wip-us.apache.org/repos/asf/hbase/blob/cb9ce2ce/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
new file mode 100644
index 0000000..1dd92e5
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -0,0 +1,144 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
+
+/**
+ * The implementation of AsyncAdmin.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class AsyncHBaseAdmin implements AsyncAdmin {
+
+  private final AsyncConnectionImpl connection;
+
+  private final long rpcTimeoutNs;
+
+  private final long operationTimeoutNs;
+
+  private final long pauseNs;
+
+  private final int maxAttempts;
+
+  private final int startLogErrorsCnt;
+
+  AsyncHBaseAdmin(AsyncConnectionImpl connection) {
+    this.connection = connection;
+    this.rpcTimeoutNs = connection.connConf.getRpcTimeoutNs();
+    this.operationTimeoutNs = connection.connConf.getOperationTimeoutNs();
+    this.pauseNs = connection.connConf.getPauseNs();
+    this.maxAttempts = connection.connConf.getMaxRetries();
+    this.startLogErrorsCnt = connection.connConf.getStartLogErrorsCnt();
+  }
+
+  private <T> MasterRequestCallerBuilder<T> newCaller() {
+    return this.connection.callerFactory.<T> masterRequest()
+        .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS)
+        .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS)
+        .pause(pauseNs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts)
+        .startLogErrorsCnt(startLogErrorsCnt);
+  }
+
+  @FunctionalInterface
+  private interface RpcCall<RESP, REQ> {
+    void call(MasterService.Interface stub, HBaseRpcController controller, REQ req,
+        RpcCallback<RESP> done);
+  }
+
+  @FunctionalInterface
+  private interface Converter<D, S> {
+    D convert(S src) throws IOException;
+  }
+
+  private <PREQ, PRESP, RESP> CompletableFuture<RESP> call(HBaseRpcController controller,
+      MasterService.Interface stub, PREQ preq, RpcCall<PRESP, PREQ> rpcCall,
+      Converter<RESP, PRESP> respConverter) {
+    CompletableFuture<RESP> future = new CompletableFuture<>();
+    rpcCall.call(stub, controller, preq, new RpcCallback<PRESP>() {
+
+      @Override
+      public void run(PRESP resp) {
+        if (controller.failed()) {
+          future.completeExceptionally(controller.getFailed());
+        } else {
+          try {
+            future.complete(respConverter.convert(resp));
+          } catch (IOException e) {
+            future.completeExceptionally(e);
+          }
+        }
+      }
+    });
+    return future;
+  }
+
+  @Override
+  public CompletableFuture<Boolean> setBalancerRunning(final boolean on) throws IOException {
+    return this
+        .<Boolean> newCaller()
+        .action(
+          (controller, stub) -> this
+              .<SetBalancerRunningRequest, SetBalancerRunningResponse, Boolean> call(controller,
+                stub, RequestConverter.buildSetBalancerRunningRequest(on, true),
+                (s, c, req, done) -> s.setBalancerRunning(c, req, done),
+                (resp) -> resp.getPrevBalanceValue())).call();
+  }
+
+  @Override
+  public CompletableFuture<Boolean> balancer() throws IOException {
+    return balancer(false);
+  }
+
+  @Override
+  public CompletableFuture<Boolean> balancer(boolean force) throws IOException {
+    return this
+        .<Boolean> newCaller()
+        .action(
+          (controller, stub) -> this.<BalanceRequest, BalanceResponse, Boolean> call(controller,
+            stub, RequestConverter.buildBalanceRequest(force),
+            (s, c, req, done) -> s.balance(c, req, done), (resp) -> resp.getBalancerRan())).call();
+  }
+
+  @Override
+  public CompletableFuture<Boolean> isBalancerEnabled() throws IOException {
+    return this
+        .<Boolean> newCaller()
+        .action(
+          (controller, stub) -> this.<IsBalancerEnabledRequest, IsBalancerEnabledResponse, Boolean> call(
+            controller, stub, RequestConverter.buildIsBalancerEnabledRequest(),
+            (s, c, req, done) -> s.isBalancerEnabled(c, req, done), (resp) -> resp.getEnabled()))
+        .call();
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb9ce2ce/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java
new file mode 100644
index 0000000..e2a3fee
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import io.netty.util.HashedWheelTimer;
+
+import java.util.concurrent.CompletableFuture;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
+
+/**
+ * Retry caller for a request call to master.
+ */
+@InterfaceAudience.Private
+public class AsyncMasterRequestRpcRetryingCaller<T> extends AsyncRpcRetryingCaller<T> {
+
+  @FunctionalInterface
+  public interface Callable<T> {
+    CompletableFuture<T> call(HBaseRpcController controller, MasterService.Interface stub);
+  }
+
+  private final Callable<T> callable;
+
+  public AsyncMasterRequestRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn,
+      Callable<T> callable, long pauseNs, int maxRetries, long operationTimeoutNs,
+      long rpcTimeoutNs, int startLogErrorsCnt) {
+    super(retryTimer, conn, pauseNs, maxRetries, operationTimeoutNs, rpcTimeoutNs,
+        startLogErrorsCnt);
+    this.callable = callable;
+  }
+
+  @Override
+  protected void doCall() {
+    conn.getMasterStub().whenComplete((stub, error) -> {
+      if (error != null) {
+        onError(error, () -> "Get async master stub failed", err -> {
+        });
+        return;
+      }
+      resetCallTimeout();
+      callable.call(controller, stub).whenComplete((result, error2) -> {
+        if (error2 != null) {
+          onError(error2, () -> "Call to master failed", err -> {
+          });
+          return;
+        }
+        future.complete(result);
+      });
+    });
+  }
+
+  public CompletableFuture<T> call() {
+    doCall();
+    return future;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb9ce2ce/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java
new file mode 100644
index 0000000..d449db1
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java
@@ -0,0 +1,151 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
+import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
+import io.netty.util.HashedWheelTimer;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.ipc.HBaseRpcController;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+@InterfaceAudience.Private
+public abstract class AsyncRpcRetryingCaller<T> {
+
+  private static final Log LOG = LogFactory.getLog(AsyncRpcRetryingCaller.class);
+
+  private final HashedWheelTimer retryTimer;
+
+  private final long startNs;
+
+  private final long pauseNs;
+
+  private int tries = 1;
+
+  private final int maxAttempts;
+
+  private final int startLogErrorsCnt;
+
+  private final List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions;
+
+  private final long rpcTimeoutNs;
+
+  protected final long operationTimeoutNs;
+
+  protected final AsyncConnectionImpl conn;
+
+  protected final CompletableFuture<T> future;
+
+  protected final HBaseRpcController controller;
+
+  public AsyncRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn,
+      long pauseNs, int maxAttempts, long operationTimeoutNs,
+      long rpcTimeoutNs, int startLogErrorsCnt) {
+    this.retryTimer = retryTimer;
+    this.conn = conn;
+    this.pauseNs = pauseNs;
+    this.maxAttempts = maxAttempts;
+    this.operationTimeoutNs = operationTimeoutNs;
+    this.rpcTimeoutNs = rpcTimeoutNs;
+    this.startLogErrorsCnt = startLogErrorsCnt;
+    this.future = new CompletableFuture<>();
+    this.controller = conn.rpcControllerFactory.newController();
+    this.exceptions = new ArrayList<>();
+    this.startNs = System.nanoTime();
+  }
+
+  private long elapsedMs() {
+    return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
+  }
+
+  protected long remainingTimeNs() {
+    return operationTimeoutNs - (System.nanoTime() - startNs);
+  }
+
+  protected void completeExceptionally() {
+    future.completeExceptionally(new RetriesExhaustedException(tries - 1, exceptions));
+  }
+
+  protected void resetCallTimeout() {
+    long callTimeoutNs;
+    if (operationTimeoutNs > 0) {
+      callTimeoutNs = remainingTimeNs();
+      if (callTimeoutNs <= 0) {
+        completeExceptionally();
+        return;
+      }
+      callTimeoutNs = Math.min(callTimeoutNs, rpcTimeoutNs);
+    } else {
+      callTimeoutNs = rpcTimeoutNs;
+    }
+    resetController(controller, callTimeoutNs);
+  }
+
+  protected void onError(Throwable error, Supplier<String> errMsg,
+      Consumer<Throwable> updateCachedLocation) {
+    error = translateException(error);
+    if (tries > startLogErrorsCnt) {
+      LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts
+          + ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs)
+          + " ms, time elapsed = " + elapsedMs() + " ms", error);
+    }
+    RetriesExhaustedException.ThrowableWithExtraContext qt = new RetriesExhaustedException.ThrowableWithExtraContext(
+        error, EnvironmentEdgeManager.currentTime(), "");
+    exceptions.add(qt);
+    if (error instanceof DoNotRetryIOException || tries >= maxAttempts) {
+      completeExceptionally();
+      return;
+    }
+    long delayNs;
+    if (operationTimeoutNs > 0) {
+      long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS;
+      if (maxDelayNs <= 0) {
+        completeExceptionally();
+        return;
+      }
+      delayNs = Math.min(maxDelayNs, getPauseTime(pauseNs, tries - 1));
+    } else {
+      delayNs = getPauseTime(pauseNs, tries - 1);
+    }
+    updateCachedLocation.accept(error);
+    tries++;
+    retryTimer.newTimeout(t -> doCall(), delayNs, TimeUnit.NANOSECONDS);
+  }
+
+  protected abstract void doCall();
+
+  CompletableFuture<T> call() {
+    doCall();
+    return future;
+  }
+}
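
The new base class above centralizes the retry bookkeeping (attempt counting, exponential
backoff scheduled on the HashedWheelTimer, operation/rpc timeout tracking and exception
aggregation); a concrete caller only implements doCall() and completes the inherited
future. A minimal sketch of a subclass follows. It is illustrative only: the class name
and the "simulated" request are made up, it assumes the same package and imports as the
file above, and only the doCall()/future/onError()/resetCallTimeout() contract is taken
from the code shown.

  // Hypothetical subclass, for illustration; not part of the patch.
  class AsyncNoopRpcRetryingCaller extends AsyncRpcRetryingCaller<String> {

    AsyncNoopRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn,
        long pauseNs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs,
        int startLogErrorsCnt) {
      super(retryTimer, conn, pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs,
          startLogErrorsCnt);
    }

    @Override
    protected void doCall() {
      // Bound the per-attempt rpc timeout by the remaining operation timeout.
      resetCallTimeout();
      boolean succeeded = true; // placeholder for the outcome of a real RPC
      if (succeeded) {
        future.complete("done");
      } else {
        // onError() either schedules another doCall() on the wheel timer or fails the
        // future with RetriesExhaustedException once maxAttempts is reached.
        onError(new java.io.IOException("simulated failure"),
          () -> "noop call failed", err -> { /* no cached location to invalidate */ });
      }
    }
  }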

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb9ce2ce/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
index 76b6a33..5df66cc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java
@@ -369,4 +369,59 @@ class AsyncRpcRetryingCallerFactory {
   public BatchCallerBuilder batch() {
     return new BatchCallerBuilder();
   }
+
+  public class MasterRequestCallerBuilder<T> extends BuilderBase {
+    private AsyncMasterRequestRpcRetryingCaller.Callable<T> callable;
+
+    private long operationTimeoutNs = -1L;
+
+    private long rpcTimeoutNs = -1L;
+
+    public MasterRequestCallerBuilder<T> action(AsyncMasterRequestRpcRetryingCaller.Callable<T> callable) {
+      this.callable = callable;
+      return this;
+    }
+
+    public MasterRequestCallerBuilder<T> operationTimeout(long operationTimeout, TimeUnit unit) {
+      this.operationTimeoutNs = unit.toNanos(operationTimeout);
+      return this;
+    }
+
+    public MasterRequestCallerBuilder<T> rpcTimeout(long rpcTimeout, TimeUnit unit) {
+      this.rpcTimeoutNs = unit.toNanos(rpcTimeout);
+      return this;
+    }
+
+    public MasterRequestCallerBuilder<T> pause(long pause, TimeUnit unit) {
+      this.pauseNs = unit.toNanos(pause);
+      return this;
+    }
+
+    public MasterRequestCallerBuilder<T> maxAttempts(int maxAttempts) {
+      this.maxAttempts = maxAttempts;
+      return this;
+    }
+
+    public MasterRequestCallerBuilder<T> startLogErrorsCnt(int startLogErrorsCnt) {
+      this.startLogErrorsCnt = startLogErrorsCnt;
+      return this;
+    }
+
+    public AsyncMasterRequestRpcRetryingCaller<T> build() {
+      return new AsyncMasterRequestRpcRetryingCaller<T>(retryTimer, conn, checkNotNull(callable,
+        "action is null"), pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs,
+          startLogErrorsCnt);
+    }
+
+    /**
+     * Shortcut for {@code build().call()}
+     */
+    public CompletableFuture<T> call() {
+      return build().call();
+    }
+  }
+
+  public <T> MasterRequestCallerBuilder<T> masterRequest() {
+    return new MasterRequestCallerBuilder<>();
+  }
 }
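
The masterRequest() builder added above follows the same fluent pattern as the existing
single/scan/batch builders: set the action plus timeouts, pause and retry limits, then
either build() the caller or use the call() shortcut. A hedged usage sketch follows; the
helper name and the timeout values are made up, it assumes the same package and imports
as the factory, and since the shape of AsyncMasterRequestRpcRetryingCaller.Callable is
not part of this hunk the action is simply passed through.

  // Hypothetical helper, for illustration; only builder methods shown above are used.
  static CompletableFuture<Boolean> callMaster(AsyncRpcRetryingCallerFactory factory,
      AsyncMasterRequestRpcRetryingCaller.Callable<Boolean> action) {
    return factory.<Boolean> masterRequest()
        .action(action)
        .operationTimeout(30, TimeUnit.SECONDS)
        .rpcTimeout(10, TimeUnit.SECONDS)
        .pause(100, TimeUnit.MILLISECONDS)
        .maxAttempts(31)
        .startLogErrorsCnt(9)
        .call();
  }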

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb9ce2ce/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
index 4ce6a18..e1c06d7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java
@@ -17,39 +17,23 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.getPauseTime;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.resetController;
-import static org.apache.hadoop.hbase.client.ConnectionUtils.translateException;
-
 import io.netty.util.HashedWheelTimer;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Consumer;
-import java.util.function.Supplier;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
  * Retry caller for a single request, such as get, put, delete, etc.
  */
 @InterfaceAudience.Private
-class AsyncSingleRequestRpcRetryingCaller<T> {
-
-  private static final Log LOG = LogFactory.getLog(AsyncSingleRequestRpcRetryingCaller.class);
+class AsyncSingleRequestRpcRetryingCaller<T> extends AsyncRpcRetryingCaller<T> {
 
   @FunctionalInterface
   public interface Callable<T> {
@@ -57,10 +41,6 @@ class AsyncSingleRequestRpcRetryingCaller<T> {
         ClientService.Interface stub);
   }
 
-  private final HashedWheelTimer retryTimer;
-
-  private final AsyncConnectionImpl conn;
-
   private final TableName tableName;
 
   private final byte[] row;
@@ -69,131 +49,45 @@ class AsyncSingleRequestRpcRetryingCaller<T> {
 
   private final Callable<T> callable;
 
-  private final long pauseNs;
-
-  private final int maxAttempts;
-
-  private final long operationTimeoutNs;
-
-  private final long rpcTimeoutNs;
-
-  private final int startLogErrorsCnt;
-
-  private final CompletableFuture<T> future;
-
-  private final HBaseRpcController controller;
-
-  private final List<RetriesExhaustedException.ThrowableWithExtraContext> exceptions;
-
-  private final long startNs;
-
   public AsyncSingleRequestRpcRetryingCaller(HashedWheelTimer retryTimer, AsyncConnectionImpl conn,
       TableName tableName, byte[] row, RegionLocateType locateType, Callable<T> callable,
       long pauseNs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs,
       int startLogErrorsCnt) {
-    this.retryTimer = retryTimer;
-    this.conn = conn;
+    super(retryTimer, conn, pauseNs, maxAttempts, operationTimeoutNs, rpcTimeoutNs,
+        startLogErrorsCnt);
     this.tableName = tableName;
     this.row = row;
     this.locateType = locateType;
     this.callable = callable;
-    this.pauseNs = pauseNs;
-    this.maxAttempts = maxAttempts;
-    this.operationTimeoutNs = operationTimeoutNs;
-    this.rpcTimeoutNs = rpcTimeoutNs;
-    this.startLogErrorsCnt = startLogErrorsCnt;
-    this.future = new CompletableFuture<>();
-    this.controller = conn.rpcControllerFactory.newController();
-    this.exceptions = new ArrayList<>();
-    this.startNs = System.nanoTime();
-  }
-
-  private int tries = 1;
-
-  private long elapsedMs() {
-    return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNs);
-  }
-
-  private long remainingTimeNs() {
-    return operationTimeoutNs - (System.nanoTime() - startNs);
-  }
-
-  private void completeExceptionally() {
-    future.completeExceptionally(new RetriesExhaustedException(tries - 1, exceptions));
-  }
-
-  private void onError(Throwable error, Supplier<String> errMsg,
-      Consumer<Throwable> updateCachedLocation) {
-    error = translateException(error);
-    if (tries > startLogErrorsCnt) {
-      LOG.warn(errMsg.get(), error);
-    }
-    RetriesExhaustedException.ThrowableWithExtraContext qt =
-        new RetriesExhaustedException.ThrowableWithExtraContext(error,
-            EnvironmentEdgeManager.currentTime(), "");
-    exceptions.add(qt);
-    if (error instanceof DoNotRetryIOException || tries >= maxAttempts) {
-      completeExceptionally();
-      return;
-    }
-    long delayNs;
-    if (operationTimeoutNs > 0) {
-      long maxDelayNs = remainingTimeNs() - SLEEP_DELTA_NS;
-      if (maxDelayNs <= 0) {
-        completeExceptionally();
-        return;
-      }
-      delayNs = Math.min(maxDelayNs, getPauseTime(pauseNs, tries - 1));
-    } else {
-      delayNs = getPauseTime(pauseNs, tries - 1);
-    }
-    updateCachedLocation.accept(error);
-    tries++;
-    retryTimer.newTimeout(t -> locateThenCall(), delayNs, TimeUnit.NANOSECONDS);
   }
 
   private void call(HRegionLocation loc) {
-    long callTimeoutNs;
-    if (operationTimeoutNs > 0) {
-      callTimeoutNs = remainingTimeNs();
-      if (callTimeoutNs <= 0) {
-        completeExceptionally();
-        return;
-      }
-      callTimeoutNs = Math.min(callTimeoutNs, rpcTimeoutNs);
-    } else {
-      callTimeoutNs = rpcTimeoutNs;
-    }
     ClientService.Interface stub;
     try {
       stub = conn.getRegionServerStub(loc.getServerName());
     } catch (IOException e) {
       onError(e,
         () -> "Get async stub to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row)
-            + "' in " + loc.getRegionInfo().getEncodedName() + " of " + tableName
-            + " failed, tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = "
-            + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + " ms, time elapsed = "
-            + elapsedMs() + " ms",
+            + "' in " + loc.getRegionInfo().getEncodedName() + " of " + tableName + " failed",
         err -> conn.getLocator().updateCachedLocation(loc, err));
       return;
     }
-    resetController(controller, callTimeoutNs);
-    callable.call(controller, loc, stub).whenComplete((result, error) -> {
-      if (error != null) {
-        onError(error,
-          () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in "
-              + loc.getRegionInfo().getEncodedName() + " of " + tableName + " failed, tries = "
-              + tries + ", maxAttempts = " + maxAttempts + ", timeout = "
-              + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + " ms, time elapsed = "
-              + elapsedMs() + " ms",
-          err -> conn.getLocator().updateCachedLocation(loc, err));
-        return;
-      }
-      future.complete(result);
-    });
+    resetCallTimeout();
+    callable.call(controller, loc, stub).whenComplete(
+      (result, error) -> {
+        if (error != null) {
+          onError(error,
+            () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in "
+                + loc.getRegionInfo().getEncodedName() + " of " + tableName + " failed",
+            err -> conn.getLocator().updateCachedLocation(loc, err));
+          return;
+        }
+        future.complete(result);
+      });
   }
 
-  private void locateThenCall() {
+  @Override
+  protected void doCall() {
     long locateTimeoutNs;
     if (operationTimeoutNs > 0) {
       locateTimeoutNs = remainingTimeNs();
@@ -204,24 +98,23 @@ class AsyncSingleRequestRpcRetryingCaller<T> {
     } else {
       locateTimeoutNs = -1L;
     }
-    conn.getLocator().getRegionLocation(tableName, row, locateType, locateTimeoutNs)
-        .whenComplete((loc, error) -> {
-          if (error != null) {
-            onError(error,
-              () -> "Locate '" + Bytes.toStringBinary(row) + "' in " + tableName
-                  + " failed, tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = "
-                  + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + " ms, time elapsed = "
-                  + elapsedMs() + " ms",
-              err -> {
+    conn.getLocator()
+        .getRegionLocation(tableName, row, locateType, locateTimeoutNs)
+        .whenComplete(
+          (loc, error) -> {
+            if (error != null) {
+              onError(error, () -> "Locate '" + Bytes.toStringBinary(row) + "' in " + tableName
+                  + " failed", err -> {
               });
-            return;
-          }
-          call(loc);
-        });
+              return;
+            }
+            call(loc);
+          });
   }
 
+  @Override
   public CompletableFuture<T> call() {
-    locateThenCall();
+    doCall();
     return future;
   }
 }
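
After this refactor the single-request caller keeps only the locate-then-call logic:
doCall() resolves the region location, call(loc) hands the location and a per-server stub
to the user-supplied Callable, and every failure funnels through the inherited onError(),
which invalidates the cached location and reschedules doCall(). A sketch of supplying such
a Callable follows; it is illustrative only: the Result type and the inert body are
placeholders, and it assumes the Callable returns a CompletableFuture, as its use with
whenComplete() above suggests.

  // Hypothetical Callable, for illustration; a real one would invoke an RPC on the
  // ClientService stub and complete the future from the RPC callback.
  AsyncSingleRequestRpcRetryingCaller.Callable<Result> action =
      (controller, loc, stub) -> {
        CompletableFuture<Result> resultFuture = new CompletableFuture<>();
        resultFuture.complete(null); // keeps the sketch inert
        return resultFuture;
      };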

http://git-wip-us.apache.org/repos/asf/hbase/blob/cb9ce2ce/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
new file mode 100644
index 0000000..9beae1f
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import static org.junit.Assert.assertEquals;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Class to test AsyncAdmin.
+ */
+@Category({LargeTests.class, ClientTests.class})
+public class TestAsyncAdmin {
+
+  private static final Log LOG = LogFactory.getLog(TestAsyncAdmin.class);
+  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private static AsyncConnection ASYNC_CONN;
+  private AsyncAdmin admin;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 10);
+    TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 3);
+    TEST_UTIL.getConfiguration().setInt("hbase.rpc.timeout", 1000);
+    TEST_UTIL.startMiniCluster(1);
+    ASYNC_CONN = ConnectionFactory.createAsyncConnection(TEST_UTIL.getConfiguration());
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    IOUtils.closeQuietly(ASYNC_CONN);
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    this.admin = ASYNC_CONN.getAdmin();
+  }
+
+  @Test(timeout = 30000)
+  public void testBalancer() throws Exception {
+    boolean initialState = admin.isBalancerEnabled().get();
+
+    // Toggle the balancer and wait for the call to complete.
+    boolean prevState = admin.setBalancerRunning(!initialState).get();
+
+    // The previous state should be the original state we observed
+    assertEquals(initialState, prevState);
+
+    // Current state should be opposite of the original
+    assertEquals(!initialState, admin.isBalancerEnabled().get());
+
+    // Reset it back to what it was
+    prevState = admin.setBalancerRunning(initialState).get();
+
+    // The previous state should be the opposite of the initial state
+    assertEquals(!initialState, prevState);
+    // Current state should be the original state again
+    assertEquals(initialState, admin.isBalancerEnabled().get());
+  }
+}
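
The test above drives AsyncAdmin by blocking on each returned CompletableFuture with
get(), which is fine in a JUnit method. In application code the same two calls compose
without blocking; a small sketch follows, assuming only the isBalancerEnabled() and
setBalancerRunning() methods the test already exercises and reusing the test's LOG for
output.

  // Illustrative non-blocking composition of the calls used in testBalancer().
  admin.isBalancerEnabled()
      .thenCompose(enabled -> admin.setBalancerRunning(!enabled))
      .thenAccept(previous -> LOG.info("balancer was previously running: " + previous));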


[05/50] [abbrv] hbase git commit: HBASE-17470 Remove merge region code from region server (Stephen Yuan Jiang)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
deleted file mode 100644
index 5bd2ff1..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
+++ /dev/null
@@ -1,485 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.*;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.CoordinatedStateManagerFactory;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-import com.google.common.collect.ImmutableList;
-
-/**
- * Test the {@link RegionMergeTransactionImpl} class against two HRegions (as
- * opposed to running cluster).
- */
-@Category({RegionServerTests.class, SmallTests.class})
-public class TestRegionMergeTransaction {
-  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private final Path testdir = TEST_UTIL.getDataTestDir(this.getClass()
-      .getName());
-  private HRegion region_a;
-  private HRegion region_b;
-  private HRegion region_c;
-  private WALFactory wals;
-  private FileSystem fs;
-  // Start rows of region_a,region_b,region_c
-  private static final byte[] STARTROW_A = new byte[] { 'a', 'a', 'a' };
-  private static final byte[] STARTROW_B = new byte[] { 'g', 'g', 'g' };
-  private static final byte[] STARTROW_C = new byte[] { 'w', 'w', 'w' };
-  private static final byte[] ENDROW = new byte[] { '{', '{', '{' };
-  private static final byte[] CF = HConstants.CATALOG_FAMILY;
-
-  @Before
-  public void setup() throws IOException {
-    this.fs = FileSystem.get(TEST_UTIL.getConfiguration());
-    this.fs.delete(this.testdir, true);
-    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
-    FSUtils.setRootDir(walConf, this.testdir);
-    this.wals = new WALFactory(walConf, null, TestRegionMergeTransaction.class.getName());
-    this.region_a = createRegion(this.testdir, this.wals, STARTROW_A, STARTROW_B);
-    this.region_b = createRegion(this.testdir, this.wals, STARTROW_B, STARTROW_C);
-    this.region_c = createRegion(this.testdir, this.wals, STARTROW_C, ENDROW);
-    assert region_a != null && region_b != null && region_c != null;
-    TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster", true);
-  }
-
-  @After
-  public void teardown() throws IOException {
-    for (HRegion region : new HRegion[] { region_a, region_b, region_c }) {
-      if (region != null && !region.isClosed()) region.close();
-      if (this.fs.exists(region.getRegionFileSystem().getRegionDir())
-          && !this.fs.delete(region.getRegionFileSystem().getRegionDir(), true)) {
-        throw new IOException("Failed deleting of "
-            + region.getRegionFileSystem().getRegionDir());
-      }
-    }
-    if (this.wals != null) {
-      this.wals.close();
-    }
-    this.fs.delete(this.testdir, true);
-  }
-
-  /**
-   * Test straight prepare works. Tries to merge on {@link #region_a} and
-   * {@link #region_b}
-   * @throws IOException
-   */
-  @Test
-  public void testPrepare() throws IOException {
-    prepareOnGoodRegions();
-  }
-
-  private RegionMergeTransactionImpl prepareOnGoodRegions() throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_b,
-        false);
-    RegionMergeTransactionImpl spyMT = Mockito.spy(mt);
-    doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
-        region_a.getRegionInfo().getRegionName());
-    doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
-        region_b.getRegionInfo().getRegionName());
-    assertTrue(spyMT.prepare(null));
-    return spyMT;
-  }
-
-  /**
-   * Test merging the same region
-   */
-  @Test
-  public void testPrepareWithSameRegion() throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a,
-        this.region_a, true);
-    assertFalse("should not merge the same region even if it is forcible ",
-        mt.prepare(null));
-  }
-
-  /**
-   * Test merging two non-adjacent regions in a normal (non-forcible) merge
-   */
-  @Test
-  public void testPrepareWithRegionsNotAdjacent() throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a,
-        this.region_c, false);
-    assertFalse("should not merge two regions if they are adjacent except it is forcible",
-        mt.prepare(null));
-  }
-
-  /**
-   * Test merging two non-adjacent regions under a forcible (compulsory) merge
-   */
-  @Test
-  public void testPrepareWithRegionsNotAdjacentUnderCompulsory()
-      throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_c,
-        true);
-    RegionMergeTransactionImpl spyMT = Mockito.spy(mt);
-    doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
-        region_a.getRegionInfo().getRegionName());
-    doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
-        region_c.getRegionInfo().getRegionName());
-    assertTrue("Since focible is true, should merge two regions even if they are not adjacent",
-        spyMT.prepare(null));
-  }
-
-  /**
-   * Pass a reference store
-   */
-  @Test
-  public void testPrepareWithRegionsWithReference() throws IOException {
-    HStore storeMock = Mockito.mock(HStore.class);
-    when(storeMock.hasReferences()).thenReturn(true);
-    when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf"));
-    when(storeMock.getSizeToFlush()).thenReturn(new MemstoreSize());
-    when(storeMock.close()).thenReturn(ImmutableList.<StoreFile>of());
-    this.region_a.stores.put(Bytes.toBytes(""), storeMock);
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a,
-        this.region_b, false);
-    assertFalse(
-        "a region should not be mergeable if it has instances of store file references",
-        mt.prepare(null));
-  }
-
-  @Test
-  public void testPrepareWithClosedRegion() throws IOException {
-    this.region_a.close();
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(this.region_a,
-        this.region_b, false);
-    assertFalse(mt.prepare(null));
-  }
-
-  /**
-   * Test merging regions which are themselves the result of a merge and still have
-   * merge references in hbase:meta
-   */
-  @Test
-  public void testPrepareWithRegionsWithMergeReference() throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_b,
-        false);
-    RegionMergeTransactionImpl spyMT = Mockito.spy(mt);
-    doReturn(true).when(spyMT).hasMergeQualifierInMeta(null,
-        region_a.getRegionInfo().getRegionName());
-    doReturn(true).when(spyMT).hasMergeQualifierInMeta(null,
-        region_b.getRegionInfo().getRegionName());
-    assertFalse(spyMT.prepare(null));
-  }
-
-  /**
-   * Test RegionMergeTransactionListener
-   */
-  @Test public void testRegionMergeTransactionListener() throws IOException {
-    RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, region_b,
-        false);
-    RegionMergeTransactionImpl spyMT = Mockito.spy(mt);
-    doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
-        region_a.getRegionInfo().getRegionName());
-    doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
-        region_b.getRegionInfo().getRegionName());
-    RegionMergeTransaction.TransactionListener listener =
-            Mockito.mock(RegionMergeTransaction.TransactionListener.class);
-    mt.registerTransactionListener(listener);
-    mt.prepare(null);
-    TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
-    CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(
-      TEST_UTIL.getConfiguration());
-    Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
-    mt.execute(mockServer, null);
-    verify(listener).transition(mt,
-            RegionMergeTransaction.RegionMergeTransactionPhase.STARTED,
-            RegionMergeTransaction.RegionMergeTransactionPhase.PREPARED);
-    verify(listener, times(10)).transition(any(RegionMergeTransaction.class),
-            any(RegionMergeTransaction.RegionMergeTransactionPhase.class),
-            any(RegionMergeTransaction.RegionMergeTransactionPhase.class));
-    verifyNoMoreInteractions(listener);
-  }
-
-  @Test
-  public void testWholesomeMerge() throws IOException, InterruptedException {
-    final int rowCountOfRegionA = loadRegion(this.region_a, CF, true);
-    final int rowCountOfRegionB = loadRegion(this.region_b, CF, true);
-    assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0);
-    assertEquals(rowCountOfRegionA, TEST_UTIL.countRows(this.region_a));
-    assertEquals(rowCountOfRegionB, TEST_UTIL.countRows(this.region_b));
-
-    // Start transaction.
-    RegionMergeTransactionImpl mt = prepareOnGoodRegions();
-
-    // Run the execute. Look at what it returns.
-    TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
-    CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(
-      TEST_UTIL.getConfiguration());
-    Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
-    HRegion mergedRegion = (HRegion)mt.execute(mockServer, null);
-    // Do some assertions about execution.
-    assertTrue(this.fs.exists(mt.getMergesDir()));
-    // Assert region_a and region_b is closed.
-    assertTrue(region_a.isClosed());
-    assertTrue(region_b.isClosed());
-
-    // Assert mergedir is empty -- because its content will have been moved out
-    // to be under the merged region dirs.
-    assertEquals(0, this.fs.listStatus(mt.getMergesDir()).length);
-    // Check merged region have correct key span.
-    assertTrue(Bytes.equals(this.region_a.getRegionInfo().getStartKey(),
-        mergedRegion.getRegionInfo().getStartKey()));
-    assertTrue(Bytes.equals(this.region_b.getRegionInfo().getEndKey(),
-        mergedRegion.getRegionInfo().getEndKey()));
-    // Count rows. The merged region is already open.
-    try {
-      int mergedRegionRowCount = TEST_UTIL.countRows(mergedRegion);
-      assertEquals((rowCountOfRegionA + rowCountOfRegionB),
-          mergedRegionRowCount);
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(mergedRegion);
-    }
-    // Assert the write lock is no longer held on region_a and region_b
-    assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
-    assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread());
-  }
-
-  @Test
-  public void testRollback() throws IOException, InterruptedException {
-    final int rowCountOfRegionA = loadRegion(this.region_a, CF, true);
-    final int rowCountOfRegionB = loadRegion(this.region_b, CF, true);
-    assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0);
-    assertEquals(rowCountOfRegionA, TEST_UTIL.countRows(this.region_a));
-    assertEquals(rowCountOfRegionB, TEST_UTIL.countRows(this.region_b));
-
-    // Start transaction.
-    RegionMergeTransactionImpl mt = prepareOnGoodRegions();
-
-    when(mt.createMergedRegionFromMerges(region_a, region_b,
-        mt.getMergedRegionInfo())).thenThrow(
-        new MockedFailedMergedRegionCreation());
-
-    // Run the execute. Look at what it returns.
-    boolean expectedException = false;
-    TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
-    CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(
-      TEST_UTIL.getConfiguration());
-    Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
-    try {
-      mt.execute(mockServer, null);
-    } catch (MockedFailedMergedRegionCreation e) {
-      expectedException = true;
-    }
-    assertTrue(expectedException);
-    // Run rollback
-    assertTrue(mt.rollback(null, null));
-
-    // Assert I can scan region_a and region_b.
-    int rowCountOfRegionA2 = TEST_UTIL.countRows(this.region_a);
-    assertEquals(rowCountOfRegionA, rowCountOfRegionA2);
-    int rowCountOfRegionB2 = TEST_UTIL.countRows(this.region_b);
-    assertEquals(rowCountOfRegionB, rowCountOfRegionB2);
-
-    // Assert rollback cleaned up stuff in fs
-    assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir,
-        mt.getMergedRegionInfo())));
-
-    assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
-    assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread());
-
-    // Now retry the merge but do not throw an exception this time.
-    assertTrue(mt.prepare(null));
-    HRegion mergedRegion = (HRegion)mt.execute(mockServer, null);
-    // Count rows in the merged region.
-    // The merged region is already open.
-    try {
-      int mergedRegionRowCount = TEST_UTIL.countRows(mergedRegion);
-      assertEquals((rowCountOfRegionA + rowCountOfRegionB),
-          mergedRegionRowCount);
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(mergedRegion);
-    }
-    // Assert the write lock is no longer held on region_a and region_b
-    assertTrue(!this.region_a.lock.writeLock().isHeldByCurrentThread());
-    assertTrue(!this.region_b.lock.writeLock().isHeldByCurrentThread());
-  }
-
-  @Test
-  public void testFailAfterPONR() throws IOException, KeeperException, InterruptedException {
-    final int rowCountOfRegionA = loadRegion(this.region_a, CF, true);
-    final int rowCountOfRegionB = loadRegion(this.region_b, CF, true);
-    assertTrue(rowCountOfRegionA > 0 && rowCountOfRegionB > 0);
-    assertEquals(rowCountOfRegionA, TEST_UTIL.countRows(this.region_a));
-    assertEquals(rowCountOfRegionB, TEST_UTIL.countRows(this.region_b));
-
-    // Start transaction.
-    RegionMergeTransactionImpl mt = prepareOnGoodRegions();
-    Mockito.doThrow(new MockedFailedMergedRegionOpen())
-        .when(mt)
-        .openMergedRegion((Server) Mockito.anyObject(),
-            (RegionServerServices) Mockito.anyObject(),
-            (HRegion) Mockito.anyObject());
-
-    // Run the execute. Look at what it returns.
-    boolean expectedException = false;
-    TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
-    CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(
-      TEST_UTIL.getConfiguration());
-    Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
-    try {
-      mt.execute(mockServer, null);
-    } catch (MockedFailedMergedRegionOpen e) {
-      expectedException = true;
-    }
-    assertTrue(expectedException);
-    // Run rollback returns false that we should restart.
-    assertFalse(mt.rollback(null, null));
-    // Make sure that merged region is still in the filesystem, that
-    // they have not been removed; this is supposed to be the case if we go
-    // past point of no return.
-    Path tableDir = this.region_a.getRegionFileSystem().getRegionDir()
-        .getParent();
-    Path mergedRegionDir = new Path(tableDir, mt.getMergedRegionInfo()
-        .getEncodedName());
-    assertTrue(TEST_UTIL.getTestFileSystem().exists(mergedRegionDir));
-  }
-
-  @Test
-  public void testMergedRegionBoundary() {
-    TableName tableName =
-        TableName.valueOf("testMergedRegionBoundary");
-    byte[] a = Bytes.toBytes("a");
-    byte[] b = Bytes.toBytes("b");
-    byte[] z = Bytes.toBytes("z");
-    HRegionInfo r1 = new HRegionInfo(tableName);
-    HRegionInfo r2 = new HRegionInfo(tableName, a, z);
-    HRegionInfo m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2);
-    assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey())
-        && Bytes.equals(m.getEndKey(), r1.getEndKey()));
-
-    r1 = new HRegionInfo(tableName, null, a);
-    r2 = new HRegionInfo(tableName, a, z);
-    m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2);
-    assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey())
-        && Bytes.equals(m.getEndKey(), r2.getEndKey()));
-
-    r1 = new HRegionInfo(tableName, null, a);
-    r2 = new HRegionInfo(tableName, z, null);
-    m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2);
-    assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey())
-        && Bytes.equals(m.getEndKey(), r2.getEndKey()));
-
-    r1 = new HRegionInfo(tableName, a, z);
-    r2 = new HRegionInfo(tableName, z, null);
-    m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2);
-    assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey())
-      && Bytes.equals(m.getEndKey(), r2.getEndKey()));
-
-    r1 = new HRegionInfo(tableName, a, b);
-    r2 = new HRegionInfo(tableName, b, z);
-    m = RegionMergeTransactionImpl.getMergedRegionInfo(r1, r2);
-    assertTrue(Bytes.equals(m.getStartKey(), r1.getStartKey())
-      && Bytes.equals(m.getEndKey(), r2.getEndKey()));
-  }
-
-  /**
-   * Exception used in this class only.
-   */
-  @SuppressWarnings("serial")
-  private class MockedFailedMergedRegionCreation extends IOException {
-  }
-
-  @SuppressWarnings("serial")
-  private class MockedFailedMergedRegionOpen extends IOException {
-  }
-
-  private HRegion createRegion(final Path testdir, final WALFactory wals,
-      final byte[] startrow, final byte[] endrow)
-      throws IOException {
-    // Make a region with start and end keys.
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
-    HColumnDescriptor hcd = new HColumnDescriptor(CF);
-    htd.addFamily(hcd);
-    HRegionInfo hri = new HRegionInfo(htd.getTableName(), startrow, endrow);
-    HRegion a = HBaseTestingUtility.createRegionAndWAL(hri, testdir,
-        TEST_UTIL.getConfiguration(), htd);
-    HBaseTestingUtility.closeRegionAndWAL(a);
-    return HRegion.openHRegion(testdir, hri, htd,
-      wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()),
-      TEST_UTIL.getConfiguration());
-  }
-
-  /**
-   * Load region with rows from 'aaa' to 'zzz', skip the rows which are out of
-   * range of the region
-   * @param r Region
-   * @param f Family
-   * @param flush flush the cache if true
-   * @return Count of rows loaded.
-   * @throws IOException
-   */
-  private int loadRegion(final HRegion r, final byte[] f, final boolean flush)
-      throws IOException {
-    byte[] k = new byte[3];
-    int rowCount = 0;
-    for (byte b1 = 'a'; b1 <= 'z'; b1++) {
-      for (byte b2 = 'a'; b2 <= 'z'; b2++) {
-        for (byte b3 = 'a'; b3 <= 'z'; b3++) {
-          k[0] = b1;
-          k[1] = b2;
-          k[2] = b3;
-          if (!HRegion.rowIsInRange(r.getRegionInfo(), k)) {
-            continue;
-          }
-          Put put = new Put(k);
-          put.addColumn(f, null, k);
-          if (r.getWAL() == null)
-            put.setDurability(Durability.SKIP_WAL);
-          r.put(put);
-          rowCount++;
-        }
-      }
-      if (flush) {
-        r.flush(true);
-      }
-    }
-    return rowCount;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
deleted file mode 100644
index 661af14..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTable.java
+++ /dev/null
@@ -1,172 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.*;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests merging a normal table's regions
- */
-@Category({MiscTests.class, MediumTests.class})
-public class TestMergeTable {
-  private static final Log LOG = LogFactory.getLog(TestMergeTable.class);
-  private final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static final byte [] COLUMN_NAME = Bytes.toBytes("contents");
-  private static final byte [] VALUE;
-  static {
-    // We will use the same value for the rows as that is not really important here
-    String partialValue = String.valueOf(System.currentTimeMillis());
-    StringBuilder val = new StringBuilder();
-    while (val.length() < 1024) {
-      val.append(partialValue);
-    }
-    VALUE = Bytes.toBytes(val.toString());
-  }
-
-  /**
-   * Test merge.
-   * Hand-makes regions of a mergeable size and adds the hand-made regions to
-   * hand-made meta.  The hand-made regions are created offline.  We then start
-   * up a mini cluster, disable the hand-made table and start in on merging.
-   * @throws Exception
-   */
-  @Test (timeout=300000) public void testMergeTable() throws Exception {
-    // Table we are manually creating offline.
-    HTableDescriptor desc = new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf(Bytes.toBytes("test")));
-    desc.addFamily(new HColumnDescriptor(COLUMN_NAME));
-
-    // Set maximum regionsize down.
-    UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE, 64L * 1024L * 1024L);
-    // Make it so we don't split.
-    UTIL.getConfiguration().setInt("hbase.regionserver.regionSplitLimit", 0);
-    // Start up hdfs.  It's in here we'll be putting our manually made regions.
-    UTIL.startMiniDFSCluster(1);
-    // Create hdfs hbase rootdir.
-    Path rootdir = UTIL.createRootDir();
-    FileSystem fs = FileSystem.get(UTIL.getConfiguration());
-    if (fs.exists(rootdir)) {
-      if (fs.delete(rootdir, true)) {
-        LOG.info("Cleaned up existing " + rootdir);
-      }
-    }
-
-    // Now create three data regions: The first is too large to merge since it
-    // will be > 64 MB in size. The second two will be smaller and will be
-    // selected for merging.
-
-    // To ensure that the first region is larger than 64MB we need to write at
-    // least 65536 rows. We will make certain by writing 70000
-    byte [] row_70001 = Bytes.toBytes("row_70001");
-    byte [] row_80001 = Bytes.toBytes("row_80001");
-
-    // Create regions and populate them at same time.  Create the tabledir
-    // for them first.
-    new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir).createTableDescriptor(desc);
-    HRegion [] regions = {
-      createRegion(desc, null, row_70001, 1, 70000, rootdir),
-      createRegion(desc, row_70001, row_80001, 70001, 10000, rootdir),
-      createRegion(desc, row_80001, null, 80001, 11000, rootdir)
-    };
-
-    // Now create the root and meta regions and insert the data regions
-    // created above into hbase:meta
-    setupMeta(rootdir, regions);
-    try {
-      LOG.info("Starting mini zk cluster");
-      UTIL.startMiniZKCluster();
-      LOG.info("Starting mini hbase cluster");
-      UTIL.startMiniHBaseCluster(1, 1);
-      Configuration c = new Configuration(UTIL.getConfiguration());
-      Connection connection = UTIL.getConnection();
-
-      List<HRegionInfo> originalTableRegions =
-        MetaTableAccessor.getTableRegions(connection, desc.getTableName());
-      LOG.info("originalTableRegions size=" + originalTableRegions.size() +
-        "; " + originalTableRegions);
-      Admin admin = connection.getAdmin();
-      admin.disableTable(desc.getTableName());
-      admin.close();
-      HMerge.merge(c, FileSystem.get(c), desc.getTableName());
-      List<HRegionInfo> postMergeTableRegions =
-        MetaTableAccessor.getTableRegions(connection, desc.getTableName());
-      LOG.info("postMergeTableRegions size=" + postMergeTableRegions.size() +
-        "; " + postMergeTableRegions);
-      assertTrue("originalTableRegions=" + originalTableRegions.size() +
-        ", postMergeTableRegions=" + postMergeTableRegions.size(),
-        postMergeTableRegions.size() < originalTableRegions.size());
-      LOG.info("Done with merge");
-    } finally {
-      UTIL.shutdownMiniCluster();
-      LOG.info("After cluster shutdown");
-    }
-  }
-
-  private HRegion createRegion(final HTableDescriptor desc,
-      byte [] startKey, byte [] endKey, int firstRow, int nrows, Path rootdir)
-  throws IOException {
-    HRegionInfo hri = new HRegionInfo(desc.getTableName(), startKey, endKey);
-    HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, rootdir, UTIL.getConfiguration(),
-        desc);
-    LOG.info("Created region " + region.getRegionInfo().getRegionNameAsString());
-    for(int i = firstRow; i < firstRow + nrows; i++) {
-      Put put = new Put(Bytes.toBytes("row_" + String.format("%1$05d", i)));
-      put.setDurability(Durability.SKIP_WAL);
-      put.addColumn(COLUMN_NAME, null, VALUE);
-      region.put(put);
-      if (i % 10000 == 0) {
-        LOG.info("Flushing write #" + i);
-        region.flush(true);
-      }
-    }
-    HBaseTestingUtility.closeRegionAndWAL(region);
-    return region;
-  }
-
-  protected void setupMeta(Path rootdir, final HRegion [] regions)
-  throws IOException {
-    HRegion meta =
-      HBaseTestingUtility.createRegionAndWAL(HRegionInfo.FIRST_META_REGIONINFO, rootdir,
-          UTIL.getConfiguration(), UTIL.getMetaTableDescriptor());
-    for (HRegion r: regions) {
-      HRegion.addRegionToMETA(meta, r);
-    }
-    HBaseTestingUtility.closeRegionAndWAL(meta);
-  }
-
-}
-

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
deleted file mode 100644
index 1924c9e..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestMergeTool.java
+++ /dev/null
@@ -1,301 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
-import org.apache.hadoop.hbase.HBaseTestCase;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.InternalScanner;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.util.ToolRunner;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/** Test stand alone merge tool that can merge arbitrary regions */
-@Category({MiscTests.class, LargeTests.class})
-public class TestMergeTool extends HBaseTestCase {
-  private static final Log LOG = LogFactory.getLog(TestMergeTool.class);
-  HBaseTestingUtility TEST_UTIL;
-//  static final byte [] COLUMN_NAME = Bytes.toBytes("contents:");
-  static final byte [] FAMILY = Bytes.toBytes("contents");
-  static final byte [] QUALIFIER = Bytes.toBytes("dc");
-
-  private final HRegionInfo[] sourceRegions = new HRegionInfo[5];
-  private final HRegion[] regions = new HRegion[5];
-  private HTableDescriptor desc;
-  private byte [][][] rows;
-  private MiniDFSCluster dfsCluster = null;
-  private WALFactory wals;
-
-  @Override
-  @Before
-  public void setUp() throws Exception {
-    // Set the timeout down else this test will take a while to complete.
-    this.conf.setLong("hbase.zookeeper.recoverable.waittime", 10);
-    // Make it so we try and connect to a zk that is not there (else we might
-    // find a zk ensemble put up by another concurrent test and this will
-    // mess up this test).  Choose an unlikely port. Default test port is 21818.
-    // Default zk port is 2181.
-    this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 10001);
-
-    this.conf.set("hbase.hstore.compactionThreshold", "2");
-
-    // Create table description
-    this.desc = new HTableDescriptor(org.apache.hadoop.hbase.TableName.valueOf("TestMergeTool"));
-    this.desc.addFamily(new HColumnDescriptor(FAMILY));
-
-    /*
-     * Create the HRegionInfos for the regions.
-     */
-    // Region 0 will contain the key range [row_0200,row_0300)
-    sourceRegions[0] = new HRegionInfo(this.desc.getTableName(),
-        Bytes.toBytes("row_0200"),
-      Bytes.toBytes("row_0300"));
-
-    // Region 1 will contain the key range [row_0250,row_0400) and overlaps
-    // with Region 0
-    sourceRegions[1] =
-      new HRegionInfo(this.desc.getTableName(),
-          Bytes.toBytes("row_0250"),
-          Bytes.toBytes("row_0400"));
-
-    // Region 2 will contain the key range [row_0100,row_0200) and is adjacent
-    // to Region 0 or the region resulting from the merge of Regions 0 and 1
-    sourceRegions[2] =
-      new HRegionInfo(this.desc.getTableName(),
-          Bytes.toBytes("row_0100"),
-          Bytes.toBytes("row_0200"));
-
-    // Region 3 will contain the key range [row_0500,row_0600) and is not
-    // adjacent to any of Regions 0, 1, 2 or the merged result of any or all
-    // of those regions
-    sourceRegions[3] =
-      new HRegionInfo(this.desc.getTableName(),
-          Bytes.toBytes("row_0500"),
-          Bytes.toBytes("row_0600"));
-
-    // Region 4 will have empty start and end keys and overlaps all regions.
-    sourceRegions[4] =
-      new HRegionInfo(this.desc.getTableName(),
-          HConstants.EMPTY_BYTE_ARRAY,
-          HConstants.EMPTY_BYTE_ARRAY);
-
-    /*
-     * Now create some row keys
-     */
-    this.rows = new byte [5][][];
-    this.rows[0] = Bytes.toByteArrays(new String[] { "row_0210", "row_0280" });
-    this.rows[1] = Bytes.toByteArrays(new String[] { "row_0260", "row_0350",
-        "row_035" });
-    this.rows[2] = Bytes.toByteArrays(new String[] { "row_0110", "row_0175",
-        "row_0175", "row_0175"});
-    this.rows[3] = Bytes.toByteArrays(new String[] { "row_0525", "row_0560",
-        "row_0560", "row_0560", "row_0560"});
-    this.rows[4] = Bytes.toByteArrays(new String[] { "row_0050", "row_1000",
-        "row_1000", "row_1000", "row_1000", "row_1000" });
-
-    // Start up dfs
-    TEST_UTIL = new HBaseTestingUtility(conf);
-    this.dfsCluster = TEST_UTIL.startMiniDFSCluster(2);
-    this.fs = this.dfsCluster.getFileSystem();
-    System.out.println("fs=" + this.fs);
-    FSUtils.setFsDefault(this.conf, new Path(fs.getUri()));
-    TEST_UTIL.createRootDir();
-
-    // Note: we must call super.setUp after starting the mini cluster or
-    // we will end up with a local file system
-
-    super.setUp();
-    wals = new WALFactory(conf, null, "TestMergeTool");
-    try {
-      // Create meta region
-      createMetaRegion();
-      new FSTableDescriptors(this.conf, this.fs, testDir).createTableDescriptor(this.desc);
-      /*
-       * Create the regions we will merge
-       */
-      for (int i = 0; i < sourceRegions.length; i++) {
-        regions[i] =
-          HBaseTestingUtility.createRegionAndWAL(this.sourceRegions[i], testDir, this.conf,
-              this.desc);
-        /*
-         * Insert data
-         */
-        for (int j = 0; j < rows[i].length; j++) {
-          byte [] row = rows[i][j];
-          Put put = new Put(row);
-          put.addColumn(FAMILY, QUALIFIER, row);
-          regions[i].put(put);
-        }
-        HRegion.addRegionToMETA(meta, regions[i]);
-      }
-      // Close root and meta regions
-      closeRootAndMeta();
-
-    } catch (Exception e) {
-      TEST_UTIL.shutdownMiniCluster();
-      throw e;
-    }
-  }
-
-  @Override
-  @After
-  public void tearDown() throws Exception {
-    super.tearDown();
-    for (int i = 0; i < sourceRegions.length; i++) {
-      HRegion r = regions[i];
-      if (r != null) {
-        HBaseTestingUtility.closeRegionAndWAL(r);
-      }
-    }
-    wals.close();
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  /*
-   * @param msg Message that describes this merge
-   * @param regionName1
-   * @param regionName2
-   * @param log Log to use merging.
-   * @param upperbound Verifying, how high up in this.rows to go.
-   * @return Merged region.
-   * @throws Exception
-   */
-  private HRegion mergeAndVerify(final String msg, final String regionName1,
-    final String regionName2, final WAL log, final int upperbound)
-  throws Exception {
-    Merge merger = new Merge();
-    LOG.info(msg);
-    LOG.info("fs2=" + this.conf.get("fs.defaultFS"));
-    int errCode = ToolRunner.run(this.conf, merger,
-      new String[] {this.desc.getTableName().getNameAsString(), regionName1, regionName2}
-    );
-    assertTrue("'" + msg + "' failed with errCode " + errCode, errCode == 0);
-    HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
-
-    // Now verify that we can read all the rows from regions 0, 1
-    // in the new merged region.
-    HRegion merged = HRegion.openHRegion(mergedInfo, this.desc, log, this.conf);
-    verifyMerge(merged, upperbound);
-    merged.close();
-    LOG.info("Verified " + msg);
-    return merged;
-  }
-
-  private void verifyMerge(final HRegion merged, final int upperbound)
-  throws IOException {
-    //Test
-    Scan scan = new Scan();
-    scan.addFamily(FAMILY);
-    InternalScanner scanner = merged.getScanner(scan);
-    try {
-    List<Cell> testRes = null;
-      while (true) {
-        testRes = new ArrayList<Cell>();
-        boolean hasNext = scanner.next(testRes);
-        if (!hasNext) {
-          break;
-        }
-      }
-    } finally {
-      scanner.close();
-    }
-
-    //!Test
-
-    for (int i = 0; i < upperbound; i++) {
-      for (int j = 0; j < rows[i].length; j++) {
-        Get get = new Get(rows[i][j]);
-        get.addFamily(FAMILY);
-        Result result = merged.get(get);
-        assertEquals(1, result.size());
-        byte [] bytes = CellUtil.cloneValue(result.rawCells()[0]);
-        assertNotNull(Bytes.toStringBinary(rows[i][j]), bytes);
-        assertTrue(Bytes.equals(bytes, rows[i][j]));
-      }
-    }
-  }
-
-  /**
-   * Test merge tool.
-   * @throws Exception
-   */
-  @Test
-  public void testMergeTool() throws Exception {
-    // First verify we can read the rows from the source regions and that they
-    // contain the right data.
-    for (int i = 0; i < regions.length; i++) {
-      for (int j = 0; j < rows[i].length; j++) {
-        Get get = new Get(rows[i][j]);
-        get.addFamily(FAMILY);
-        Result result = regions[i].get(get);
-        byte [] bytes =  CellUtil.cloneValue(result.rawCells()[0]);
-        assertNotNull(bytes);
-        assertTrue(Bytes.equals(bytes, rows[i][j]));
-      }
-      // Close the region and delete the log
-      HBaseTestingUtility.closeRegionAndWAL(regions[i]);
-    }
-    WAL log = wals.getWAL(new byte[]{}, null);
-     // Merge Region 0 and Region 1
-    HRegion merged = mergeAndVerify("merging regions 0 and 1 ",
-      this.sourceRegions[0].getRegionNameAsString(),
-      this.sourceRegions[1].getRegionNameAsString(), log, 2);
-
-    // Merge the result of merging regions 0 and 1 with region 2
-    merged = mergeAndVerify("merging regions 0+1 and 2",
-      merged.getRegionInfo().getRegionNameAsString(),
-      this.sourceRegions[2].getRegionNameAsString(), log, 3);
-
-    // Merge the result of merging regions 0, 1 and 2 with region 3
-    merged = mergeAndVerify("merging regions 0+1+2 and 3",
-      merged.getRegionInfo().getRegionNameAsString(),
-      this.sourceRegions[3].getRegionNameAsString(), log, 4);
-
-    // Merge the result of merging regions 0, 1, 2 and 3 with region 4
-    merged = mergeAndVerify("merging regions 0+1+2+3 and 4",
-      merged.getRegionInfo().getRegionNameAsString(),
-      this.sourceRegions[4].getRegionNameAsString(), log, rows.length);
-  }
-
-}
-
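
For readers skimming the deletion above: the removed TestMergeTool drove the offline merge path end to end by invoking the Merge tool through ToolRunner and then re-opening the merged region to verify its rows. A minimal sketch of that flow, using only classes and calls that appear in the deleted test (the configuration, table descriptor, WAL and region-name variables are placeholders):

    // Run the standalone Merge tool against two named regions of a table,
    // the same way mergeAndVerify() did in the deleted test above.
    Merge merger = new Merge();
    int errCode = ToolRunner.run(conf, merger,
        new String[] { tableName, regionName1, regionName2 });   // conf, tableName, region names: placeholders
    assertEquals(0, errCode);

    // Open the merged region and confirm the source rows are still readable.
    HRegionInfo mergedInfo = merger.getMergedHRegionInfo();
    HRegion merged = HRegion.openHRegion(mergedInfo, tableDescriptor, wal, conf);  // tableDescriptor, wal: placeholders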


[08/50] [abbrv] hbase git commit: HBASE-17470 Remove merge region code from region server (Stephen Yuan Jiang)

Posted by el...@apache.org.
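
HBASE-17470 replaces the point-to-point DispatchMergingRegionsRequest (two required fields, region_a and region_b) with MergeTableRegionsRequest, whose single repeated region field lets one request name any number of regions to merge; forcible, nonce_group and nonce are unchanged apart from their presence-bit positions. As a minimal sketch of the new message shape, built only from the builder methods visible in the generated code below (the RegionSpecifier construction and the placeholder variables are assumptions, not part of this patch):

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;

    // One RegionSpecifier per region to merge. setType/setValue follow the
    // RegionSpecifier proto and are an assumption here, not shown in this diff.
    RegionSpecifier regionA = RegionSpecifier.newBuilder()
        .setType(RegionSpecifier.RegionSpecifierType.REGION_NAME)
        .setValue(ByteString.copyFrom(regionANameBytes))      // placeholder byte[]
        .build();
    RegionSpecifier regionB = RegionSpecifier.newBuilder()
        .setType(RegionSpecifier.RegionSpecifierType.REGION_NAME)
        .setValue(ByteString.copyFrom(regionBNameBytes))      // placeholder byte[]
        .build();

    // The repeated 'region' field replaces required region_a/region_b, so the
    // builder exposes addRegion(...)/addAllRegion(...) instead of setRegionA/B.
    MergeTableRegionsRequest request = MergeTableRegionsRequest.newBuilder()
        .addRegion(regionA)
        .addRegion(regionB)
        .setForcible(false)
        .setNonceGroup(nonceGroup)                            // placeholder long
        .setNonce(nonce)                                      // placeholder long
        .build();
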
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 8f293f3..6a737b8 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -5507,35 +5507,33 @@ public final class MasterProtos {
 
   }
 
-  public interface DispatchMergingRegionsRequestOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:hbase.pb.DispatchMergingRegionsRequest)
+  public interface MergeTableRegionsRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsRequest)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
 
     /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-     */
-    boolean hasRegionA();
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
      */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA();
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier> 
+        getRegionList();
     /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
      */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder();
-
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index);
     /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
      */
-    boolean hasRegionB();
+    int getRegionCount();
     /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
      */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB();
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
+        getRegionOrBuilderList();
     /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
      */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder();
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(
+        int index);
 
     /**
      * <code>optional bool forcible = 3 [default = false];</code>
@@ -5567,20 +5565,21 @@ public final class MasterProtos {
   /**
    * <pre>
    **
-   * Dispatch merging the specified regions.
+   * Merging the specified regions in a table.
    * </pre>
    *
-   * Protobuf type {@code hbase.pb.DispatchMergingRegionsRequest}
+   * Protobuf type {@code hbase.pb.MergeTableRegionsRequest}
    */
-  public  static final class DispatchMergingRegionsRequest extends
+  public  static final class MergeTableRegionsRequest extends
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
-      // @@protoc_insertion_point(message_implements:hbase.pb.DispatchMergingRegionsRequest)
-      DispatchMergingRegionsRequestOrBuilder {
-    // Use DispatchMergingRegionsRequest.newBuilder() to construct.
-    private DispatchMergingRegionsRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsRequest)
+      MergeTableRegionsRequestOrBuilder {
+    // Use MergeTableRegionsRequest.newBuilder() to construct.
+    private MergeTableRegionsRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
       super(builder);
     }
-    private DispatchMergingRegionsRequest() {
+    private MergeTableRegionsRequest() {
+      region_ = java.util.Collections.emptyList();
       forcible_ = false;
       nonceGroup_ = 0L;
       nonce_ = 0L;
@@ -5591,7 +5590,7 @@ public final class MasterProtos {
     getUnknownFields() {
       return this.unknownFields;
     }
-    private DispatchMergingRegionsRequest(
+    private MergeTableRegionsRequest(
         org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
@@ -5615,43 +5614,26 @@ public final class MasterProtos {
               break;
             }
             case 10: {
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000001) == 0x00000001)) {
-                subBuilder = regionA_.toBuilder();
-              }
-              regionA_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(regionA_);
-                regionA_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000001;
-              break;
-            }
-            case 18: {
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000002) == 0x00000002)) {
-                subBuilder = regionB_.toBuilder();
-              }
-              regionB_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(regionB_);
-                regionB_ = subBuilder.buildPartial();
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                region_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier>();
+                mutable_bitField0_ |= 0x00000001;
               }
-              bitField0_ |= 0x00000002;
+              region_.add(
+                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry));
               break;
             }
             case 24: {
-              bitField0_ |= 0x00000004;
+              bitField0_ |= 0x00000001;
               forcible_ = input.readBool();
               break;
             }
             case 32: {
-              bitField0_ |= 0x00000008;
+              bitField0_ |= 0x00000002;
               nonceGroup_ = input.readUInt64();
               break;
             }
             case 40: {
-              bitField0_ |= 0x00000010;
+              bitField0_ |= 0x00000004;
               nonce_ = input.readUInt64();
               break;
             }
@@ -5663,63 +5645,59 @@ public final class MasterProtos {
         throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
             e).setUnfinishedMessage(this);
       } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          region_ = java.util.Collections.unmodifiableList(region_);
+        }
         this.unknownFields = unknownFields.build();
         makeExtensionsImmutable();
       }
     }
     public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsRequest_descriptor;
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor;
     }
 
     protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsRequest_fieldAccessorTable
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.Builder.class);
+              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.Builder.class);
     }
 
     private int bitField0_;
-    public static final int REGION_A_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionA_;
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-     */
-    public boolean hasRegionA() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
+    public static final int REGION_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier> region_;
     /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
      */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA() {
-      return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_;
+    public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier> getRegionList() {
+      return region_;
     }
     /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
      */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder() {
-      return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_;
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
+        getRegionOrBuilderList() {
+      return region_;
     }
-
-    public static final int REGION_B_FIELD_NUMBER = 2;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionB_;
     /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
      */
-    public boolean hasRegionB() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
+    public int getRegionCount() {
+      return region_.size();
     }
     /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
      */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB() {
-      return regionB_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_;
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) {
+      return region_.get(index);
     }
     /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
      */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder() {
-      return regionB_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_;
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(
+        int index) {
+      return region_.get(index);
     }
 
     public static final int FORCIBLE_FIELD_NUMBER = 3;
@@ -5728,7 +5706,7 @@ public final class MasterProtos {
      * <code>optional bool forcible = 3 [default = false];</code>
      */
     public boolean hasForcible() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
+      return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
      * <code>optional bool forcible = 3 [default = false];</code>
@@ -5743,7 +5721,7 @@ public final class MasterProtos {
      * <code>optional uint64 nonce_group = 4 [default = 0];</code>
      */
     public boolean hasNonceGroup() {
-      return ((bitField0_ & 0x00000008) == 0x00000008);
+      return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
      * <code>optional uint64 nonce_group = 4 [default = 0];</code>
@@ -5758,7 +5736,7 @@ public final class MasterProtos {
      * <code>optional uint64 nonce = 5 [default = 0];</code>
      */
     public boolean hasNonce() {
-      return ((bitField0_ & 0x00000010) == 0x00000010);
+      return ((bitField0_ & 0x00000004) == 0x00000004);
     }
     /**
      * <code>optional uint64 nonce = 5 [default = 0];</code>
@@ -5773,21 +5751,11 @@ public final class MasterProtos {
       if (isInitialized == 1) return true;
       if (isInitialized == 0) return false;
 
-      if (!hasRegionA()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!hasRegionB()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!getRegionA().isInitialized()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!getRegionB().isInitialized()) {
-        memoizedIsInitialized = 0;
-        return false;
+      for (int i = 0; i < getRegionCount(); i++) {
+        if (!getRegion(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
       }
       memoizedIsInitialized = 1;
       return true;
@@ -5795,19 +5763,16 @@ public final class MasterProtos {
 
     public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, getRegionA());
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeMessage(2, getRegionB());
+      for (int i = 0; i < region_.size(); i++) {
+        output.writeMessage(1, region_.get(i));
       }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
         output.writeBool(3, forcible_);
       }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeUInt64(4, nonceGroup_);
       }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
         output.writeUInt64(5, nonce_);
       }
       unknownFields.writeTo(output);
@@ -5818,23 +5783,19 @@ public final class MasterProtos {
       if (size != -1) return size;
 
       size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, getRegionA());
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+      for (int i = 0; i < region_.size(); i++) {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(2, getRegionB());
+          .computeMessageSize(1, region_.get(i));
       }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
           .computeBoolSize(3, forcible_);
       }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
           .computeUInt64Size(4, nonceGroup_);
       }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
           .computeUInt64Size(5, nonce_);
       }
@@ -5849,22 +5810,14 @@ public final class MasterProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest) obj;
+      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) obj;
 
       boolean result = true;
-      result = result && (hasRegionA() == other.hasRegionA());
-      if (hasRegionA()) {
-        result = result && getRegionA()
-            .equals(other.getRegionA());
-      }
-      result = result && (hasRegionB() == other.hasRegionB());
-      if (hasRegionB()) {
-        result = result && getRegionB()
-            .equals(other.getRegionB());
-      }
+      result = result && getRegionList()
+          .equals(other.getRegionList());
       result = result && (hasForcible() == other.hasForcible());
       if (hasForcible()) {
         result = result && (getForcible()
@@ -5891,13 +5844,9 @@ public final class MasterProtos {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasRegionA()) {
-        hash = (37 * hash) + REGION_A_FIELD_NUMBER;
-        hash = (53 * hash) + getRegionA().hashCode();
-      }
-      if (hasRegionB()) {
-        hash = (37 * hash) + REGION_B_FIELD_NUMBER;
-        hash = (53 * hash) + getRegionB().hashCode();
+      if (getRegionCount() > 0) {
+        hash = (37 * hash) + REGION_FIELD_NUMBER;
+        hash = (53 * hash) + getRegionList().hashCode();
       }
       if (hasForcible()) {
         hash = (37 * hash) + FORCIBLE_FIELD_NUMBER;
@@ -5919,58 +5868,58 @@ public final class MasterProtos {
       return hash;
     }
 
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom(
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(
         org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
         throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom(
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(
         org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom(byte[] data)
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(byte[] data)
         throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data);
     }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom(
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(
         byte[] data,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
       return PARSER.parseFrom(data, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(java.io.InputStream input)
         throws java.io.IOException {
       return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
           .parseWithIOException(PARSER, input);
     }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom(
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(
         java.io.InputStream input,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
           .parseWithIOException(PARSER, input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseDelimitedFrom(java.io.InputStream input)
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseDelimitedFrom(java.io.InputStream input)
         throws java.io.IOException {
       return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
           .parseDelimitedWithIOException(PARSER, input);
     }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseDelimitedFrom(
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseDelimitedFrom(
         java.io.InputStream input,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
       return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
           .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
     }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom(
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(
         org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
         throws java.io.IOException {
       return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
           .parseWithIOException(PARSER, input);
     }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parseFrom(
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parseFrom(
         org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws java.io.IOException {
@@ -5982,7 +5931,7 @@ public final class MasterProtos {
     public static Builder newBuilder() {
       return DEFAULT_INSTANCE.toBuilder();
     }
-    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest prototype) {
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest prototype) {
       return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
     }
     public Builder toBuilder() {
@@ -5999,28 +5948,28 @@ public final class MasterProtos {
     /**
      * <pre>
      **
-     * Dispatch merging the specified regions.
+     * Merging the specified regions in a table.
      * </pre>
      *
-     * Protobuf type {@code hbase.pb.DispatchMergingRegionsRequest}
+     * Protobuf type {@code hbase.pb.MergeTableRegionsRequest}
      */
     public static final class Builder extends
         org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
-        // @@protoc_insertion_point(builder_implements:hbase.pb.DispatchMergingRegionsRequest)
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequestOrBuilder {
+        // @@protoc_insertion_point(builder_implements:hbase.pb.MergeTableRegionsRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequestOrBuilder {
       public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
           getDescriptor() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsRequest_descriptor;
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor;
       }
 
       protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
           internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsRequest_fieldAccessorTable
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable
             .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.Builder.class);
+                org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.Builder.class);
       }
 
-      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.newBuilder()
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.newBuilder()
       private Builder() {
         maybeForceBuilderInitialization();
       }
@@ -6033,80 +5982,66 @@ public final class MasterProtos {
       private void maybeForceBuilderInitialization() {
         if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
                 .alwaysUseFieldBuilders) {
-          getRegionAFieldBuilder();
-          getRegionBFieldBuilder();
+          getRegionFieldBuilder();
         }
       }
       public Builder clear() {
         super.clear();
-        if (regionABuilder_ == null) {
-          regionA_ = null;
-        } else {
-          regionABuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        if (regionBBuilder_ == null) {
-          regionB_ = null;
+        if (regionBuilder_ == null) {
+          region_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
         } else {
-          regionBBuilder_.clear();
+          regionBuilder_.clear();
         }
-        bitField0_ = (bitField0_ & ~0x00000002);
         forcible_ = false;
-        bitField0_ = (bitField0_ & ~0x00000004);
+        bitField0_ = (bitField0_ & ~0x00000002);
         nonceGroup_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000008);
+        bitField0_ = (bitField0_ & ~0x00000004);
         nonce_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000010);
+        bitField0_ = (bitField0_ & ~0x00000008);
         return this;
       }
 
       public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
           getDescriptorForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsRequest_descriptor;
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor;
       }
 
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.getDefaultInstance();
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance();
       }
 
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest build() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest result = buildPartial();
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest buildPartial() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest(this);
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        if (regionABuilder_ == null) {
-          result.regionA_ = regionA_;
+        if (regionBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            region_ = java.util.Collections.unmodifiableList(region_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.region_ = region_;
         } else {
-          result.regionA_ = regionABuilder_.build();
+          result.region_ = regionBuilder_.build();
         }
         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
-          to_bitField0_ |= 0x00000002;
-        }
-        if (regionBBuilder_ == null) {
-          result.regionB_ = regionB_;
-        } else {
-          result.regionB_ = regionBBuilder_.build();
-        }
-        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
-          to_bitField0_ |= 0x00000004;
+          to_bitField0_ |= 0x00000001;
         }
         result.forcible_ = forcible_;
-        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
-          to_bitField0_ |= 0x00000008;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000002;
         }
         result.nonceGroup_ = nonceGroup_;
-        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
-          to_bitField0_ |= 0x00000010;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000004;
         }
         result.nonce_ = nonce_;
         result.bitField0_ = to_bitField0_;
@@ -6141,21 +6076,41 @@ public final class MasterProtos {
         return (Builder) super.addRepeatedField(field, value);
       }
       public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest) {
-          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest)other);
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest)other);
         } else {
           super.mergeFrom(other);
           return this;
         }
       }
 
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest other) {
-        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest.getDefaultInstance()) return this;
-        if (other.hasRegionA()) {
-          mergeRegionA(other.getRegionA());
-        }
-        if (other.hasRegionB()) {
-          mergeRegionB(other.getRegionB());
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.getDefaultInstance()) return this;
+        if (regionBuilder_ == null) {
+          if (!other.region_.isEmpty()) {
+            if (region_.isEmpty()) {
+              region_ = other.region_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureRegionIsMutable();
+              region_.addAll(other.region_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.region_.isEmpty()) {
+            if (regionBuilder_.isEmpty()) {
+              regionBuilder_.dispose();
+              regionBuilder_ = null;
+              region_ = other.region_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              regionBuilder_ = 
+                org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
+                   getRegionFieldBuilder() : null;
+            } else {
+              regionBuilder_.addAllMessages(other.region_);
+            }
+          }
         }
         if (other.hasForcible()) {
           setForcible(other.getForcible());
@@ -6172,17 +6127,10 @@ public final class MasterProtos {
       }
 
       public final boolean isInitialized() {
-        if (!hasRegionA()) {
-          return false;
-        }
-        if (!hasRegionB()) {
-          return false;
-        }
-        if (!getRegionA().isInitialized()) {
-          return false;
-        }
-        if (!getRegionB().isInitialized()) {
-          return false;
+        for (int i = 0; i < getRegionCount(); i++) {
+          if (!getRegion(i).isInitialized()) {
+            return false;
+          }
         }
         return true;
       }
@@ -6191,11 +6139,11 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws java.io.IOException {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest parsedMessage = null;
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest parsedMessage = null;
         try {
           parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
         } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest) e.getUnfinishedMessage();
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest) e.getUnfinishedMessage();
           throw e.unwrapIOException();
         } finally {
           if (parsedMessage != null) {
@@ -6206,240 +6154,244 @@ public final class MasterProtos {
       }
       private int bitField0_;
 
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionA_ = null;
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionABuilder_;
+      private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier> region_ =
+        java.util.Collections.emptyList();
+      private void ensureRegionIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          region_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier>(region_);
+          bitField0_ |= 0x00000001;
+         }
+      }
+
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_;
+
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public boolean hasRegionA() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier> getRegionList() {
+        if (regionBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(region_);
+        } else {
+          return regionBuilder_.getMessageList();
+        }
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA() {
-        if (regionABuilder_ == null) {
-          return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_;
+      public int getRegionCount() {
+        if (regionBuilder_ == null) {
+          return region_.size();
         } else {
-          return regionABuilder_.getMessage();
+          return regionBuilder_.getCount();
         }
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public Builder setRegionA(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
-        if (regionABuilder_ == null) {
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index) {
+        if (regionBuilder_ == null) {
+          return region_.get(index);
+        } else {
+          return regionBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
+       */
+      public Builder setRegion(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+        if (regionBuilder_ == null) {
           if (value == null) {
             throw new NullPointerException();
           }
-          regionA_ = value;
+          ensureRegionIsMutable();
+          region_.set(index, value);
           onChanged();
         } else {
-          regionABuilder_.setMessage(value);
+          regionBuilder_.setMessage(index, value);
         }
-        bitField0_ |= 0x00000001;
         return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public Builder setRegionA(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
-        if (regionABuilder_ == null) {
-          regionA_ = builderForValue.build();
+      public Builder setRegion(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
+        if (regionBuilder_ == null) {
+          ensureRegionIsMutable();
+          region_.set(index, builderForValue.build());
           onChanged();
         } else {
-          regionABuilder_.setMessage(builderForValue.build());
+          regionBuilder_.setMessage(index, builderForValue.build());
         }
-        bitField0_ |= 0x00000001;
         return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public Builder mergeRegionA(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
-        if (regionABuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              regionA_ != null &&
-              regionA_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
-            regionA_ =
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionA_).mergeFrom(value).buildPartial();
-          } else {
-            regionA_ = value;
+      public Builder addRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+        if (regionBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
           }
+          ensureRegionIsMutable();
+          region_.add(value);
           onChanged();
         } else {
-          regionABuilder_.mergeFrom(value);
+          regionBuilder_.addMessage(value);
         }
-        bitField0_ |= 0x00000001;
         return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public Builder clearRegionA() {
-        if (regionABuilder_ == null) {
-          regionA_ = null;
+      public Builder addRegion(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+        if (regionBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureRegionIsMutable();
+          region_.add(index, value);
           onChanged();
         } else {
-          regionABuilder_.clear();
+          regionBuilder_.addMessage(index, value);
         }
-        bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionABuilder() {
-        bitField0_ |= 0x00000001;
-        onChanged();
-        return getRegionAFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder() {
-        if (regionABuilder_ != null) {
-          return regionABuilder_.getMessageOrBuilder();
+      public Builder addRegion(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
+        if (regionBuilder_ == null) {
+          ensureRegionIsMutable();
+          region_.add(builderForValue.build());
+          onChanged();
         } else {
-          return regionA_ == null ?
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_;
+          regionBuilder_.addMessage(builderForValue.build());
         }
+        return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
-          getRegionAFieldBuilder() {
-        if (regionABuilder_ == null) {
-          regionABuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
-                  getRegionA(),
-                  getParentForChildren(),
-                  isClean());
-          regionA_ = null;
+      public Builder addRegion(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
+        if (regionBuilder_ == null) {
+          ensureRegionIsMutable();
+          region_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          regionBuilder_.addMessage(index, builderForValue.build());
         }
-        return regionABuilder_;
-      }
-
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionB_ = null;
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBBuilder_;
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-       */
-      public boolean hasRegionB() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
+        return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB() {
-        if (regionBBuilder_ == null) {
-          return regionB_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_;
+      public Builder addAllRegion(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier> values) {
+        if (regionBuilder_ == null) {
+          ensureRegionIsMutable();
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll(
+              values, region_);
+          onChanged();
         } else {
-          return regionBBuilder_.getMessage();
+          regionBuilder_.addAllMessages(values);
         }
+        return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public Builder setRegionB(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
-        if (regionBBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          regionB_ = value;
+      public Builder clearRegion() {
+        if (regionBuilder_ == null) {
+          region_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
           onChanged();
         } else {
-          regionBBuilder_.setMessage(value);
+          regionBuilder_.clear();
         }
-        bitField0_ |= 0x00000002;
         return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public Builder setRegionB(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
-        if (regionBBuilder_ == null) {
-          regionB_ = builderForValue.build();
+      public Builder removeRegion(int index) {
+        if (regionBuilder_ == null) {
+          ensureRegionIsMutable();
+          region_.remove(index);
           onChanged();
         } else {
-          regionBBuilder_.setMessage(builderForValue.build());
+          regionBuilder_.remove(index);
         }
-        bitField0_ |= 0x00000002;
         return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public Builder mergeRegionB(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
-        if (regionBBuilder_ == null) {
-          if (((bitField0_ & 0x00000002) == 0x00000002) &&
-              regionB_ != null &&
-              regionB_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
-            regionB_ =
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionB_).mergeFrom(value).buildPartial();
-          } else {
-            regionB_ = value;
-          }
-          onChanged();
-        } else {
-          regionBBuilder_.mergeFrom(value);
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder(
+          int index) {
+        return getRegionFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(
+          int index) {
+        if (regionBuilder_ == null) {
+          return region_.get(index);  } else {
+          return regionBuilder_.getMessageOrBuilder(index);
         }
-        bitField0_ |= 0x00000002;
-        return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public Builder clearRegionB() {
-        if (regionBBuilder_ == null) {
-          regionB_ = null;
-          onChanged();
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
+           getRegionOrBuilderList() {
+        if (regionBuilder_ != null) {
+          return regionBuilder_.getMessageOrBuilderList();
         } else {
-          regionBBuilder_.clear();
+          return java.util.Collections.unmodifiableList(region_);
         }
-        bitField0_ = (bitField0_ & ~0x00000002);
-        return this;
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBBuilder() {
-        bitField0_ |= 0x00000002;
-        onChanged();
-        return getRegionBFieldBuilder().getBuilder();
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder() {
+        return getRegionFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance());
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder() {
-        if (regionBBuilder_ != null) {
-          return regionBBuilder_.getMessageOrBuilder();
-        } else {
-          return regionB_ == null ?
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_;
-        }
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder addRegionBuilder(
+          int index) {
+        return getRegionFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance());
       }
       /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
+       * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
        */
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder> 
+           getRegionBuilderList() {
+        return getRegionFieldBuilder().getBuilderList();
+      }
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
           org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
-          getRegionBFieldBuilder() {
-        if (regionBBuilder_ == null) {
-          regionBBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          getRegionFieldBuilder() {
+        if (regionBuilder_ == null) {
+          regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
               org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
-                  getRegionB(),
+                  region_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
                   getParentForChildren(),
                   isClean());
-          regionB_ = null;
+          region_ = null;
         }
-        return regionBBuilder_;
+        return regionBuilder_;
       }
 
       private boolean forcible_ ;
@@ -6447,7 +6399,7 @@ public final class MasterProtos {
        * <code>optional bool forcible = 3 [default = false];</code>
        */
       public boolean hasForcible() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
+        return ((bitField0_ & 0x00000002) == 0x00000002);
       }
       /**
        * <code>optional bool forcible = 3 [default = false];</code>
@@ -6459,7 +6411,7 @@ public final class MasterProtos {
        * <code>optional bool forcible = 3 [default = false];</code>
        */
       public Builder setForcible(boolean value) {
-        bitField0_ |= 0x00000004;
+        bitField0_ |= 0x00000002;
         forcible_ = value;
         onChanged();
         return this;
@@ -6468,7 +6420,7 @@ public final class MasterProtos {
        * <code>optional bool forcible = 3 [default = false];</code>
        */
       public Builder clearForcible() {
-        bitField0_ = (bitField0_ & ~0x00000004);
+        bitField0_ = (bitField0_ & ~0x00000002);
         forcible_ = false;
         onChanged();
         return this;
@@ -6479,7 +6431,7 @@ public final class MasterProtos {
        * <code>optional uint64 nonce_group = 4 [default = 0];</code>
        */
       public boolean hasNonceGroup() {
-        return ((bitField0_ & 0x00000008) == 0x00000008);
+        return ((bitField0_ & 0x00000004) == 0x00000004);
       }
       /**
        * <code>optional uint64 nonce_group = 4 [default = 0];</code>
@@ -6491,7 +6443,7 @@ public final class MasterProtos {
        * <code>optional uint64 nonce_group = 4 [default = 0];</code>
        */
       public Builder setNonceGroup(long value) {
-        bitField0_ |= 0x00000008;
+        bitField0_ |= 0x00000004;
         nonceGroup_ = value;
         onChanged();
         return this;
@@ -6500,7 +6452,7 @@ public final class MasterProtos {
        * <code>optional uint64 nonce_group = 4 [default = 0];</code>
        */
       public Builder clearNonceGroup() {
-        bitField0_ = (bitField0_ & ~0x00000008);
+        bitField0_ = (bitField0_ & ~0x00000004);
         nonceGroup_ = 0L;
         onChanged();
         return this;
@@ -6511,7 +6463,7 @@ public final class MasterProtos {
        * <code>optional uint64 nonce = 5 [default = 0];</code>
        */
       public boolean hasNonce() {
-        return ((bitField0_ & 0x00000010) == 0x00000010);
+        return ((bitField0_ & 0x00000008) == 0x00000008);
       }
       /**
        * <code>optional uint64 nonce = 5 [default = 0];</code>
@@ -6523,7 +6475,7 @@ public final class MasterProtos {
        * <code>optional uint64 nonce = 5 [default = 0];</code>
        */
       public Builder setNonce(long value) {
-        bitField0_ |= 0x00000010;
+        bitField0_ |= 0x00000008;
         nonce_ = value;
         onChanged();
         return this;
@@ -6532,7 +6484,7 @@ public final class MasterProtos {
        * <code>optional uint64 nonce = 5 [default = 0];</code>
        */
       public Builder clearNonce() {
-        bitField0_ = (bitField0_ & ~0x00000010);
+        bitField0_ = (bitField0_ & ~0x00000008);
         nonce_ = 0L;
         onChanged();
         return this;
@@ -6548,46 +6500,46 @@ public final class MasterProtos {
       }
 
 
-      // @@protoc_insertion_point(builder_scope:hbase.pb.DispatchMergingRegionsRequest)
+      // @@protoc_insertion_point(builder_scope:hbase.pb.MergeTableRegionsRequest)
     }
 
-    // @@protoc_insertion_point(class_scope:hbase.pb.DispatchMergingRegionsRequest)
-    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest DEFAULT_INSTANCE;
+    // @@protoc_insertion_point(class_scope:hbase.pb.MergeTableRegionsRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest DEFAULT_INSTANCE;
     static {
-      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest();
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest();
     }
 
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest getDefaultInstance() {
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstance() {
       return DEFAULT_INSTANCE;
     }
 
-    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DispatchMergingRegionsRequest>
-        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<DispatchMergingRegionsRequest>() {
-      public DispatchMergingRegionsRequest parsePartialFrom(
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeTableRegionsRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<MergeTableRegionsRequest>() {
+      public MergeTableRegionsRequest parsePartialFrom(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-          return new DispatchMergingRegionsRequest(input, extensionRegistry);
+          return new MergeTableRegionsRequest(input, extensionRegistry);
       }
     };
 
-    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DispatchMergingRegionsRequest> parser() {
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeTableRegionsRequest> parser() {
       return PARSER;
     }
 
     @java.lang.Override
-    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DispatchMergingRegionsRequest> getParserForType() {
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeTableRegionsRequest> getParserForType() {
       return PARSER;
     }
 
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest getDefaultInstanceForType() {
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest getDefaultInstanceForType() {
       return DEFAULT_INSTANCE;
     }
 
   }
 
-  public interface DispatchMergingRegionsResponseOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:hbase.pb.DispatchMergingRegionsResponse)
+  public interface MergeTableRegionsResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsResponse)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
 
     /**
@@ -6600,17 +6552,17 @@ public final class MasterProtos {
     long getProcId();
   }
   /**
-   * Protobuf type {@code hbase.pb.DispatchMergingRegionsResponse}
+   * Protobuf type {@code hbase.pb.MergeTableRegionsResponse}
    */
-  public  static final class DispatchMergingRegionsResponse extends
+  public  static final class MergeTableRegionsResponse extends
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
-      // @@protoc_insertion_point(message_implements:hbase.pb.DispatchMergingRegionsResponse)
-      DispatchMergingRegionsResponseOrBuilder {
-    // Use DispatchMergingRegionsResponse.newBuilder() to construct.
-    private DispatchMergingRegionsResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsResponse)
+      MergeTableRegionsResponseOrBuilder {
+    // Use MergeTableRegionsResponse.newBuilder() to construct.
+    private MergeTableRegionsResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
       super(builder);
     }
-    private DispatchMergingRegionsResponse() {
+    private MergeTableRegionsResponse() {
       procId_ = 0L;
     }
 
@@ -6619,7 +6571,7 @@ public final class MasterProtos {
     getUnknownFields() {
       return this.unknownFields;
     }
-    private DispatchMergingRegionsResponse(
+    private MergeTableRegionsResponse(
         org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
@@ -6661,14 +6613,14 @@ public final class MasterProtos {
     }
     public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsResponse_descriptor;
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_descriptor;
     }
 
     protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsResponse_fieldAccessorTable
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsResponse_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.Builder.class);
+              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse.Builder.class);
     }
 
     private int bitField0_;
@@ -6725,1508 +6677,10 @@ public final class MasterProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse) obj;
-
-      boolean result = true;
-      result = result && (hasProcId() == other.hasProcId());
-      if (hasProcId()) {
-        result = result && (getProcId()
-            == other.getProcId());
-      }
-      result = result && unknownFields.equals(other.unknownFields);
-      return result;
-    }
-
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasProcId()) {
-        hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
-        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
-            getProcId());
-      }
-      hash = (29 * hash) + unknownFields.hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom(byte[] data)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom(
-        byte[] data,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom(
-        java.io.InputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseDelimitedWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseDelimitedFrom(
-        java.io.InputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input, extensionRegistry);
-    }
-
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder() {
-      return DEFAULT_INSTANCE.toBuilder();
-    }
-    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse prototype) {
-      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() {
-      return this == DEFAULT_INSTANCE
-          ? new Builder() : new Builder().mergeFrom(this);
-    }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.DispatchMergingRegionsResponse}
-     */
-    public static final class Builder extends
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
-        // @@protoc_insertion_point(builder_implements:hbase.pb.DispatchMergingRegionsResponse)
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponseOrBuilder {
-      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsResponse_descriptor;
-      }
-
-      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsResponse_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-                .alwaysUseFieldBuilders) {
-        }
-      }
-      public Builder clear() {
-        super.clear();
-        procId_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000001);
-        return this;
-      }
-
-      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_DispatchMergingRegionsResponse_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse build() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse buildPartial() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        result.procId_ = procId_;
-        result.bitField0_ = to_bitField0_;
-        onBuilt();
-        return result;
-      }
-
-      public Builder clone() {
-        return (Builder) super.clone();
-      }
-      public Builder setField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          Object value) {
-        return (Builder) super.setField(field, value);
-      }
-      public Builder clearField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
-        return (Builder) super.clearField(field);
-      }
-      public Builder clearOneof(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
-        return (Builder) super.clearOneof(oneof);
-      }
-      public Builder setRepeatedField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          int index, Object value) {
-        return (Builder) super.setRepeatedField(field, index, value);
-      }
-      public Builder addRepeatedField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          Object value) {
-        return (Builder) super.addRepeatedField(field, value);
-      }
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse) {
-          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse other) {
-        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse.getDefaultInstance()) return this;
-        if (other.hasProcId()) {
-          setProcId(other.getProcId());
-        }
-        this.mergeUnknownFields(other.unknownFields);
-        onChanged();
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        return true;
-      }
-
-      public Builder mergeFrom(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse) e.getUnfinishedMessage();
-          throw e.unwrapIOException();
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      private long procId_ ;
-      /**
-       * <code>optional uint64 proc_id = 1;</code>
-       */
-      public boolean hasProcId() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>optional uint64 proc_id = 1;</code>
-       */
-      public long getProcId() {
-        return procId_;
-      }
-      /**
-       * <code>optional uint64 proc_id = 1;</code>
-       */
-      public Builder setProcId(long value) {
-        bitField0_ |= 0x00000001;
-        procId_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional uint64 proc_id = 1;</code>
-       */
-      public Builder clearProcId() {
-        bitField0_ = (bitField0_ & ~0x00000001);
-        procId_ = 0L;
-        onChanged();
-        return this;
-      }
-      public final Builder setUnknownFields(
-          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
-        return super.setUnknownFields(unknownFields);
-      }
-
-      public final Builder mergeUnknownFields(
-          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
-        return super.mergeUnknownFields(unknownFields);
-      }
-
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.DispatchMergingRegionsResponse)
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.DispatchMergingRegionsResponse)
-    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse DEFAULT_INSTANCE;
-    static {
-      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse();
-    }
-
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse getDefaultInstance() {
-      return DEFAULT_INSTANCE;
-    }
-
-    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DispatchMergingRegionsResponse>
-        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<DispatchMergingRegionsResponse>() {
-      public DispatchMergingRegionsResponse parsePartialFrom(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-          return new DispatchMergingRegionsResponse(input, extensionRegistry);
-      }
-    };
-
-    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DispatchMergingRegionsResponse> parser() {
-      return PARSER;
-    }
-
-    @java.lang.Override
-    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DispatchMergingRegionsResponse> getParserForType() {
-      return PARSER;
-    }
-
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse getDefaultInstanceForType() {
-      return DEFAULT_INSTANCE;
-    }
-
-  }
-
-  public interface MergeTableRegionsRequestOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsRequest)
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
-
-    /**
-     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
-     */
-    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier> 
-        getRegionList();
-    /**
-     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegion(int index);
-    /**
-     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
-     */
-    int getRegionCount();
-    /**
-     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
-     */
-    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
-        getRegionOrBuilderList();
-    /**
-     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder(
-        int index);
-
-    /**
-     * <code>optional bool forcible = 3 [default = false];</code>
-     */
-    boolean hasForcible();
-    /**
-     * <code>optional bool forcible = 3 [default = false];</code>
-     */
-    boolean getForcible();
-
-    /**
-     * <code>optional uint64 nonce_group = 4 [default = 0];</code>
-     */
-    boolean hasNonceGroup();
-    /**
-     * <code>optional uint64 nonce_group = 4 [default = 0];</code>
-     */
-    long getNonceGroup();
-
-    /**
-     * <code>optional uint64 nonce = 5 [default = 0];</code>
-     */
-    boolean hasNonce();
-    /**
-     * <code>optional uint64 nonce = 5 [default = 0];</code>
-     */
-    long getNonce();
-  }
-  /**
-   * <pre>
-   **
-   * Merging the specified regions in a table.
-   * </pre>
-   *
-   * Protobuf type {@code hbase.pb.MergeTableRegionsRequest}
-   */
-  public  static final class MergeTableRegionsRequest extends
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
-      // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsRequest)
-      MergeTableRegionsRequestOrBuilder {
-    // Use MergeTableRegionsRequest.newBuilder() to construct.
-    private MergeTableRegionsRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
-      super(builder);
-    }
-    private MergeTableRegionsRequest() {
-      region_ = java.util.Collections.emptyList();
-      forcible_ = false;
-      nonceGroup_ = 0L;
-      nonce_ = 0L;
-    }
-
-    @java.lang.Override
-    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
-    getUnknownFields() {
-      return this.unknownFields;
-    }
-    private MergeTableRegionsRequest(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      this();
-      int mutable_bitField0_ = 0;
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-                region_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier>();
-                mutable_bitField0_ |= 0x00000001;
-              }
-              region_.add(
-                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry));
-              break;
-            }
-            case 24: {
-              bitField0_ |= 0x00000001;
-              forcible_ = input.readBool();
-              break;
-            }
-            case 32: {
-              bitField0_ |= 0x00000002;
-              nonceGroup_ = input.readUInt64();
-              break;
-            }
-            case 40: {
-              bitField0_ |= 0x00000004;
-              nonce_ = input.readUInt64();
-              break;
-            }
-          }
-        }
-      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
-            e).setUnfinishedMessage(this);
-      } finally {
-        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
-          region_ = java.util.Collections.unmodifiableList(region_);
-        }
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_descriptor;
-    }
-
-    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_MergeTableRegionsRequest_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest.Builder.class);
-    }
-
-    private int bitField0_;
-    public static final int REGION_FIELD_NUMBER = 1;
-    private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier> region_;
-    /**
-     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
-     */
-    public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier> getRegionList() {
-      return region_;
-    }
-    /**
-     * <code>repeated .hbase.pb.RegionSpecifier region = 1;</code>
-     */
-    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
-        getRegionOrBuilderList() {
-      return region

<TRUNCATED>

[19/50] [abbrv] hbase git commit: HBASE-16786 Procedure V2 - Move ZK-lock uses to Procedure framework locks (LockProcedure) - Matteo Bertozzi. Locks are no longer hosted in ZooKeeper; they are now held by the Master.

Posted by el...@apache.org.
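
For readers skimming the diffs below: with this change a region server no longer takes these locks through ZooKeeper; it asks the Master's LockService via the new RegionServerServices.regionLock(...) call and gets back a client-side EntityLock handle (see the HRegionServer and RegionServerServices hunks). The fragment below is only an illustrative sketch of how such a handle might be used by a caller; the EntityLock method names (requestLock/await/unlock) and the surrounding variables are assumptions for illustration, not part of this patch.

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.client.locking.EntityLock;

    // Sketch only: acquiring a Master-hosted lock on two regions before a region
    // operation. The EntityLock method names here are assumed, not from this diff.
    List<HRegionInfo> regions = Arrays.asList(regionA, regionB);
    EntityLock lock = regionServerServices.regionLock(regions, "merge regions", abortable);
    lock.requestLock();   // submit the lock request to the Master's LockProcedure
    lock.await();         // block until the Master grants the lock
    try {
      // ... perform the region operation while the lock is held ...
    } finally {
      lock.unlock();      // release the Master-side lock
    }
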
http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 5259961..ceed050 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -48,7 +48,6 @@ import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.management.MalformedObjectNameException;
@@ -62,6 +61,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -87,6 +87,8 @@ import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.NonceGenerator;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.client.locking.EntityLock;
+import org.apache.hadoop.hbase.client.locking.LockServiceClient;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
@@ -111,7 +113,6 @@ import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.mob.MobCacheConfig;
 import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
@@ -147,6 +148,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringP
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
@@ -214,6 +216,9 @@ import sun.misc.SignalHandler;
 public class HRegionServer extends HasThread implements
     RegionServerServices, LastSequenceId, ConfigurationObserver {
 
+  public static final String REGION_LOCK_AWAIT_TIME_SEC =
+      "hbase.regionserver.region.lock.await.time.sec";
+  public static final int DEFAULT_REGION_LOCK_AWAIT_TIME_SEC = 300;  // 5 min
   private static final Log LOG = LogFactory.getLog(HRegionServer.class);
 
   /**
@@ -338,6 +343,7 @@ public class HRegionServer extends HasThread implements
 
   // Stub to do region server status calls against the master.
   private volatile RegionServerStatusService.BlockingInterface rssStub;
+  private volatile LockService.BlockingInterface lockStub;
   // RPC client. Used to make the stub above that does region server status checking.
   RpcClient rpcClient;
 
@@ -464,9 +470,6 @@ public class HRegionServer extends HasThread implements
 
   private RegionServerQuotaManager rsQuotaManager;
 
-  // Table level lock manager for locking for region operations
-  protected TableLockManager tableLockManager;
-
   /**
    * Nonce manager. Nonces are used to make operations like increment and append idempotent
    * in the case where client doesn't receive the response from a successful operation and
@@ -604,9 +607,6 @@ public class HRegionServer extends HasThread implements
       this.csm.initialize(this);
       this.csm.start();
 
-      tableLockManager = TableLockManager.createTableLockManager(
-        conf, zooKeeper, serverName);
-
       masterAddressTracker = new MasterAddressTracker(getZooKeeper(), this);
       masterAddressTracker.start();
 
@@ -1134,6 +1134,9 @@ public class HRegionServer extends HasThread implements
     if (this.rssStub != null) {
       this.rssStub = null;
     }
+    if (this.lockStub != null) {
+      this.lockStub = null;
+    }
     if (this.rpcClient != null) {
       this.rpcClient.close();
     }
@@ -1529,11 +1532,6 @@ public class HRegionServer extends HasThread implements
     return regionServerAccounting;
   }
 
-  @Override
-  public TableLockManager getTableLockManager() {
-    return tableLockManager;
-  }
-
   /*
    * @param r Region to get RegionLoad for.
    * @param regionLoadBldr the RegionLoad.Builder, can be null
@@ -2385,7 +2383,8 @@ public class HRegionServer extends HasThread implements
     }
     ServerName sn = null;
     long previousLogTime = 0;
-    RegionServerStatusService.BlockingInterface intf = null;
+    RegionServerStatusService.BlockingInterface intRssStub = null;
+    LockService.BlockingInterface intLockStub = null;
     boolean interrupted = false;
     try {
       while (keepLooping()) {
@@ -2409,14 +2408,16 @@ public class HRegionServer extends HasThread implements
 
         // If we are on the active master, use the shortcut
         if (this instanceof HMaster && sn.equals(getServerName())) {
-          intf = ((HMaster)this).getMasterRpcServices();
+          intRssStub = ((HMaster)this).getMasterRpcServices();
+          intLockStub = ((HMaster)this).getMasterRpcServices();
           break;
         }
         try {
           BlockingRpcChannel channel =
             this.rpcClient.createBlockingRpcChannel(sn, userProvider.getCurrent(),
               shortOperationTimeout);
-          intf = RegionServerStatusService.newBlockingStub(channel);
+          intRssStub = RegionServerStatusService.newBlockingStub(channel);
+          intLockStub = LockService.newBlockingStub(channel);
           break;
         } catch (IOException e) {
           if (System.currentTimeMillis() > (previousLogTime + 1000)) {
@@ -2439,7 +2440,8 @@ public class HRegionServer extends HasThread implements
         Thread.currentThread().interrupt();
       }
     }
-    rssStub = intf;
+    this.rssStub = intRssStub;
+    this.lockStub = intLockStub;
     return sn;
   }
 
@@ -3616,4 +3618,11 @@ public class HRegionServer extends HasThread implements
   public SecureBulkLoadManager getSecureBulkLoadManager() {
     return this.secureBulkLoadManager;
   }
-}
+
+  @Override
+  public EntityLock regionLock(List<HRegionInfo> regionInfos, String description,
+      Abortable abort) throws IOException {
+    return new LockServiceClient(conf, lockStub, clusterConnection.getNonceGenerator())
+      .regionLock(regionInfos, description, abort);
+  }
+}
\ No newline at end of file
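
The hunk above also introduces a region-server tunable, hbase.regionserver.region.lock.await.time.sec (default 300 seconds), bounding how long the server waits for a Master-granted region lock. As an illustrative override (the constant and its default come from the diff; the rest is example scaffolding only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.HRegionServer;

    // Sketch only: raise the region-lock wait from the 5-minute default to 10 minutes.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt(HRegionServer.REGION_LOCK_AWAIT_TIME_SEC, 600);
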

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 2a93b70..808cd20 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1800,7 +1800,6 @@ public class HStore implements Store {
   @VisibleForTesting
   protected void completeCompaction(final Collection<StoreFile> compactedFiles)
     throws IOException {
-    LOG.debug("Completing compaction...");
     this.storeSize = 0L;
     this.totalUncompressedBytes = 0L;
     for (StoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 5a6c7ed..c92124c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -25,14 +25,15 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
@@ -77,11 +78,6 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
   RegionServerAccounting getRegionServerAccounting();
 
   /**
-   * @return RegionServer's instance of {@link TableLockManager}
-   */
-  TableLockManager getTableLockManager();
-
-  /**
    * @return RegionServer's instance of {@link RegionServerQuotaManager}
    */
   RegionServerQuotaManager getRegionServerQuotaManager();
@@ -271,4 +267,10 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
    * @return the metrics tracker for the region server
    */
   MetricsRegionServer getMetrics();
+
+  /**
+   * Master based locks on namespaces/tables/regions.
+   */
+  EntityLock regionLock(List<HRegionInfo> regionInfos, String description,
+      Abortable abort) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index defffe3..553f756 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -129,7 +129,6 @@ import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
 import org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
@@ -249,7 +248,6 @@ public class HBaseFsck extends Configured implements Closeable {
   private boolean fixSplitParents = false; // fix lingering split parents
   private boolean fixReferenceFiles = false; // fix lingering reference store file
   private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows
-  private boolean fixTableLocks = false; // fix table locks which are expired
   private boolean fixReplication = false; // fix undeleted replication queues for removed peer
   private boolean fixAny = false; // Set to true if any of the fix is required.
 
@@ -768,8 +766,6 @@ public class HBaseFsck extends Configured implements Closeable {
       checkRegionBoundaries();
     }
 
-    checkAndFixTableLocks();
-
     checkAndFixReplication();
 
     // Remove the hbck znode
@@ -1537,7 +1533,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
   /**
    * Removes the empty Meta recovery WAL directory.
-   * @param walFactoryID A unique identifier for WAL factory which was used by Filesystem to make a
+   * @param walFactoryId A unique identifier for WAL factory which was used by Filesystem to make a
    *          Meta recovery WAL directory inside WAL directory path.
    */
   private void removeHBCKMetaRecoveryWALDir(String walFactoryId) throws IOException {
@@ -3342,15 +3338,6 @@ public class HBaseFsck extends Configured implements Closeable {
     return hbi;
   }
 
-  private void checkAndFixTableLocks() throws IOException {
-    TableLockChecker checker = new TableLockChecker(zkw, errors);
-    checker.checkTableLocks();
-
-    if (this.fixTableLocks) {
-      checker.fixExpiredTableLocks();
-    }
-  }
-
   private void checkAndFixReplication() throws IOException {
     ReplicationChecker checker = new ReplicationChecker(getConf(), zkw, connection, errors);
     checker.checkUnDeletedQueues();
@@ -4316,15 +4303,6 @@ public class HBaseFsck extends Configured implements Closeable {
   }
 
   /**
-   * Set table locks fix mode.
-   * Delete table locks held for a long time
-   */
-  public void setFixTableLocks(boolean shouldFix) {
-    fixTableLocks = shouldFix;
-    fixAny |= shouldFix;
-  }
-
-  /**
    * Set replication fix mode.
    */
   public void setFixReplication(boolean shouldFix) {
@@ -4583,14 +4561,10 @@ public class HBaseFsck extends Configured implements Closeable {
     out.println("");
     out.println("  Metadata Repair shortcuts");
     out.println("   -repair           Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " +
-        "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles -fixTableLocks");
+        "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles");
     out.println("   -repairHoles      Shortcut for -fixAssignments -fixMeta -fixHdfsHoles");
 
     out.println("");
-    out.println("  Table lock options");
-    out.println("   -fixTableLocks    Deletes table locks held for a long time (hbase.table.lock.expire.ms, 10min by default)");
-
-    out.println("");
     out.println(" Replication options");
     out.println("   -fixReplication   Deletes replication queues for removed peers");
 
@@ -4728,7 +4702,6 @@ public class HBaseFsck extends Configured implements Closeable {
         setFixSplitParents(false);
         setCheckHdfs(true);
         setFixReferenceFiles(true);
-        setFixTableLocks(true);
       } else if (cmd.equals("-repairHoles")) {
         // this will make all missing hdfs regions available but may lose data
         setFixHdfsHoles(true);
@@ -4775,8 +4748,6 @@ public class HBaseFsck extends Configured implements Closeable {
         setCheckMetaOnly();
       } else if (cmd.equals("-boundaries")) {
         setRegionBoundariesCheck();
-      } else if (cmd.equals("-fixTableLocks")) {
-        setFixTableLocks(true);
       } else if (cmd.equals("-fixReplication")) {
         setFixReplication(true);
       } else if (cmd.startsWith("-")) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
deleted file mode 100644
index 6777546..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.util.hbck;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.InterProcessLock.MetadataHandler;
-import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.HBaseFsck;
-import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-
-/**
- * Utility to check and fix table locks. Need zookeeper connection.
- */
-public class TableLockChecker {
-
-  private ZooKeeperWatcher zkWatcher;
-  private ErrorReporter errorReporter;
-  long expireTimeout;
-
-  public TableLockChecker(ZooKeeperWatcher zkWatcher, ErrorReporter errorReporter) {
-    this.zkWatcher = zkWatcher;
-    this.errorReporter = errorReporter;
-    expireTimeout = zkWatcher.getConfiguration().getLong(
-        TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT,
-        TableLockManager.DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS);
-  }
-
-  public void checkTableLocks() throws IOException {
-    TableLockManager tableLockManager
-      = TableLockManager.createTableLockManager(zkWatcher.getConfiguration(), zkWatcher, null);
-    final long expireDate = EnvironmentEdgeManager.currentTime() - expireTimeout;
-
-    MetadataHandler handler = new MetadataHandler() {
-      @Override
-      public void handleMetadata(byte[] ownerMetadata) {
-        ZooKeeperProtos.TableLock data = TableLockManager.fromBytes(ownerMetadata);
-        String msg = "Table lock acquire attempt found:";
-        if (data != null) {
-           msg = msg +
-              String.format("[tableName=%s:%s, lockOwner=%s, threadId=%s, " +
-              "purpose=%s, isShared=%s, createTime=%s]",
-              data.getTableName().getNamespace().toStringUtf8(),
-              data.getTableName().getQualifier().toStringUtf8(),
-              ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(),
-              data.getPurpose(), data.getIsShared(), data.getCreateTime());
-        }
-
-        if (data != null && data.hasCreateTime() && data.getCreateTime() < expireDate) {
-          errorReporter.reportError(HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK, msg);
-        } else {
-          errorReporter.print(msg);
-        }
-      }
-    };
-
-    tableLockManager.visitAllLocks(handler);
-  }
-
-  public void fixExpiredTableLocks() throws IOException {
-    TableLockManager tableLockManager
-      = TableLockManager.createTableLockManager(zkWatcher.getConfiguration(), zkWatcher, null);
-
-    tableLockManager.reapAllExpiredLocks();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 404c9ae..5e2a70f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -32,11 +32,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
@@ -190,11 +189,6 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public TableLockManager getTableLockManager() {
-    return new NullTableLockManager();
-  }
-
-  @Override
   public RegionServerQuotaManager getRegionServerQuotaManager() {
     return null;
   }
@@ -353,6 +347,12 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
+  public EntityLock regionLock(List<HRegionInfo> regionInfos, String description, Abortable abort)
+      throws IOException {
+    return null;
+  }
+
+  @Override
   public SecureBulkLoadManager getSecureBulkLoadManager() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index 65eca6c..66963fd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -2173,7 +2172,7 @@ public class TestMasterObserver {
 
     final TableName tableName = TableName.valueOf("testLockedTable");
     long procId = master.getLockManager().remoteLocks().requestTableLock(tableName,
-          LockProcedure.LockType.EXCLUSIVE, "desc", HConstants.NO_NONCE, HConstants.NO_NONCE);
+          LockProcedure.LockType.EXCLUSIVE, "desc", null);
     master.getLockManager().remoteLocks().lockHeartbeat(procId, false);
 
     assertTrue(cp.preAndPostForQueueLockAndHeartbeatLockCalled());
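
The hunk above moves the test to the procedure-based lock API, which now takes a single (here null) nonce argument instead of the old nonce pair. A hedged sketch of taking and heartbeating an exclusive table lock follows; the requestTableLock/lockHeartbeat calls and LockType come from this hunk, while the lockTableForMaintenance helper, the example table name, and the keepAlive reading of the lockHeartbeat boolean are assumptions.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.master.HMaster;
  import org.apache.hadoop.hbase.master.locking.LockProcedure;

  void lockTableForMaintenance(HMaster master) throws Exception {
    TableName table = TableName.valueOf("exampleTable");  // hypothetical table name
    long procId = master.getLockManager().remoteLocks().requestTableLock(
        table, LockProcedure.LockType.EXCLUSIVE, "maintenance", null);
    // Assumption: the boolean is a keepAlive flag; true renews the lock while we work...
    master.getLockManager().remoteLocks().lockHeartbeat(procId, true);
    // ... maintenance work here ...
    // ...and false lets the lock lapse or releases it once we are done.
    master.getLockManager().remoteLocks().lockHeartbeat(procId, false);
  }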

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 48cf8a5..683e9b3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -310,11 +310,6 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
-  public TableLockManager getTableLockManager() {
-    return null;
-  }
-
-  @Override
   public TableStateManager getTableStateManager() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index c5f294a..467d4a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentSkipListMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ChoreService;
@@ -47,7 +48,6 @@ import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
@@ -334,11 +334,6 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
-  public TableLockManager getTableLockManager() {
-    return new NullTableLockManager();
-  }
-
-  @Override
   public RegionServerQuotaManager getRegionServerQuotaManager() {
     return null;
   }
@@ -703,6 +698,12 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
+  public EntityLock regionLock(List<HRegionInfo> regionInfos, String description, Abortable abort)
+      throws IOException {
+    return null;
+  }
+
+  @Override
   public PrepareBulkLoadResponse prepareBulkLoad(RpcController controller,
       PrepareBulkLoadRequest request) throws ServiceException {
     return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
deleted file mode 100644
index 94b2bc1..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.ChoreService;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.InterProcessLock;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.ScheduledChore;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
-import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.LoadTestTool;
-import org.apache.hadoop.hbase.util.StoppableImplementation;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests the default table lock manager
- */
-@Category({MasterTests.class, LargeTests.class})
-public class TestTableLockManager {
-
-  private static final Log LOG =
-    LogFactory.getLog(TestTableLockManager.class);
-
-  private static final TableName TABLE_NAME =
-      TableName.valueOf("TestTableLevelLocks");
-
-  private static final byte[] FAMILY = Bytes.toBytes("f1");
-
-  private static final byte[] NEW_FAMILY = Bytes.toBytes("f2");
-
-  private final HBaseTestingUtility TEST_UTIL =
-    new HBaseTestingUtility();
-
-  private static final CountDownLatch deleteColumn = new CountDownLatch(1);
-  private static final CountDownLatch addColumn = new CountDownLatch(1);
-
-  public void prepareMiniCluster() throws Exception {
-    TEST_UTIL.startMiniCluster(2);
-    TEST_UTIL.createTable(TABLE_NAME, FAMILY);
-  }
-
-  public void prepareMiniZkCluster() throws Exception {
-    TEST_UTIL.startMiniZKCluster(1);
-  }
-
-  @Before
-  public void setUp() throws IOException {
-    TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  public static class TestLockTimeoutExceptionMasterObserver extends BaseMasterObserver {
-    @Override
-    public void preDeleteColumnFamilyAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
-        TableName tableName, byte[] columnFamily) throws IOException {
-      deleteColumn.countDown();
-    }
-    @Override
-    public void postCompletedDeleteColumnFamilyAction(
-        ObserverContext<MasterCoprocessorEnvironment> ctx,
-        TableName tableName, byte[] columnFamily) throws IOException {
-      Threads.sleep(10000);
-    }
-
-    @Override
-    public void preAddColumnFamilyAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
-        TableName tableName, HColumnDescriptor columnFamily) throws IOException {
-      fail("Add column should have timeouted out for acquiring the table lock");
-    }
-  }
-
-  @Test(timeout = 600000)
-  public void testAlterAndDisable() throws Exception {
-    prepareMiniCluster();
-    // Send a request to alter a table, then sleep during
-    // the alteration phase. In the mean time, from another
-    // thread, send a request to disable, and then delete a table.
-
-    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-    master.getMasterCoprocessorHost().load(TestAlterAndDisableMasterObserver.class,
-            0, TEST_UTIL.getConfiguration());
-
-    ExecutorService executor = Executors.newFixedThreadPool(2);
-    Future<Object> alterTableFuture = executor.submit(new Callable<Object>() {
-      @Override
-      public Object call() throws Exception {
-        Admin admin = TEST_UTIL.getHBaseAdmin();
-        admin.addColumnFamily(TABLE_NAME, new HColumnDescriptor(NEW_FAMILY));
-        LOG.info("Added new column family");
-        HTableDescriptor tableDesc = admin.getTableDescriptor(TABLE_NAME);
-        assertTrue(tableDesc.getFamiliesKeys().contains(NEW_FAMILY));
-        return null;
-      }
-    });
-    Future<Object> disableTableFuture = executor.submit(new Callable<Object>() {
-      @Override
-      public Object call() throws Exception {
-        Admin admin = TEST_UTIL.getHBaseAdmin();
-        admin.disableTable(TABLE_NAME);
-        assertTrue(admin.isTableDisabled(TABLE_NAME));
-        admin.deleteTable(TABLE_NAME);
-        assertFalse(admin.tableExists(TABLE_NAME));
-        return null;
-      }
-    });
-
-    try {
-      disableTableFuture.get();
-      alterTableFuture.get();
-    } catch (ExecutionException e) {
-      if (e.getCause() instanceof AssertionError) {
-        throw (AssertionError) e.getCause();
-      }
-      throw e;
-    }
-  }
-
-  public static class TestAlterAndDisableMasterObserver extends BaseMasterObserver {
-    @Override
-    public void preAddColumnFamilyAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
-        TableName tableName, HColumnDescriptor columnFamily) throws IOException {
-      LOG.debug("addColumn called");
-      addColumn.countDown();
-    }
-
-    @Override
-    public void postCompletedAddColumnFamilyAction(
-        ObserverContext<MasterCoprocessorEnvironment> ctx,
-        TableName tableName, HColumnDescriptor columnFamily) throws IOException {
-      Threads.sleep(6000);
-      try {
-        ctx.getEnvironment().getMasterServices().checkTableModifiable(tableName);
-      } catch(TableNotDisabledException expected) {
-        //pass
-        return;
-      } catch(IOException ex) {
-      }
-      fail("was expecting the table to be enabled");
-    }
-
-    @Override
-    public void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
-                                TableName tableName) throws IOException {
-      try {
-        LOG.debug("Waiting for addColumn to be processed first");
-        //wait for addColumn to be processed first
-        addColumn.await();
-        LOG.debug("addColumn started, we can continue");
-      } catch (InterruptedException ex) {
-        LOG.warn("Sleep interrupted while waiting for addColumn countdown");
-      }
-    }
-
-    @Override
-    public void postDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
-                                        TableName tableName) throws IOException {
-      Threads.sleep(3000);
-    }
-  }
-
-  @Test(timeout = 600000)
-  public void testDelete() throws Exception {
-    prepareMiniCluster();
-
-    Admin admin = TEST_UTIL.getHBaseAdmin();
-    admin.disableTable(TABLE_NAME);
-    admin.deleteTable(TABLE_NAME);
-
-    //ensure that znode for the table node has been deleted
-    final ZooKeeperWatcher zkWatcher = TEST_UTIL.getZooKeeperWatcher();
-    final String znode = ZKUtil.joinZNode(zkWatcher.znodePaths.tableLockZNode,
-      TABLE_NAME.getNameAsString());
-
-    TEST_UTIL.waitFor(5000, new Waiter.Predicate<Exception>() {
-      @Override
-      public boolean evaluate() throws Exception {
-        int ver = ZKUtil.checkExists(zkWatcher, znode);
-        return ver < 0;
-      }
-    });
-    int ver = ZKUtil.checkExists(zkWatcher,
-      ZKUtil.joinZNode(zkWatcher.znodePaths.tableLockZNode, TABLE_NAME.getNameAsString()));
-    assertTrue("Unexpected znode version " + ver, ver < 0);
-
-  }
-
-  public class TableLockCounter implements InterProcessLock.MetadataHandler {
-
-    private int lockCount = 0;
-
-    @Override
-    public void handleMetadata(byte[] metadata) {
-      lockCount++;
-    }
-
-    public void reset() {
-      lockCount = 0;
-    }
-
-    public int getLockCount() {
-      return lockCount;
-    }
-  }
-
-  @Test(timeout = 600000)
-  public void testReapAllTableLocks() throws Exception {
-    prepareMiniZkCluster();
-    ServerName serverName = ServerName.valueOf("localhost:10000", 0);
-    final TableLockManager lockManager = TableLockManager.createTableLockManager(
-        TEST_UTIL.getConfiguration(), TEST_UTIL.getZooKeeperWatcher(), serverName);
-
-    String tables[] = {"table1", "table2", "table3", "table4"};
-    ExecutorService executor = Executors.newFixedThreadPool(6);
-
-    final CountDownLatch writeLocksObtained = new CountDownLatch(4);
-    final CountDownLatch writeLocksAttempted = new CountDownLatch(10);
-    //TODO: read lock tables
-
-    //6 threads will be stuck waiting for the table lock
-    for (int i = 0; i < tables.length; i++) {
-      final String table = tables[i];
-      for (int j = 0; j < i+1; j++) { //i+1 write locks attempted for table[i]
-        executor.submit(new Callable<Void>() {
-          @Override
-          public Void call() throws Exception {
-            writeLocksAttempted.countDown();
-            lockManager.writeLock(TableName.valueOf(table),
-                    "testReapAllTableLocks").acquire();
-            writeLocksObtained.countDown();
-            return null;
-          }
-        });
-      }
-    }
-
-    writeLocksObtained.await();
-    writeLocksAttempted.await();
-
-    TableLockCounter counter = new TableLockCounter();
-    do {
-      counter.reset();
-      lockManager.visitAllLocks(counter);
-      Thread.sleep(10);
-    } while (counter.getLockCount() != 10);
-
-    //now reap all table locks
-    lockManager.reapWriteLocks();
-    TEST_UTIL.getConfiguration().setInt(TableLockManager.TABLE_WRITE_LOCK_TIMEOUT_MS, 0);
-    TableLockManager zeroTimeoutLockManager = TableLockManager.createTableLockManager(
-          TEST_UTIL.getConfiguration(), TEST_UTIL.getZooKeeperWatcher(), serverName);
-
-    //should not throw table lock timeout exception
-    zeroTimeoutLockManager.writeLock(
-        TableName.valueOf(tables[tables.length - 1]),
-        "zero timeout")
-      .acquire();
-
-    executor.shutdownNow();
-  }
-
-  @Test(timeout = 600000)
-  public void testTableReadLock() throws Exception {
-    // test plan: write some data to the table. Continuously alter the table and
-    // force splits
-    // concurrently until we have 5 regions. verify the data just in case.
-    // Every region should contain the same table descriptor
-    // This is not an exact test
-    prepareMiniCluster();
-    LoadTestTool loadTool = new LoadTestTool();
-    loadTool.setConf(TEST_UTIL.getConfiguration());
-    int numKeys = 10000;
-    final TableName tableName = TableName.valueOf("testTableReadLock");
-    final Admin admin = TEST_UTIL.getHBaseAdmin();
-    final HTableDescriptor desc = new HTableDescriptor(tableName);
-    final byte[] family = Bytes.toBytes("test_cf");
-    desc.addFamily(new HColumnDescriptor(family));
-    admin.createTable(desc); // create with one region
-
-    // write some data, not much
-    int ret = loadTool.run(new String[] { "-tn", tableName.getNameAsString(), "-write",
-        String.format("%d:%d:%d", 1, 10, 10), "-num_keys", String.valueOf(numKeys), "-skip_init" });
-    if (0 != ret) {
-      String errorMsg = "Load failed with error code " + ret;
-      LOG.error(errorMsg);
-      fail(errorMsg);
-    }
-
-    int familyValues = admin.getTableDescriptor(tableName).getFamily(family).getValues().size();
-    StoppableImplementation stopper = new StoppableImplementation();
-    final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
-
-    //alter table every 10 sec
-    ScheduledChore alterThread = new ScheduledChore("Alter Chore", stopper, 10000) {
-      @Override
-      protected void chore() {
-        Random random = new Random();
-        try {
-          HTableDescriptor htd = admin.getTableDescriptor(tableName);
-          String val = String.valueOf(random.nextInt());
-          htd.getFamily(family).setValue(val, val);
-          desc.getFamily(family).setValue(val, val); // save it for later
-                                                     // control
-          admin.modifyTable(tableName, htd);
-        } catch (Exception ex) {
-          LOG.warn("Caught exception", ex);
-          fail(ex.getMessage());
-        }
-      }
-    };
-
-    //split table every 5 sec
-    ScheduledChore splitThread = new ScheduledChore("Split thread", stopper, 5000) {
-      @Override
-      public void chore() {
-        try {
-          HRegion region = TEST_UTIL.getSplittableRegion(tableName, -1);
-          if (region != null) {
-            byte[] regionName = region.getRegionInfo().getRegionName();
-            admin.flushRegion(regionName);
-            admin.compactRegion(regionName);
-            admin.splitRegion(regionName);
-          } else {
-            LOG.warn("Could not find suitable region for the table.  Possibly the " +
-              "region got closed and the attempts got over before " +
-              "the region could have got reassigned.");
-          }
-        } catch (NotServingRegionException nsre) {
-          // the region may be in transition
-          LOG.warn("Caught exception", nsre);
-        } catch (Exception ex) {
-          LOG.warn("Caught exception", ex);
-          fail(ex.getMessage());
-        }
-      }
-    };
-
-    choreService.scheduleChore(alterThread);
-    choreService.scheduleChore(splitThread);
-    TEST_UTIL.waitTableEnabled(tableName);
-    while (true) {
-      List<HRegionInfo> regions = admin.getTableRegions(tableName);
-      LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions));
-      assertEquals(admin.getTableDescriptor(tableName), desc);
-      for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) {
-        HTableDescriptor regionTableDesc = region.getTableDesc();
-        assertEquals(desc, regionTableDesc);
-      }
-      if (regions.size() >= 5) {
-        break;
-      }
-      Threads.sleep(1000);
-    }
-    stopper.stop("test finished");
-
-    int newFamilyValues = admin.getTableDescriptor(tableName).getFamily(family).getValues().size();
-    LOG.info(String.format("Altered the table %d times", newFamilyValues - familyValues));
-    assertTrue(newFamilyValues > familyValues); // at least one alter went
-                                                // through
-
-    ret = loadTool.run(new String[] { "-tn", tableName.getNameAsString(), "-read", "100:10",
-        "-num_keys", String.valueOf(numKeys), "-skip_init" });
-    if (0 != ret) {
-      String errorMsg = "Verify failed with error code " + ret;
-      LOG.error(errorMsg);
-      fail(errorMsg);
-    }
-
-    admin.close();
-    choreService.shutdown();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
index be80646..f09ac07 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.client.locking.LockServiceClient;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.MasterRpcServices;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -97,7 +96,6 @@ public class TestLockProcedure {
     conf.setBoolean("hbase.procedure.check.owner.set", false);  // since rpc user will be null
     conf.setInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, HEARTBEAT_TIMEOUT);
     conf.setInt(LockProcedure.LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF, LOCAL_LOCKS_TIMEOUT);
-    conf.setInt(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT, ZK_EXPIRATION);
   }
 
   @BeforeClass
@@ -386,12 +384,6 @@ public class TestLockProcedure {
     ProcedureTestingUtility.waitProcedure(procExec, procId);
     assertEquals(false, procExec.isRunning());
     ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
-    // Remove zk lock node otherwise recovered lock will keep waiting on it. Remove
-    // both exclusive and non-exclusive (the table shared lock that the region takes).
-    // Have to pause to let the locks 'expire' up in zk. See above configs where we
-    // set explict zk timeout on locks.
-    Thread.sleep(ZK_EXPIRATION + HEARTBEAT_TIMEOUT);
-    UTIL.getMiniHBaseCluster().getMaster().getTableLockManager().reapAllExpiredLocks();
     ProcedureTestingUtility.restart(procExec);
     while (!procExec.isStarted(procId)) {
       Thread.sleep(250);
@@ -442,7 +434,6 @@ public class TestLockProcedure {
     assertEquals(false, procExec.isRunning());
     ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
     // remove zk lock node otherwise recovered lock will keep waiting on it.
-    UTIL.getMiniHBaseCluster().getMaster().getTableLockManager().reapWriteLocks();
     ProcedureTestingUtility.restart(procExec);
     while (!procExec.isStarted(lockProc.getProcId())) {
       Thread.sleep(250);

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
index a63ac03..efa45e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
@@ -28,7 +28,6 @@ import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
@@ -243,8 +242,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase
 
   @Override
   protected int doWork() throws Exception {
-    procedureScheduler = new MasterProcedureScheduler(
-        UTIL.getConfiguration(), new TableLockManager.NullTableLockManager());
+    procedureScheduler = new MasterProcedureScheduler(UTIL.getConfiguration());
     procedureScheduler.start();
     setupOperations();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 7397168..dc60710 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
 
@@ -28,17 +27,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -58,7 +53,7 @@ public class TestMasterProcedureScheduler {
   @Before
   public void setUp() throws IOException {
     conf = HBaseConfiguration.create();
-    queue = new MasterProcedureScheduler(conf, new TableLockManager.NullTableLockManager());
+    queue = new MasterProcedureScheduler(conf);
     queue.start();
   }
 
@@ -334,35 +329,20 @@ public class TestMasterProcedureScheduler {
   }
 
   @Test
-  public void testSharedZkLock() throws Exception {
+  public void testSharedLock() throws Exception {
     final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-    final String dir = TEST_UTIL.getDataTestDir("TestSharedZkLock").toString();
-    MiniZooKeeperCluster zkCluster = new MiniZooKeeperCluster(conf);
-    int zkPort = zkCluster.startup(new File(dir));
 
-    try {
-      conf.set("hbase.zookeeper.quorum", "localhost:" + zkPort);
-
-      ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testSchedWithZkLock", null, false);
-      ServerName mockName = ServerName.valueOf("localhost", 60000, 1);
-      MasterProcedureScheduler procQueue = new MasterProcedureScheduler(
-        conf,
-        TableLockManager.createTableLockManager(conf, zkw, mockName));
-
-      final TableName tableName = TableName.valueOf("testtb");
-      TestTableProcedure procA =
-          new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.READ);
-      TestTableProcedure procB =
-          new TestTableProcedure(2, tableName, TableProcedureInterface.TableOperationType.READ);
+    final TableName tableName = TableName.valueOf("testtb");
+    TestTableProcedure procA =
+        new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.READ);
+    TestTableProcedure procB =
+        new TestTableProcedure(2, tableName, TableProcedureInterface.TableOperationType.READ);
 
-      assertTrue(procQueue.tryAcquireTableSharedLock(procA, tableName));
-      assertTrue(procQueue.tryAcquireTableSharedLock(procB, tableName));
+    assertTrue(queue.tryAcquireTableSharedLock(procA, tableName));
+    assertTrue(queue.tryAcquireTableSharedLock(procB, tableName));
 
-      procQueue.releaseTableSharedLock(procA, tableName);
-      procQueue.releaseTableSharedLock(procB, tableName);
-    } finally {
-      zkCluster.shutdown();
-    }
+    queue.releaseTableSharedLock(procA, tableName);
+    queue.releaseTableSharedLock(procB, tableName);
   }
 
   @Test
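
With the ZooKeeper-backed TableLockManager gone, the scheduler's shared lock is purely in-memory and needs only a Configuration. A minimal sketch of the new setup and shared-lock usage, using only calls that appear in this hunk plus the TestTableProcedure helper defined in the test class above; the sharedLockSketch wrapper itself is illustrative.

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;

  void sharedLockSketch() {
    MasterProcedureScheduler queue = new MasterProcedureScheduler(HBaseConfiguration.create());
    queue.start();
    TableName table = TableName.valueOf("testtb");
    TestTableProcedure readerA =
        new TestTableProcedure(1, table, TableProcedureInterface.TableOperationType.READ);
    TestTableProcedure readerB =
        new TestTableProcedure(2, table, TableProcedureInterface.TableOperationType.READ);
    // Two READ procedures may hold the shared lock at the same time; an exclusive (WRITE)
    // procedure would have to wait until both of them release it.
    assert queue.tryAcquireTableSharedLock(readerA, table);
    assert queue.tryAcquireTableSharedLock(readerB, table);
    queue.releaseTableSharedLock(readerA, table);
    queue.releaseTableSharedLock(readerB, table);
  }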

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
index 511b3de..a8192be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
@@ -44,7 +43,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 @Category({MasterTests.class, MediumTests.class})
@@ -59,7 +57,7 @@ public class TestMasterProcedureSchedulerConcurrency {
     conf = HBaseConfiguration.create();
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
-    queue = new MasterProcedureScheduler(conf, new TableLockManager.NullTableLockManager());
+    queue = new MasterProcedureScheduler(conf);
     queue.start();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
deleted file mode 100644
index c2f68a6..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.procedure;
-
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-@Category({MasterTests.class, MediumTests.class})
-public class TestMergeTableRegionsProcedure {
-  private static final Log LOG = LogFactory.getLog(TestMergeTableRegionsProcedure.class);
-
-  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  private static final int initialRegionCount = 4;
-  private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
-  final static Configuration conf = UTIL.getConfiguration();
-  private static Admin admin;
-
-  private static void setupConf(Configuration conf) {
-    // Reduce the maximum attempts to speed up the test
-    conf.setInt("hbase.assignment.maximum.attempts", 3);
-    conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
-    conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);
-
-    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    setupConf(conf);
-    UTIL.startMiniCluster(1);
-    admin = UTIL.getHBaseAdmin();
-  }
-
-  @AfterClass
-  public static void cleanupTest() throws Exception {
-    try {
-      UTIL.shutdownMiniCluster();
-    } catch (Exception e) {
-      LOG.warn("failure shutting down cluster", e);
-    }
-  }
-
-  @Before
-  public void setup() throws Exception {
-    resetProcExecutorTestingKillFlag();
-    // Turn off balancer so it doesn't cut in and mess up our placements.
-    UTIL.getHBaseAdmin().setBalancerRunning(false, true);
-    // Turn off the meta scanner so it don't remove parent on us.
-    UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false);
-    resetProcExecutorTestingKillFlag();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    resetProcExecutorTestingKillFlag();
-    for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
-      LOG.info("Tear down, remove table=" + htd.getTableName());
-      UTIL.deleteTable(htd.getTableName());
-    }
-  }
-
-  private void resetProcExecutorTestingKillFlag() {
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
-    assertTrue("expected executor to be running", procExec.isRunning());
-  }
-
-  /**
-   * This tests two region merges
-   */
-  @Test(timeout=60000)
-  public void testMergeTwoRegions() throws Exception {
-    final TableName tableName = TableName.valueOf("testMergeTwoRegions");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName);
-
-    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge[0] = tableRegions.get(0);
-    regionsToMerge[1] = tableRegions.get(1);
-
-    long procId = procExec.submitProcedure(new MergeTableRegionsProcedure(
-      procExec.getEnvironment(), regionsToMerge, true));
-    ProcedureTestingUtility.waitProcedure(procExec, procId);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
-
-    assertRegionCount(tableName, initialRegionCount - 1);
-  }
-
-  /**
-   * This tests two concurrent region merges
-   */
-  @Test(timeout=60000)
-  public void testMergeRegionsConcurrently() throws Exception {
-    final TableName tableName = TableName.valueOf("testMergeRegionsConcurrently");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName);
-
-    HRegionInfo[] regionsToMerge1 = new HRegionInfo[2];
-    HRegionInfo[] regionsToMerge2 = new HRegionInfo[2];
-    regionsToMerge1[0] = tableRegions.get(0);
-    regionsToMerge1[1] = tableRegions.get(1);
-    regionsToMerge2[0] = tableRegions.get(2);
-    regionsToMerge2[1] = tableRegions.get(3);
-
-    long procId1 = procExec.submitProcedure(new MergeTableRegionsProcedure(
-      procExec.getEnvironment(), regionsToMerge1, true));
-    long procId2 = procExec.submitProcedure(new MergeTableRegionsProcedure(
-      procExec.getEnvironment(), regionsToMerge2, true));
-    ProcedureTestingUtility.waitProcedure(procExec, procId1);
-    ProcedureTestingUtility.waitProcedure(procExec, procId2);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
-    assertRegionCount(tableName, initialRegionCount - 2);
-  }
-
-  @Test(timeout=60000)
-  public void testRecoveryAndDoubleExecution() throws Exception {
-    final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName);
-
-    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
-
-    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge[0] = tableRegions.get(0);
-    regionsToMerge[1] = tableRegions.get(1);
-
-    long procId = procExec.submitProcedure(
-      new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true));
-
-    // Restart the executor and execute the step twice
-    int numberOfSteps = MergeTableRegionsState.values().length;
-    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
-
-    assertRegionCount(tableName, initialRegionCount - 1);
-  }
-
-  @Test(timeout = 60000)
-  public void testRollbackAndDoubleExecution() throws Exception {
-    final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName);
-
-    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
-
-    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge[0] = tableRegions.get(0);
-    regionsToMerge[1] = tableRegions.get(1);
-
-    long procId = procExec.submitProcedure(
-      new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true));
-
-    // Failing before MERGE_TABLE_REGIONS_UPDATE_META we should trigger the rollback
-    // NOTE: the 6 (number before MERGE_TABLE_REGIONS_UPDATE_META step) is
-    // hardcoded, so you have to look at this test at least once when you add a new step.
-    int numberOfSteps = 6;
-    MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps);
-  }
-
-  private List<HRegionInfo> createTable(final TableName tableName)
-      throws Exception {
-    HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.addFamily(new HColumnDescriptor(FAMILY));
-    byte[][] splitRows = new byte[initialRegionCount - 1][];
-    for (int i = 0; i < splitRows.length; ++i) {
-      splitRows[i] = Bytes.toBytes(String.format("%d", i));
-    }
-    admin.createTable(desc, splitRows);
-    return assertRegionCount(tableName, initialRegionCount);
-  }
-
-  public List<HRegionInfo> assertRegionCount(final TableName tableName, final int nregions)
-      throws Exception {
-    UTIL.waitUntilNoRegionsInTransition();
-    List<HRegionInfo> tableRegions = admin.getTableRegions(tableName);
-    assertEquals(nregions, tableRegions.size());
-    return tableRegions;
-  }
-
-  private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
-    return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
index 2e7735b..3f3423e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
@@ -245,7 +245,13 @@ public class TestMobStoreCompaction {
     region.compact(true);
     assertEquals("After compaction: store files", 1, countStoreFiles());
     // still have original mob hfiles and now added a mob del file
-    assertEquals("After compaction: mob files", numHfiles + 1, countMobFiles());
+    // CHANGED EXPECTATION WHEN LOCKING CHANGED. In this context there is no locking, because no
+    // RegionServerServices is provided to the region (it is null). With no services, and
+    // therefore no way to take a lock, the mob compaction runs with
+    // compaction.getRequest().forceRetainDeleteMarkers(), which changes the expected
+    // file count: it is one less than when we run with the locks (numHfiles instead of
+    // numHfiles + 1).
+    assertEquals("After compaction: mob files", numHfiles, countMobFiles());
 
     Scan scan = new Scan();
     scan.setRaw(true);
@@ -263,11 +269,16 @@ public class TestMobStoreCompaction {
       results.clear();
       scanner.next(results);
     }
-    // assert the delete mark is not retained after the major compaction
-    assertEquals(0, deleteCount);
+    // Assert the delete marker is not retained after the major compaction.
+    // See the CHANGED EXPECTATION WHEN LOCKING CHANGED note above: without a lock the delete
+    // marker is retained, so this assertion no longer holds in the new locking regime.
+    // assertEquals(0, deleteCount);
     scanner.close();
     // assert the deleted cell is not counted
-    assertEquals("The cells in mob files", numHfiles - 1, countMobCellsInMobFiles(1));
+    // See the CHANGED EXPECTATION WHEN LOCKING CHANGED note above: the expectation changes in
+    // the new locking regime. We used to pass '1' and expect numHfiles - 1; the call below now
+    // passes '0' and expects numHfiles.
+    assertEquals("The cells in mob files", numHfiles, countMobCellsInMobFiles(0));
   }
 
   private int countStoreFiles() throws IOException {
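
A hedged sketch of the branch the comment above describes: with no RegionServerServices there is no way to take the region lock, so delete markers are retained. Only regionLock(...) and forceRetainDeleteMarkers() come from this commit; the maybeRetainDeleteMarkers helper and its parameter names are placeholders, not the actual HStore/HMobStore code.

  import java.io.IOException;
  import java.util.Collections;
  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.client.locking.EntityLock;
  import org.apache.hadoop.hbase.regionserver.RegionServerServices;
  import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;

  void maybeRetainDeleteMarkers(RegionServerServices rsServices, HRegionInfo regionInfo,
      CompactionContext compaction) throws IOException {
    if (rsServices == null) {
      // No services, so no way to take a region lock: keep the delete markers and let a
      // later, lock-holding compaction drop them. This is what shifts the counts above.
      compaction.getRequest().forceRetainDeleteMarkers();
    } else {
      // With services available, the region lock added to RegionServerServices in this
      // commit can be taken before a normal major compaction that purges the markers.
      EntityLock lock = rsServices.regionLock(
          Collections.singletonList(regionInfo), "mob major compaction", null);
      // ... requestLock/await, compact, unlock (assumed EntityLock API) ...
    }
  }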

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index e2a9bee..15766f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -96,10 +97,11 @@ public class TestRegionServerMetrics {
     admin = TEST_UTIL.getHBaseAdmin();
     connection = TEST_UTIL.getConnection();
 
-    while (cluster.getLiveRegionServerThreads().size() < 1) {
+    while (cluster.getLiveRegionServerThreads().isEmpty() ||
+        cluster.getRegionServer(0) == null ||
+        cluster.getRegionServer(0).getRegionServerMetrics() == null) {
       Threads.sleep(100);
     }
-
     rs = cluster.getRegionServer(0);
     metricsRegionServer = rs.getRegionServerMetrics();
     serverSource = metricsRegionServer.getMetricsSource();
@@ -420,7 +422,6 @@ public class TestRegionServerMetrics {
       }
       metricsRegionServer.getRegionServerWrapper().forceRecompute();
       assertCounter("mobFlushCount", numHfiles);
-
       Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(numHfiles));
       ResultScanner scanner = table.getScanner(scan);
       scanner.next(100);

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index 92eaecc..c8fe299 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -165,7 +165,6 @@ public class TestTokenAuthentication {
 
         @Override
         public ServiceDescriptor getDescriptorForType() {
-          // TODO Auto-generated method stub
           return null;
         }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index fcd5258..257dfc5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.io.hfile.TestHFile;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionStates;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -1479,85 +1478,6 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
   }
 
   @Test(timeout=180000)
-  public void testCheckTableLocks() throws Exception {
-    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge(0);
-    EnvironmentEdgeManager.injectEdge(edge);
-    // check no errors
-    HBaseFsck hbck = doFsck(conf, false);
-    assertNoErrors(hbck);
-
-    ServerName mockName = ServerName.valueOf("localhost", 60000, 1);
-    final TableName tableName = TableName.valueOf("foo");
-
-    // obtain one lock
-    final TableLockManager tableLockManager =
-        TableLockManager.createTableLockManager(conf, TEST_UTIL.getZooKeeperWatcher(), mockName);
-    TableLockManager.TableLock
-        writeLock = tableLockManager.writeLock(tableName, "testCheckTableLocks");
-    writeLock.acquire();
-    hbck = doFsck(conf, false);
-    assertNoErrors(hbck); // should not have expired, no problems
-
-    edge.incrementTime(conf.getLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT,
-        TableLockManager.DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS)); // let table lock expire
-
-    hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
-        HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK});
-
-    final CountDownLatch latch = new CountDownLatch(1);
-    new Thread() {
-      @Override
-      public void run() {
-        TableLockManager.TableLock
-            readLock = tableLockManager.writeLock(tableName, "testCheckTableLocks");
-        try {
-          latch.countDown();
-          readLock.acquire();
-        } catch (IOException ex) {
-          fail();
-        } catch (IllegalStateException ex) {
-          return; // expected, since this will be reaped under us.
-        }
-        fail("should not have come here");
-      };
-    }.start();
-
-    latch.await(); // wait until thread starts
-    Threads.sleep(300); // wait some more to ensure writeLock.acquire() is called
-
-    hbck = doFsck(conf, false);
-    // still one expired, one not-expired
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
-        HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK});
-
-    edge.incrementTime(conf.getLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT,
-        TableLockManager.DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS)); // let table lock expire
-
-    hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
-        HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK,
-        HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK}); // both are expired
-
-    Configuration localConf = new Configuration(conf);
-    // reaping from ZKInterProcessWriteLock uses znode cTime,
-    // which is not injectable through EnvironmentEdge
-    localConf.setLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT, 1);
-
-    Threads.sleep(10);
-    hbck = doFsck(localConf, true); // now fix both cases
-
-    hbck = doFsck(localConf, false);
-    assertNoErrors(hbck);
-
-    // ensure that locks are deleted
-    writeLock = tableLockManager.writeLock(tableName, "should acquire without blocking");
-    writeLock.acquire(); // this should not block.
-    writeLock.release(); // release for clean state
-    tableLockManager.tableDeleted(tableName);
-  }
-
-  @Test(timeout=180000)
   public void testCheckReplication() throws Exception {
     // check no errors
     HBaseFsck hbck = doFsck(conf, false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
index d1e774e..0c9b036 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
@@ -61,7 +61,6 @@ public class HbckTestingUtil {
       fsck.setFixVersionFile(fixVersionFile);
       fsck.setFixReferenceFiles(fixReferenceFiles);
       fsck.setFixEmptyMetaCells(fixEmptyMetaRegionInfo);
-      fsck.setFixTableLocks(fixTableLocks);
       fsck.setFixReplication(fixReplication);
       if (table != null) {
         fsck.includeTable(table);


[44/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
index 05894b9..1925828 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -217,12 +217,20 @@ public final class QuotaProtos {
      * <code>THROTTLE = 1;</code>
      */
     THROTTLE(0, 1),
+    /**
+     * <code>SPACE = 2;</code>
+     */
+    SPACE(1, 2),
     ;
 
     /**
      * <code>THROTTLE = 1;</code>
      */
     public static final int THROTTLE_VALUE = 1;
+    /**
+     * <code>SPACE = 2;</code>
+     */
+    public static final int SPACE_VALUE = 2;
 
 
     public final int getNumber() { return value; }
@@ -230,6 +238,7 @@ public final class QuotaProtos {
     public static QuotaType valueOf(int value) {
       switch (value) {
         case 1: return THROTTLE;
+        case 2: return SPACE;
         default: return null;
       }
     }
@@ -281,6 +290,142 @@ public final class QuotaProtos {
     // @@protoc_insertion_point(enum_scope:hbase.pb.QuotaType)
   }
 
+  /**
+   * Protobuf enum {@code hbase.pb.SpaceViolationPolicy}
+   *
+   * <pre>
+   * Defines what action should be taken when the SpaceQuota is violated
+   * </pre>
+   */
+  public enum SpaceViolationPolicy
+      implements com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <code>DISABLE = 1;</code>
+     *
+     * <pre>
+     * Disable the table(s)
+     * </pre>
+     */
+    DISABLE(0, 1),
+    /**
+     * <code>NO_WRITES_COMPACTIONS = 2;</code>
+     *
+     * <pre>
+     * No writes, bulk-loads, or compactions
+     * </pre>
+     */
+    NO_WRITES_COMPACTIONS(1, 2),
+    /**
+     * <code>NO_WRITES = 3;</code>
+     *
+     * <pre>
+     * No writes or bulk-loads
+     * </pre>
+     */
+    NO_WRITES(2, 3),
+    /**
+     * <code>NO_INSERTS = 4;</code>
+     *
+     * <pre>
+     * No puts or bulk-loads, but deletes are allowed
+     * </pre>
+     */
+    NO_INSERTS(3, 4),
+    ;
+
+    /**
+     * <code>DISABLE = 1;</code>
+     *
+     * <pre>
+     * Disable the table(s)
+     * </pre>
+     */
+    public static final int DISABLE_VALUE = 1;
+    /**
+     * <code>NO_WRITES_COMPACTIONS = 2;</code>
+     *
+     * <pre>
+     * No writes, bulk-loads, or compactions
+     * </pre>
+     */
+    public static final int NO_WRITES_COMPACTIONS_VALUE = 2;
+    /**
+     * <code>NO_WRITES = 3;</code>
+     *
+     * <pre>
+     * No writes or bulk-loads
+     * </pre>
+     */
+    public static final int NO_WRITES_VALUE = 3;
+    /**
+     * <code>NO_INSERTS = 4;</code>
+     *
+     * <pre>
+     * No puts or bulk-loads, but deletes are allowed
+     * </pre>
+     */
+    public static final int NO_INSERTS_VALUE = 4;
+
+
+    public final int getNumber() { return value; }
+
+    public static SpaceViolationPolicy valueOf(int value) {
+      switch (value) {
+        case 1: return DISABLE;
+        case 2: return NO_WRITES_COMPACTIONS;
+        case 3: return NO_WRITES;
+        case 4: return NO_INSERTS;
+        default: return null;
+      }
+    }
+
+    public static com.google.protobuf.Internal.EnumLiteMap<SpaceViolationPolicy>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static com.google.protobuf.Internal.EnumLiteMap<SpaceViolationPolicy>
+        internalValueMap =
+          new com.google.protobuf.Internal.EnumLiteMap<SpaceViolationPolicy>() {
+            public SpaceViolationPolicy findValueByNumber(int number) {
+              return SpaceViolationPolicy.valueOf(number);
+            }
+          };
+
+    public final com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(index);
+    }
+    public final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(3);
+    }
+
+    private static final SpaceViolationPolicy[] VALUES = values();
+
+    public static SpaceViolationPolicy valueOf(
+        com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
+
+    private final int index;
+    private final int value;
+
+    private SpaceViolationPolicy(int index, int value) {
+      this.index = index;
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:hbase.pb.SpaceViolationPolicy)
+  }
+
   public interface TimedQuotaOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
@@ -3315,6 +3460,20 @@ public final class QuotaProtos {
      * <code>optional .hbase.pb.Throttle throttle = 2;</code>
      */
     org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder getThrottleOrBuilder();
+
+    // optional .hbase.pb.SpaceQuota space = 3;
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    boolean hasSpace();
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getSpace();
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder();
   }
   /**
    * Protobuf type {@code hbase.pb.Quotas}
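
For illustration only: a client-side reader for the new optional space field on Quotas, using the hasSpace()/getSpace() accessors added above plus the SpaceQuota getters defined later in this file. A hedged sketch, not code from this commit.

    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota;

    public class ReadSpaceQuotaSketch {
      // Returns the soft limit in bytes, or -1 when no space quota is present.
      static long softLimitOrMinusOne(Quotas quotas) {
        if (!quotas.hasSpace()) {
          return -1L;
        }
        SpaceQuota space = quotas.getSpace();
        return space.hasSoftLimit() ? space.getSoftLimit() : -1L;
      }
    }
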
@@ -3385,6 +3544,19 @@ public final class QuotaProtos {
               bitField0_ |= 0x00000002;
               break;
             }
+            case 26: {
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000004) == 0x00000004)) {
+                subBuilder = space_.toBuilder();
+              }
+              space_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(space_);
+                space_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000004;
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -3463,9 +3635,32 @@ public final class QuotaProtos {
       return throttle_;
     }
 
+    // optional .hbase.pb.SpaceQuota space = 3;
+    public static final int SPACE_FIELD_NUMBER = 3;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota space_;
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    public boolean hasSpace() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getSpace() {
+      return space_;
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder() {
+      return space_;
+    }
+
     private void initFields() {
       bypassGlobals_ = false;
       throttle_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance();
+      space_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -3491,6 +3686,9 @@ public final class QuotaProtos {
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeMessage(2, throttle_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeMessage(3, space_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -3508,6 +3706,10 @@ public final class QuotaProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeMessageSize(2, throttle_);
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(3, space_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
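
A round-trip sketch exercising the new field through writeTo/getSerializedSize; Quotas.newBuilder() and Quotas.parseFrom(byte[]) are assumed to follow the usual generated-message conventions and are not shown in this diff.

    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota;
    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy;

    public class QuotasRoundTripSketch {
      public static void main(String[] args) throws Exception {
        Quotas quotas = Quotas.newBuilder()
            .setSpace(SpaceQuota.newBuilder()
                .setSoftLimit(1024L * 1024L * 1024L)                  // 1 GB soft limit
                .setViolationPolicy(SpaceViolationPolicy.NO_WRITES))
            .build();
        byte[] wire = quotas.toByteArray();                           // now includes field 3
        Quotas parsed = Quotas.parseFrom(wire);
        System.out.println(parsed.hasSpace() + " " + parsed.getSpace().getSoftLimit());
      }
    }
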
@@ -3541,6 +3743,11 @@ public final class QuotaProtos {
         result = result && getThrottle()
             .equals(other.getThrottle());
       }
+      result = result && (hasSpace() == other.hasSpace());
+      if (hasSpace()) {
+        result = result && getSpace()
+            .equals(other.getSpace());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -3562,6 +3769,10 @@ public final class QuotaProtos {
         hash = (37 * hash) + THROTTLE_FIELD_NUMBER;
         hash = (53 * hash) + getThrottle().hashCode();
       }
+      if (hasSpace()) {
+        hash = (37 * hash) + SPACE_FIELD_NUMBER;
+        hash = (53 * hash) + getSpace().hashCode();
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -3664,6 +3875,7 @@ public final class QuotaProtos {
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
           getThrottleFieldBuilder();
+          getSpaceFieldBuilder();
         }
       }
       private static Builder create() {
@@ -3680,6 +3892,12 @@ public final class QuotaProtos {
           throttleBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000002);
+        if (spaceBuilder_ == null) {
+          space_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance();
+        } else {
+          spaceBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000004);
         return this;
       }
 
@@ -3720,6 +3938,14 @@ public final class QuotaProtos {
         } else {
           result.throttle_ = throttleBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        if (spaceBuilder_ == null) {
+          result.space_ = space_;
+        } else {
+          result.space_ = spaceBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -3742,6 +3968,9 @@ public final class QuotaProtos {
         if (other.hasThrottle()) {
           mergeThrottle(other.getThrottle());
         }
+        if (other.hasSpace()) {
+          mergeSpace(other.getSpace());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -3925,6 +4154,123 @@ public final class QuotaProtos {
         return throttleBuilder_;
       }
 
+      // optional .hbase.pb.SpaceQuota space = 3;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota space_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> spaceBuilder_;
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public boolean hasSpace() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getSpace() {
+        if (spaceBuilder_ == null) {
+          return space_;
+        } else {
+          return spaceBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public Builder setSpace(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota value) {
+        if (spaceBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          space_ = value;
+          onChanged();
+        } else {
+          spaceBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public Builder setSpace(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder builderForValue) {
+        if (spaceBuilder_ == null) {
+          space_ = builderForValue.build();
+          onChanged();
+        } else {
+          spaceBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public Builder mergeSpace(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota value) {
+        if (spaceBuilder_ == null) {
+          if (((bitField0_ & 0x00000004) == 0x00000004) &&
+              space_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) {
+            space_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder(space_).mergeFrom(value).buildPartial();
+          } else {
+            space_ = value;
+          }
+          onChanged();
+        } else {
+          spaceBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public Builder clearSpace() {
+        if (spaceBuilder_ == null) {
+          space_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance();
+          onChanged();
+        } else {
+          spaceBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder getSpaceBuilder() {
+        bitField0_ |= 0x00000004;
+        onChanged();
+        return getSpaceFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder() {
+        if (spaceBuilder_ != null) {
+          return spaceBuilder_.getMessageOrBuilder();
+        } else {
+          return space_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
+          getSpaceFieldBuilder() {
+        if (spaceBuilder_ == null) {
+          spaceBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>(
+                  space_,
+                  getParentForChildren(),
+                  isClean());
+          space_ = null;
+        }
+        return spaceBuilder_;
+      }
+
       // @@protoc_insertion_point(builder_scope:hbase.pb.Quotas)
     }
 
@@ -4274,81 +4620,1257 @@ public final class QuotaProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.QuotaUsage)
   }
 
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_TimedQuota_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_TimedQuota_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_Throttle_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_Throttle_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_ThrottleRequest_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_Quotas_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_Quotas_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_QuotaUsage_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_QuotaUsage_fieldAccessorTable;
+  public interface SpaceQuotaOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
 
-  public static com.google.protobuf.Descriptors.FileDescriptor
-      getDescriptor() {
-    return descriptor;
+    // optional uint64 soft_limit = 1;
+    /**
+     * <code>optional uint64 soft_limit = 1;</code>
+     *
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+     */
+    boolean hasSoftLimit();
+    /**
+     * <code>optional uint64 soft_limit = 1;</code>
+     *
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+     */
+    long getSoftLimit();
+
+    // optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;
+    /**
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+     *
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+     */
+    boolean hasViolationPolicy();
+    /**
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+     *
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy();
   }
-  private static com.google.protobuf.Descriptors.FileDescriptor
-      descriptor;
-  static {
-    java.lang.String[] descriptorData = {
-      "\n\013Quota.proto\022\010hbase.pb\032\013HBase.proto\"\204\001\n" +
-      "\nTimedQuota\022%\n\ttime_unit\030\001 \002(\0162\022.hbase.p" +
-      "b.TimeUnit\022\022\n\nsoft_limit\030\002 \001(\004\022\r\n\005share\030" +
-      "\003 \001(\002\022,\n\005scope\030\004 \001(\0162\024.hbase.pb.QuotaSco" +
-      "pe:\007MACHINE\"\375\001\n\010Throttle\022%\n\007req_num\030\001 \001(" +
-      "\0132\024.hbase.pb.TimedQuota\022&\n\010req_size\030\002 \001(" +
-      "\0132\024.hbase.pb.TimedQuota\022\'\n\twrite_num\030\003 \001" +
-      "(\0132\024.hbase.pb.TimedQuota\022(\n\nwrite_size\030\004" +
-      " \001(\0132\024.hbase.pb.TimedQuota\022&\n\010read_num\030\005" +
-      " \001(\0132\024.hbase.pb.TimedQuota\022\'\n\tread_size\030",
-      "\006 \001(\0132\024.hbase.pb.TimedQuota\"b\n\017ThrottleR" +
-      "equest\022$\n\004type\030\001 \001(\0162\026.hbase.pb.Throttle" +
-      "Type\022)\n\013timed_quota\030\002 \001(\0132\024.hbase.pb.Tim" +
-      "edQuota\"M\n\006Quotas\022\035\n\016bypass_globals\030\001 \001(" +
-      "\010:\005false\022$\n\010throttle\030\002 \001(\0132\022.hbase.pb.Th" +
-      "rottle\"\014\n\nQuotaUsage*&\n\nQuotaScope\022\013\n\007CL" +
-      "USTER\020\001\022\013\n\007MACHINE\020\002*v\n\014ThrottleType\022\022\n\016" +
-      "REQUEST_NUMBER\020\001\022\020\n\014REQUEST_SIZE\020\002\022\020\n\014WR" +
-      "ITE_NUMBER\020\003\022\016\n\nWRITE_SIZE\020\004\022\017\n\013READ_NUM" +
-      "BER\020\005\022\r\n\tREAD_SIZE\020\006*\031\n\tQuotaType\022\014\n\010THR",
-      "OTTLE\020\001BA\n*org.apache.hadoop.hbase.proto" +
-      "buf.generatedB\013QuotaProtosH\001\210\001\001\240\001\001"
-    };
-    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
-      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
-        public com.google.protobuf.ExtensionRegistry assignDescriptors(
-            com.google.protobuf.Descriptors.FileDescriptor root) {
-          descriptor = root;
-          internal_static_hbase_pb_TimedQuota_descriptor =
-            getDescriptor().getMessageTypes().get(0);
-          internal_static_hbase_pb_TimedQuota_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_TimedQuota_descriptor,
-              new java.lang.String[] { "TimeUnit", "SoftLimit", "Share", "Scope", });
-          internal_static_hbase_pb_Throttle_descriptor =
-            getDescriptor().getMessageTypes().get(1);
-          internal_static_hbase_pb_Throttle_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_Throttle_descriptor,
-              new java.lang.String[] { "ReqNum", "ReqSize", "WriteNum", "WriteSize", "ReadNum", "ReadSize", });
-          internal_static_hbase_pb_ThrottleRequest_descriptor =
+  /**
+   * Protobuf type {@code hbase.pb.SpaceQuota}
+   *
+   * <pre>
+   * Defines a limit on the amount of filesystem space used by a table/namespace
+   * </pre>
+   */
+  public static final class SpaceQuota extends
+      com.google.protobuf.GeneratedMessage
+      implements SpaceQuotaOrBuilder {
+    // Use SpaceQuota.newBuilder() to construct.
+    private SpaceQuota(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private SpaceQuota(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final SpaceQuota defaultInstance;
+    public static SpaceQuota getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public SpaceQuota getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SpaceQuota(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              softLimit_ = input.readUInt64();
+              break;
+            }
+            case 16: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy value = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(2, rawValue);
+              } else {
+                bitField0_ |= 0x00000002;
+                violationPolicy_ = value;
+              }
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<SpaceQuota> PARSER =
+        new com.google.protobuf.AbstractParser<SpaceQuota>() {
+      public SpaceQuota parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new SpaceQuota(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<SpaceQuota> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // optional uint64 soft_limit = 1;
+    public static final int SOFT_LIMIT_FIELD_NUMBER = 1;
+    private long softLimit_;
+    /**
+     * <code>optional uint64 soft_limit = 1;</code>
+     *
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+     */
+    public boolean hasSoftLimit() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional uint64 soft_limit = 1;</code>
+     *
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+     */
+    public long getSoftLimit() {
+      return softLimit_;
+    }
+
+    // optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;
+    public static final int VIOLATION_POLICY_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy violationPolicy_;
+    /**
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+     *
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+     */
+    public boolean hasViolationPolicy() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+     *
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy() {
+      return violationPolicy_;
+    }
+
+    private void initFields() {
+      softLimit_ = 0L;
+      violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(1, softLimit_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeEnum(2, violationPolicy_.getNumber());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(1, softLimit_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(2, violationPolicy_.getNumber());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota) obj;
+
+      boolean result = true;
+      result = result && (hasSoftLimit() == other.hasSoftLimit());
+      if (hasSoftLimit()) {
+        result = result && (getSoftLimit()
+            == other.getSoftLimit());
+      }
+      result = result && (hasViolationPolicy() == other.hasViolationPolicy());
+      if (hasViolationPolicy()) {
+        result = result &&
+            (getViolationPolicy() == other.getViolationPolicy());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasSoftLimit()) {
+        hash = (37 * hash) + SOFT_LIMIT_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getSoftLimit());
+      }
+      if (hasViolationPolicy()) {
+        hash = (37 * hash) + VIOLATION_POLICY_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getViolationPolicy());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.SpaceQuota}
+     *
+     * <pre>
+     * Defines a limit on the amount of filesystem space used by a table/namespace
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        softLimit_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota build() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota result = new org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.softLimit_ = softLimit_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.violationPolicy_ = violationPolicy_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) return this;
+        if (other.hasSoftLimit()) {
+          setSoftLimit(other.getSoftLimit());
+        }
+        if (other.hasViolationPolicy()) {
+          setViolationPolicy(other.getViolationPolicy());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // optional uint64 soft_limit = 1;
+      private long softLimit_ ;
+      /**
+       * <code>optional uint64 soft_limit = 1;</code>
+       *
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+       */
+      public boolean hasSoftLimit() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional uint64 soft_limit = 1;</code>
+       *
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+       */
+      public long getSoftLimit() {
+        return softLimit_;
+      }
+      /**
+       * <code>optional uint64 soft_limit = 1;</code>
+       *
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+       */
+      public Builder setSoftLimit(long value) {
+        bitField0_ |= 0x00000001;
+        softLimit_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 soft_limit = 1;</code>
+       *
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+       */
+      public Builder clearSoftLimit() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        softLimit_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
+      /**
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       *
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       */
+      public boolean hasViolationPolicy() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       *
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy() {
+        return violationPolicy_;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       *
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       */
+      public Builder setViolationPolicy(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        violationPolicy_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       *
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       */
+      public Builder clearViolationPolicy() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.SpaceQuota)
+    }
+
+    static {
+      defaultInstance = new SpaceQuota(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.SpaceQuota)
+  }
+
+  public interface SpaceLimitRequestOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // optional .hbase.pb.SpaceQuota quota = 1;
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    boolean hasQuota();
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getQuota();
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.SpaceLimitRequest}
+   *
+   * <pre>
+   * The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
+   * </pre>
+   */
+  public static final class SpaceLimitRequest extends
+      com.google.protobuf.GeneratedMessage
+      implements SpaceLimitRequestOrBuilder {
+    // Use SpaceLimitRequest.newBuilder() to construct.
+    private SpaceLimitRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private SpaceLimitRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final SpaceLimitRequest defaultInstance;
+    public static SpaceLimitRequest getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public SpaceLimitRequest getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SpaceLimitRequest(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = quota_.toBuilder();
+              }
+              quota_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(quota_);
+                quota_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<SpaceLimitRequest> PARSER =
+        new com.google.protobuf.AbstractParser<SpaceLimitRequest>() {
+      public SpaceLimitRequest parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new SpaceLimitRequest(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<SpaceLimitRequest> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // optional .hbase.pb.SpaceQuota quota = 1;
+    public static final int QUOTA_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota quota_;
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    public boolean hasQuota() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getQuota() {
+      return quota_;
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder() {
+      return quota_;
+    }
+
+    private void initFields() {
+      quota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, quota_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, quota_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest) obj;
+
+      boolean result = true;
+      result = result && (hasQuota() == other.hasQuota());
+      if (hasQuota()) {
+        result = result && getQuota()
+            .equals(other.getQuota());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasQuota()) {
+        hash = (37 * hash) + QUOTA_FIELD_NUMBER;
+        hash = (53 * hash) + getQuota().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.SpaceLimitRequest}
+     *
+     * <pre>
+     * The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.class, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getQuotaFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        if (quotaBuilder_ == null) {
+          quota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance();
+        } else {
+          quotaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest build() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest result = new org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (quotaBuilder_ == null) {
+          result.quota_ = quota_;
+        } else {
+          result.quota_ = quotaBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance()) return this;
+        if (other.hasQuota()) {
+          mergeQuota(other.getQuota());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceLimitRequest) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // optional .hbase.pb.SpaceQuota quota = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota quota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> quotaBuilder_;
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public boolean hasQuota() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota getQuota() {
+        if (quotaBuilder_ == null) {
+          return quota_;
+        } else {
+          return quotaBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public Builder setQuota(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota value) {
+        if (quotaBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          quota_ = value;
+          onChanged();
+        } else {
+          quotaBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public Builder setQuota(
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder builderForValue) {
+        if (quotaBuilder_ == null) {
+          quota_ = builderForValue.build();
+          onChanged();
+        } else {
+          quotaBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public Builder mergeQuota(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota value) {
+        if (quotaBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              quota_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) {
+            quota_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder(quota_).mergeFrom(value).buildPartial();
+          } else {
+            quota_ = value;
+          }
+          onChanged();
+        } else {
+          quotaBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public Builder clearQuota() {
+        if (quotaBuilder_ == null) {
+          quota_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance();
+          onChanged();
+        } else {
+          quotaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder getQuotaBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getQuotaFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder() {
+        if (quotaBuilder_ != null) {
+          return quotaBuilder_.getMessageOrBuilder();
+        } else {
+          return quota_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
+          getQuotaFieldBuilder() {
+        if (quotaBuilder_ == null) {
+          quotaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>(
+                  quota_,
+                  getParentForChildren(),
+                  isClean());
+          quota_ = null;
+        }
+        return quotaBuilder_;
+      }
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.SpaceLimitRequest)
+    }
+
+    static {
+      defaultInstance = new SpaceLimitRequest(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.SpaceLimitRequest)
+  }
+
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_TimedQuota_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_TimedQuota_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_Throttle_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_Throttle_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_ThrottleRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_Quotas_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_Quotas_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_QuotaUsage_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_QuotaUsage_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_SpaceQuota_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_SpaceQuota_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_SpaceLimitRequest_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable;
+
+  public static com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\013Quota.proto\022\010hbase.pb\032\013HBase.proto\"\204\001\n" +
+      "\nTimedQuota\022%\n\ttime_unit\030\001 \002(\0162\022.hbase.p" +
+      "b.TimeUnit\022\022\n\nsoft_limit\030\002 \001(\004\022\r\n\005share\030" +
+      "\003 \001(\002\022,\n\005scope\030\004 \001(\0162\024.hbase.pb.QuotaSco" +
+      "pe:\007MACHINE\"\375\001\n\010Throttle\022%\n\007req_num\030\001 \001(" +
+      "\0132\024.hbase.pb.TimedQuota\022&\n\010req_size\030\002 \001(" +
+      "\0132\024.hbase.pb.TimedQuota\022\'\n\twrite_num\030\003 \001" +
+      "(\0132\024.hbase.pb.TimedQuota\022(\n\nwrite_size\030\004" +
+      " \001(\0132\024.hbase.pb.TimedQuota\022&\n\010read_num\030\005" +
+      " \001(\0132\024.hbase.pb.TimedQuota\022\'\n\tread_size\030",
+      "\006 \001(\0132\024.hbase.pb.TimedQuota\"b\n\017ThrottleR" +
+      "equest\022$\n\004type\030\001 \001(\0162\026.hbase.pb.Throttle" +
+      "Type\022)\n\013timed_quota\030\002 \001(\0132\024.hbase.pb.Tim" +
+      "edQuota\"r\n\006Quotas\022\035\n\016bypass_globals\030\001 \001(" +
+      "\010:\005false\022$\n\010throttle\030\002 \001(\0132\022.hbase.pb.Th" +
+      "rottle\022#\n\005space\030\003 \001(\0132\024.hbase.pb.SpaceQu" +
+      "ota\"\014\n\nQuotaUsage\"Z\n\nSpaceQuota\022\022\n\nsoft_" +
+      "limit\030\001 \001(\004\0228\n\020violation_policy\030\002 \001(\0162\036." +
+      "hbase.pb.SpaceViolationPolicy\"8\n\021SpaceLi" +
+      "mitRequest\022#\n\005quota\030\001 \001(\0132\024.hbase.pb.Spa",
+      "ceQuota*&\n\nQuotaScope\022\013\n\007CLUSTER\020\001\022\013\n\007MA" +
+      "CHINE\020\002*v\n\014ThrottleType\022\022\n\016REQUEST_NUMBE" +
+      "R\020\001\022\020\n\014REQUEST_SIZE\020\002\022\020\n\014WRITE_NUMBER\020\003\022" +
+      "\016\n\nWRITE_SIZE\020\004\022\017\n\013READ_NUMBER\020\005\022\r\n\tREAD" +
+      "_SIZE\020\006*$\n\tQuotaType\022\014\n\010THROTTLE\020\001\022\t\n\005SP" +
+      "ACE\020\002*]\n\024SpaceViolationPolicy\022\013\n\007DISABLE" +
+      "\020\001\022\031\n\025NO_WRITES_COMPACTIONS\020\002\022\r\n\tNO_WRIT" +
+      "ES\020\003\022\016\n\nNO_INSERTS\020\004BA\n*org.apache.hadoo" +
+      "p.hbase.protobuf.generatedB\013QuotaProtosH" +
+      "\001\210\001\001\240\001\001"
+    };
+    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
+        public com.google.protobuf.ExtensionRegistry assignDescriptors(
+            com.google.protobuf.Descriptors.FileDescriptor root) {
+          descriptor = root;
+          internal_static_hbase_pb_TimedQuota_descriptor =
+            getDescriptor().getMessageTypes().get(0);
+          internal_static_hbase_pb_TimedQuota_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_TimedQuota_descriptor,
+              new java.lang.String[] { "TimeUnit", "SoftLimit", "Share", "Scope", });
+          internal_static_hbase_pb_Throttle_descriptor =
+            getDescriptor().getMessageTypes().get(1);
+          internal_static_hbase_pb_Throttle_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_Throttle_descriptor,
+              new java.lang.String[] { "ReqNum", "ReqSize", "WriteNum", "WriteSize", "ReadNum", "ReadSize", });
+          internal_static_hbase_pb_ThrottleRequest_descriptor =
             getDescriptor().getMessageTypes().get(2);
           internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
@@ -4359,13 +5881,25 @@ public final class QuotaProtos {
           internal_static_hbase_pb_Quotas_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_Quotas_descriptor,
-              new java.lang.String[] { "BypassGlobals", "Throttle", });
+              new java.lang.String[] { "BypassGlobals", "Throttle", "Space", });
           internal_static_hbase_pb_QuotaUsage_descriptor =
             getDescriptor().getMessageTypes().get(4);
           internal_static_hbase_pb_QuotaUsage_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_QuotaUsage_descriptor,
               new java.lang.String[] { });
+          internal_static_hbase_pb_SpaceQuota_descriptor =
+            getDescriptor().getMessageTypes().get(5);
+          internal_static_hbase_pb_SpaceQuota_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_SpaceQuota_descriptor,
+              new java.lang.String[] { "SoftLimit", "ViolationPolicy", });
+          internal_static_hbase_pb_SpaceLimitRequest_descriptor =
+            getDescriptor().getMessageTypes().get(6);
+          internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_hbase_pb_SpaceLimitRequest_descriptor,
+              new java.lang.String[] { "Quota", });
           return null;
         }
       };

http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-protocol/src/main/protobuf/Quota.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Quota.proto b/hbase-protocol/src/main/protobuf/Quota.proto
index a8303b1..c12b997 100644
--- a/hbase-protocol/src/main/protobuf/Quota.proto
+++ b/hbase-protocol/src/main/protobuf/Quota.proto
@@ -65,12 +65,33 @@ message ThrottleRequest {
 
 enum QuotaType {
   THROTTLE = 1;
+  SPACE = 2;
 }
 
 message Quotas {
   optional bool bypass_globals = 1 [default = false];
   optional Throttle throttle = 2;
+  optional SpaceQuota space = 3;
 }
 
 message QuotaUsage {
 }
+
+// Defines what action should be taken when the SpaceQuota is violated
+enum SpaceViolationPolicy {
+  DISABLE = 1; // Disable the table(s)
+  NO_WRITES_COMPACTIONS = 2; // No writes, bulk-loads, or compactions
+  NO_WRITES = 3; // No writes or bulk-loads
+  NO_INSERTS = 4; // No puts or bulk-loads, but deletes are allowed
+}
+
+// Defines a limit on the amount of filesystem space used by a table/namespace
+message SpaceQuota {
+  optional uint64 soft_limit = 1; // The limit of bytes for this quota
+  optional SpaceViolationPolicy violation_policy = 2; // The action to take when the quota is violated
+}
+
+// The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
+message SpaceLimitRequest {
+  optional SpaceQuota quota = 1;
+}
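
As a rough sketch of how the new messages above compose (not part of the commit; it only uses the generated builders shown earlier in this diff, with an arbitrary 5TB limit and NO_WRITES policy as example values):

    import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos;

    public class SpaceLimitRequestSketch {
      public static QuotaProtos.SpaceLimitRequest fiveTbNoWrites() {
        // Describe the limit itself: a soft limit in bytes plus the action to take on violation.
        QuotaProtos.SpaceQuota quota = QuotaProtos.SpaceQuota.newBuilder()
            .setSoftLimit(5L * 1024 * 1024 * 1024 * 1024) // 5TB
            .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_WRITES)
            .build();
        // Wrap the quota in the request message that carries it over RPC.
        return QuotaProtos.SpaceLimitRequest.newBuilder()
            .setQuota(quota)
            .build();
      }
    }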


[43/50] [abbrv] hbase git commit: HBASE-16996 Implement storage/retrieval of filesystem-use quotas into quota table (Josh Elser)

Posted by el...@apache.org.
HBASE-16996 Implement storage/retrieval of filesystem-use quotas into quota table (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7abe4c1b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7abe4c1b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7abe4c1b

Branch: refs/heads/HBASE-16961
Commit: 7abe4c1b20317292135f358f7a2c1c05bdddc79d
Parents: 41d73bd
Author: tedyu <yu...@gmail.com>
Authored: Sat Dec 3 14:30:48 2016 -0800
Committer: Josh Elser <el...@apache.org>
Committed: Mon Jan 23 17:48:59 2017 -0500

----------------------------------------------------------------------
 .../hadoop/hbase/quotas/QuotaTableUtil.java     |  13 +-
 .../hadoop/hbase/quotas/MasterQuotaManager.java |  30 +++++
 .../hadoop/hbase/quotas/TestQuotaAdmin.java     | 125 ++++++++++++++++++-
 3 files changed, 165 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7abe4c1b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
index 116dd0c..1640ddc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaTableUtil.java
@@ -52,7 +52,9 @@ import org.apache.hadoop.hbase.util.Strings;
  * <pre>
  *     ROW-KEY      FAM/QUAL        DATA
  *   n.&lt;namespace&gt; q:s         &lt;global-quotas&gt;
+ *   n.&lt;namespace&gt; u:du        &lt;size in bytes&gt;
  *   t.&lt;table&gt;     q:s         &lt;global-quotas&gt;
+ *   t.&lt;table&gt;     u:du        &lt;size in bytes&gt;
  *   u.&lt;user&gt;      q:s         &lt;global-quotas&gt;
  *   u.&lt;user&gt;      q:s.&lt;table&gt; &lt;table-quotas&gt;
  *   u.&lt;user&gt;      q:s.&lt;ns&gt;:   &lt;namespace-quotas&gt;
@@ -71,6 +73,7 @@ public class QuotaTableUtil {
   protected static final byte[] QUOTA_FAMILY_USAGE = Bytes.toBytes("u");
   protected static final byte[] QUOTA_QUALIFIER_SETTINGS = Bytes.toBytes("s");
   protected static final byte[] QUOTA_QUALIFIER_SETTINGS_PREFIX = Bytes.toBytes("s.");
+  protected static final byte[] QUOTA_QUALIFIER_DISKUSAGE = Bytes.toBytes("du");
   protected static final byte[] QUOTA_USER_ROW_KEY_PREFIX = Bytes.toBytes("u.");
   protected static final byte[] QUOTA_TABLE_ROW_KEY_PREFIX = Bytes.toBytes("t.");
   protected static final byte[] QUOTA_NAMESPACE_ROW_KEY_PREFIX = Bytes.toBytes("n.");
@@ -298,11 +301,16 @@ public class QuotaTableUtil {
    *  Quotas protobuf helpers
    */
   protected static Quotas quotasFromData(final byte[] data) throws IOException {
+    return quotasFromData(data, 0, data.length);
+  }
+
+  protected static Quotas quotasFromData(
+      final byte[] data, int offset, int length) throws IOException {
     int magicLen = ProtobufMagic.lengthOfPBMagic();
-    if (!ProtobufMagic.isPBMagicPrefix(data, 0, magicLen)) {
+    if (!ProtobufMagic.isPBMagicPrefix(data, offset, magicLen)) {
       throw new IOException("Missing pb magic prefix");
     }
-    return Quotas.parseFrom(new ByteArrayInputStream(data, magicLen, data.length - magicLen));
+    return Quotas.parseFrom(new ByteArrayInputStream(data, offset + magicLen, length - magicLen));
   }
 
   protected static byte[] quotasToData(final Quotas data) throws IOException {
@@ -316,6 +324,7 @@ public class QuotaTableUtil {
     boolean hasSettings = false;
     hasSettings |= quotas.hasThrottle();
     hasSettings |= quotas.hasBypassGlobals();
+    hasSettings |= quotas.hasSpace();
     return !hasSettings;
   }
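
The new offset/length overload lets callers parse a Quotas message directly out of a Cell read from the quota table without copying the value. A minimal sketch of that use (assumptions: the caller sits in the org.apache.hadoop.hbase.quotas package, since quotasFromData is protected, and the shaded Quotas type matches the import used by TestQuotaAdmin further down):

    import java.io.IOException;

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;

    final class QuotaCellParseSketch {
      // Parse the serialized Quotas straight from the cell's backing array;
      // the pb magic prefix check happens inside quotasFromData(...).
      static Quotas parse(Cell cell) throws IOException {
        return QuotaTableUtil.quotasFromData(
            cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
      }
    }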
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/7abe4c1b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index 647a770..bd9f410 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota;
@@ -293,9 +295,11 @@ public class MasterQuotaManager implements RegionStateListener {
     Quotas quotas = quotaOps.fetch();
     quotaOps.preApply(quotas);
 
+    // Copy the user request into the Quotas object
     Quotas.Builder builder = (quotas != null) ? quotas.toBuilder() : Quotas.newBuilder();
     if (req.hasThrottle()) applyThrottle(builder, req.getThrottle());
     if (req.hasBypassGlobals()) applyBypassGlobals(builder, req.getBypassGlobals());
+    if (req.hasSpaceLimit()) applySpaceLimit(builder, req.getSpaceLimit());
 
     // Submit new changes
     quotas = builder.build();
@@ -437,6 +441,32 @@ public class MasterQuotaManager implements RegionStateListener {
     }
   }
 
+  /**
+   * Adds the information from the provided {@link SpaceLimitRequest} to the {@link Quotas} builder.
+   *
+   * @param quotas The builder to update.
+   * @param req The request to extract space quota information from.
+   */
+  void applySpaceLimit(final Quotas.Builder quotas, final SpaceLimitRequest req) {
+    if (req.hasQuota()) {
+      applySpaceQuota(quotas, req.getQuota());
+    }
+  }
+
+  /**
+   * Merges the provided {@link SpaceQuota} into the given {@link Quotas} builder.
+   *
+   * @param quotas The Quotas builder instance to update
+   * @param quota The SpaceQuota instance to update from
+   */
+  void applySpaceQuota(final Quotas.Builder quotas, final SpaceQuota quota) {
+    // Create a builder for the SpaceQuota, seeded from any existing space quota
+    SpaceQuota.Builder builder = quotas.hasSpace() ? quotas.getSpace().toBuilder() :
+        SpaceQuota.newBuilder();
+    // Update the values from the provided quota into the new one and set it on Quotas.
+    quotas.setSpace(builder.mergeFrom(quota).build());
+  }
+
   private void validateTimedQuota(final TimedQuota timedQuota) throws IOException {
     if (timedQuota.getSoftLimit() < 1) {
       throw new DoNotRetryIOException(new UnsupportedOperationException(
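
The merge above relies on standard protobuf builder semantics: fields that the incoming SpaceQuota actually sets overwrite the existing values, while unset fields leave the old ones in place. A small illustration (not from the commit; it assumes the shaded SpaceQuota and SpaceViolationPolicy classes generated from the Quota.proto changes earlier in this series):

    import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy;

    final class SpaceQuotaMergeSketch {
      public static void main(String[] args) {
        // Existing space quota: 5TB soft limit with NO_WRITES.
        SpaceQuota existing = SpaceQuota.newBuilder()
            .setSoftLimit(5L * 1024 * 1024 * 1024 * 1024)
            .setViolationPolicy(SpaceViolationPolicy.NO_WRITES)
            .build();
        // Incoming request only changes the policy; the soft limit is left unset.
        SpaceQuota incoming = SpaceQuota.newBuilder()
            .setViolationPolicy(SpaceViolationPolicy.NO_INSERTS)
            .build();
        // Same shape as applySpaceQuota(): seed a builder from the existing quota and merge.
        SpaceQuota merged = existing.toBuilder().mergeFrom(incoming).build();
        // merged keeps the 5TB soft limit and picks up the NO_INSERTS policy.
        System.out.println(merged.getSoftLimit() + " " + merged.getViolationPolicy());
      }
    }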

http://git-wip-us.apache.org/repos/asf/hbase/blob/7abe4c1b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
index d6e8952..4fc95cd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaAdmin.java
@@ -22,20 +22,32 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import com.google.common.collect.Iterables;
+
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 /**
@@ -233,10 +245,121 @@ public class TestQuotaAdmin {
     assertNumResults(0, null);
   }
 
+  @Test
+  public void testSetAndGetSpaceQuota() throws Exception {
+    Admin admin = TEST_UTIL.getAdmin();
+    final TableName tn = TableName.valueOf("table1");
+    final long sizeLimit = 1024L * 1024L * 1024L * 1024L * 5L; // 5TB
+    final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.NO_WRITES;
+    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tn, sizeLimit, violationPolicy);
+    admin.setQuota(settings);
+
+    // Verify the Quotas in the table
+    try (Table quotaTable = TEST_UTIL.getConnection().getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
+      ResultScanner scanner = quotaTable.getScanner(new Scan());
+      try {
+        Result r = Iterables.getOnlyElement(scanner);
+        CellScanner cells = r.cellScanner();
+        assertTrue("Expected to find a cell", cells.advance());
+        assertSpaceQuota(sizeLimit, violationPolicy, cells.current());
+      } finally {
+        scanner.close();
+      }
+    }
+
+    // Verify we can retrieve it via the QuotaRetriever API
+    QuotaRetriever scanner = QuotaRetriever.open(admin.getConfiguration());
+    try {
+      assertSpaceQuota(sizeLimit, violationPolicy, Iterables.getOnlyElement(scanner));
+    } finally {
+      scanner.close();
+    }
+  }
+
+  @Test
+  public void testSetAndModifyQuota() throws Exception {
+    Admin admin = TEST_UTIL.getAdmin();
+    final TableName tn = TableName.valueOf("table1");
+    final long originalSizeLimit = 1024L * 1024L * 1024L * 1024L * 5L; // 5TB
+    final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.NO_WRITES;
+    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
+        tn, originalSizeLimit, violationPolicy);
+    admin.setQuota(settings);
+
+    // Verify the Quotas in the table
+    try (Table quotaTable = TEST_UTIL.getConnection().getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
+      ResultScanner scanner = quotaTable.getScanner(new Scan());
+      try {
+        Result r = Iterables.getOnlyElement(scanner);
+        CellScanner cells = r.cellScanner();
+        assertTrue("Expected to find a cell", cells.advance());
+        assertSpaceQuota(originalSizeLimit, violationPolicy, cells.current());
+      } finally {
+        scanner.close();
+      }
+    }
+
+    // Verify we can retrieve it via the QuotaRetriever API
+    QuotaRetriever quotaScanner = QuotaRetriever.open(admin.getConfiguration());
+    try {
+      assertSpaceQuota(originalSizeLimit, violationPolicy, Iterables.getOnlyElement(quotaScanner));
+    } finally {
+      quotaScanner.close();
+    }
+
+    // Setting a new size and policy should be reflected
+    final long newSizeLimit = 1024L * 1024L * 1024L * 1024L; // 1TB
+    final SpaceViolationPolicy newViolationPolicy = SpaceViolationPolicy.NO_WRITES_COMPACTIONS;
+    QuotaSettings newSettings = QuotaSettingsFactory.limitTableSpace(
+        tn, newSizeLimit, newViolationPolicy);
+    admin.setQuota(newSettings);
+
+    // Verify the new Quotas in the table
+    try (Table quotaTable = TEST_UTIL.getConnection().getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) {
+      ResultScanner scanner = quotaTable.getScanner(new Scan());
+      try {
+        Result r = Iterables.getOnlyElement(scanner);
+        CellScanner cells = r.cellScanner();
+        assertTrue("Expected to find a cell", cells.advance());
+        assertSpaceQuota(newSizeLimit, newViolationPolicy, cells.current());
+      } finally {
+        scanner.close();
+      }
+    }
+
+    // Verify we can retrieve the new quota via the QuotaRetriever API
+    quotaScanner = QuotaRetriever.open(admin.getConfiguration());
+    try {
+      assertSpaceQuota(newSizeLimit, newViolationPolicy, Iterables.getOnlyElement(quotaScanner));
+    } finally {
+      quotaScanner.close();
+    }
+  }
+
   private void assertNumResults(int expected, final QuotaFilter filter) throws Exception {
     assertEquals(expected, countResults(filter));
   }
 
+  private void assertSpaceQuota(
+      long sizeLimit, SpaceViolationPolicy violationPolicy, Cell cell) throws Exception {
+    Quotas q = QuotaTableUtil.quotasFromData(
+        cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+    assertTrue("Quota should have space quota defined", q.hasSpace());
+    QuotaProtos.SpaceQuota spaceQuota = q.getSpace();
+    assertEquals(sizeLimit, spaceQuota.getSoftLimit());
+    assertEquals(violationPolicy, ProtobufUtil.toViolationPolicy(spaceQuota.getViolationPolicy()));
+  }
+
+  private void assertSpaceQuota(
+      long sizeLimit, SpaceViolationPolicy violationPolicy, QuotaSettings actualSettings) {
+    assertTrue("The actual QuotaSettings was not an instance of " + SpaceLimitSettings.class
+        + " but of " + actualSettings.getClass(), actualSettings instanceof SpaceLimitSettings);
+    SpaceLimitRequest spaceLimitRequest = ((SpaceLimitSettings) actualSettings).getProto();
+    assertEquals(sizeLimit, spaceLimitRequest.getQuota().getSoftLimit());
+    assertEquals(violationPolicy,
+        ProtobufUtil.toViolationPolicy(spaceLimitRequest.getQuota().getViolationPolicy()));
+  }
+
   private int countResults(final QuotaFilter filter) throws Exception {
     QuotaRetriever scanner = QuotaRetriever.open(TEST_UTIL.getConfiguration(), filter);
     try {


[21/50] [abbrv] hbase git commit: HBASE-16867 Procedure V2 - Check ACLs for MasterRpcServices' queueLock() and lockHeartbeat().

Posted by el...@apache.org.
HBASE-16867 Procedure V2 - Check ACLs for MasterRpcServices' queueLock() and lockHeartbeat().

Change-Id: I03773059c169022318cf7953110bb022b6ad216d

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/558a6bb9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/558a6bb9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/558a6bb9

Branch: refs/heads/HBASE-16961
Commit: 558a6bb9d71cde7fba240a8a454c32367f0ef4c0
Parents: 76dc957
Author: Apekshit Sharma <ap...@apache.org>
Authored: Mon Oct 17 21:03:49 2016 -0700
Committer: Michael Stack <st...@apache.org>
Committed: Thu Jan 19 16:42:57 2017 -0800

----------------------------------------------------------------------
 .../hbase/security/access/AccessController.java | 46 +++++++++--
 .../security/access/TestAccessController.java   | 80 +++++++++++++++++++-
 2 files changed, 116 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/558a6bb9/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
index 5011819..f68ae94 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessController.java
@@ -86,16 +86,13 @@ import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
+import org.apache.hadoop.hbase.master.locking.LockProcedure.LockType;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -111,6 +108,11 @@ import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.ByteRange;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -2739,4 +2741,36 @@ public class AccessController extends BaseMasterAndRegionObserver
       String regex) throws IOException {
     requirePermission(getActiveUser(ctx), "listReplicationPeers", Action.ADMIN);
   }
+
+  @Override
+  public void preRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
+      TableName tableName, HRegionInfo[] regionInfos, LockType type, String description)
+  throws IOException {
+    // There are operations in the CREATE and ADMIN domains which may require a lock, either READ
+    // or WRITE. So for any lock request, we check for these two perms irrespective of the lock type.
+    String reason = String.format("Lock %s, description=%s", type, description);
+    checkLockPermissions(getActiveUser(ctx), namespace, tableName, regionInfos, reason);
+  }
+
+  @Override
+  public void preLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      LockProcedure proc, boolean keepAlive) throws IOException {
+    String reason = "Heartbeat for lock " + proc.getProcId();
+    checkLockPermissions(getActiveUser(ctx), null, proc.getTableName(), null, reason);
+  }
+
+  private void checkLockPermissions(User user, String namespace,
+      TableName tableName, HRegionInfo[] regionInfos, String reason)
+  throws IOException {
+    if (namespace != null && !namespace.isEmpty()) {
+      requireNamespacePermission(user, reason, namespace, Action.ADMIN, Action.CREATE);
+    } else if (tableName != null || (regionInfos != null && regionInfos.length > 0)) {
+      // So, either a table or regions op. If the latter, check perms on the table.
+      TableName tn = tableName != null? tableName: regionInfos[0].getTable();
+      requireTablePermission(user, reason, tn, null, null,
+          Action.ADMIN, Action.CREATE);
+    } else {
+      throw new DoNotRetryIOException("Invalid lock level when requesting permissions.");
+    }
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/558a6bb9/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 5dcea53..8685b44 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -92,10 +92,11 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos;
 import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
+import org.apache.hadoop.hbase.master.locking.LockProcedure.LockType;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
 import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -105,8 +106,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.Region;
@@ -117,6 +116,9 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -127,6 +129,7 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.mockito.Mockito;
 
 import com.google.protobuf.BlockingRpcChannel;
 import com.google.protobuf.RpcCallback;
@@ -2976,4 +2979,73 @@ public class TestAccessController extends SecureTestUtil {
     verifyAllowed(action, SUPERUSER, USER_ADMIN);
     verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER);
   }
-}
+
+  @Test
+  public void testRemoteLocks() throws Exception {
+    String namespace = "preQueueNs";
+    final TableName tableName = TableName.valueOf(namespace, "testTable");
+    HRegionInfo[] regionInfos = new HRegionInfo[] {new HRegionInfo(tableName)};
+
+    // Setup Users
+    // User will be granted ADMIN and CREATE on namespace. Should be denied before grant.
+    User namespaceUser = User.createUserForTesting(conf, "qLNSUser", new String[0]);
+    // User will be granted ADMIN and CREATE on table. Should be denied before grant.
+    User tableACUser = User.createUserForTesting(conf, "qLTableACUser", new String[0]);
+    // User will be granted READ, WRITE, EXECUTE on table. Should be denied.
+    User tableRWXUser = User.createUserForTesting(conf, "qLTableRWXUser", new String[0]);
+    grantOnTable(TEST_UTIL, tableRWXUser.getShortName(), tableName, null, null,
+        Action.READ, Action.WRITE, Action.EXEC);
+    // User with global READ, WRITE, EXECUTE should be denied lock access.
+    User globalRWXUser = User.createUserForTesting(conf, "qLGlobalRWXUser", new String[0]);
+    grantGlobal(TEST_UTIL, globalRWXUser.getShortName(), Action.READ, Action.WRITE, Action.EXEC);
+
+    AccessTestAction namespaceLockAction = new AccessTestAction() {
+      @Override public Object run() throws Exception {
+        ACCESS_CONTROLLER.preRequestLock(ObserverContext.createAndPrepare(CP_ENV, null), namespace,
+            null, null, LockType.EXCLUSIVE, null);
+        return null;
+      }
+    };
+    verifyAllowed(namespaceLockAction, SUPERUSER, USER_ADMIN);
+    verifyDenied(namespaceLockAction, globalRWXUser, tableACUser, namespaceUser, tableRWXUser);
+    grantOnNamespace(TEST_UTIL, namespaceUser.getShortName(), namespace, Action.ADMIN);
+    verifyAllowed(namespaceLockAction, namespaceUser);
+
+    AccessTestAction tableLockAction = new AccessTestAction() {
+      @Override public Object run() throws Exception {
+        ACCESS_CONTROLLER.preRequestLock(ObserverContext.createAndPrepare(CP_ENV, null),
+            null, tableName, null, LockType.EXCLUSIVE, null);
+        return null;
+      }
+    };
+    verifyAllowed(tableLockAction, SUPERUSER, USER_ADMIN, namespaceUser);
+    verifyDenied(tableLockAction, globalRWXUser, tableACUser, tableRWXUser);
+    grantOnTable(TEST_UTIL, tableACUser.getShortName(), tableName, null, null,
+        Action.ADMIN, Action.CREATE);
+    verifyAllowed(tableLockAction, tableACUser);
+
+    AccessTestAction regionsLockAction = new AccessTestAction() {
+      @Override public Object run() throws Exception {
+        ACCESS_CONTROLLER.preRequestLock(ObserverContext.createAndPrepare(CP_ENV, null),
+            null, null, regionInfos, LockType.EXCLUSIVE, null);
+        return null;
+      }
+    };
+    verifyAllowed(regionsLockAction, SUPERUSER, USER_ADMIN, namespaceUser, tableACUser);
+    verifyDenied(regionsLockAction, globalRWXUser, tableRWXUser);
+
+    // Test heartbeats
+    // Create a lock procedure and try sending heartbeat to it. It doesn't matter how the lock
+    // was created, we just need namespace from the lock's tablename.
+    LockProcedure proc = new LockProcedure(conf, tableName, LockType.EXCLUSIVE, "test", null);
+    AccessTestAction regionLockHeartbeatAction = new AccessTestAction() {
+      @Override public Object run() throws Exception {
+        ACCESS_CONTROLLER.preLockHeartbeat(ObserverContext.createAndPrepare(CP_ENV, null),
+            proc, false);
+        return null;
+      }
+    };
+    verifyAllowed(regionLockHeartbeatAction, SUPERUSER, USER_ADMIN, namespaceUser, tableACUser);
+    verifyDenied(regionLockHeartbeatAction, globalRWXUser, tableRWXUser);
+  }
+}
\ No newline at end of file


[47/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages - addendum fixes white spaces (Josh Elser)

Posted by el...@apache.org.
HBASE-16995 Build client Java API and client protobuf messages - addendum fixes white spaces (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/26a643de
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/26a643de
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/26a643de

Branch: refs/heads/HBASE-16961
Commit: 26a643de30e31652cf45cd3e84be24364dcbf9d8
Parents: df2916f
Author: tedyu <yu...@gmail.com>
Authored: Thu Nov 17 10:42:18 2016 -0800
Committer: Josh Elser <el...@apache.org>
Committed: Mon Jan 23 17:48:59 2017 -0500

----------------------------------------------------------------------
 .../hbase/quotas/TestQuotaSettingsFactory.java    |  2 +-
 .../shaded/protobuf/generated/MasterProtos.java   |  2 +-
 .../shaded/protobuf/generated/QuotaProtos.java    | 18 +++++++++---------
 .../hbase/protobuf/generated/QuotaProtos.java     |  4 ++--
 4 files changed, 13 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/26a643de/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
index 17015d6..e0012a7 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
@@ -44,7 +44,7 @@ import org.junit.experimental.categories.Category;
  */
 @Category(SmallTests.class)
 public class TestQuotaSettingsFactory {
-  
+
   @Test
   public void testAllQuotasAddedToList() {
     final SpaceQuota spaceQuota = SpaceQuota.newBuilder()

http://git-wip-us.apache.org/repos/asf/hbase/blob/26a643de/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 6305779..2570010 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -61117,7 +61117,7 @@ public final class MasterProtos {
        * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
        */
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder> 
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder>
           getSpaceLimitFieldBuilder() {
         if (spaceLimitBuilder_ == null) {
           spaceLimitBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<

http://git-wip-us.apache.org/repos/asf/hbase/blob/26a643de/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index a715115..673fb2c 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -4362,7 +4362,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
        */
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
           getSpaceFieldBuilder() {
         if (spaceBuilder_ == null) {
           spaceBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -5957,7 +5957,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
        */
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
           getQuotaFieldBuilder() {
         if (quotaBuilder_ == null) {
           quotaBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
@@ -6020,37 +6020,37 @@ public final class QuotaProtos {
 
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_TimedQuota_descriptor;
-  private static final 
+  private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_TimedQuota_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_Throttle_descriptor;
-  private static final 
+  private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_Throttle_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_ThrottleRequest_descriptor;
-  private static final 
+  private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_Quotas_descriptor;
-  private static final 
+  private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_Quotas_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_QuotaUsage_descriptor;
-  private static final 
+  private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_QuotaUsage_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_SpaceQuota_descriptor;
-  private static final 
+  private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_SpaceQuota_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_SpaceLimitRequest_descriptor;
-  private static final 
+  private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/26a643de/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
index 1925828..cc40536 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -4258,7 +4258,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
           getSpaceFieldBuilder() {
         if (spaceBuilder_ == null) {
           spaceBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -5754,7 +5754,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
           getQuotaFieldBuilder() {
         if (quotaBuilder_ == null) {
           quotaBuilder_ = new com.google.protobuf.SingleFieldBuilder<


[39/50] [abbrv] hbase git commit: HBASE-17367 Make HTable#getBufferedMutator thread safe

Posted by el...@apache.org.
HBASE-17367 Make HTable#getBufferedMutator thread safe


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ba4a926b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ba4a926b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ba4a926b

Branch: refs/heads/HBASE-16961
Commit: ba4a926b62f2e3858e0fae7e74087e20947fd72f
Parents: 07e0a30
Author: Yu Li <li...@apache.org>
Authored: Mon Jan 23 18:23:24 2017 +0800
Committer: Yu Li <li...@apache.org>
Committed: Mon Jan 23 18:23:24 2017 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/HTable.java  | 19 +++----
 .../hbase/client/TestFromClientSide3.java       | 52 ++++++++++++++++++++
 2 files changed, 62 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/ba4a926b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 3bb0a77..72d71eb 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -108,7 +108,8 @@ public class HTable implements Table {
   private final Configuration configuration;
   private final ConnectionConfiguration connConfiguration;
   @VisibleForTesting
-  BufferedMutatorImpl mutator;
+  volatile BufferedMutatorImpl mutator;
+  private final Object mutatorLock = new Object();
   private boolean closed = false;
   private final int scannerCaching;
   private final long scannerMaxResultSize;
@@ -1333,14 +1334,14 @@ public class HTable implements Table {
   @VisibleForTesting
   BufferedMutator getBufferedMutator() throws IOException {
     if (mutator == null) {
-      this.mutator = (BufferedMutatorImpl) connection.getBufferedMutator(
-          new BufferedMutatorParams(tableName)
-              .pool(pool)
-              .writeBufferSize(writeBufferSize)
-              .maxKeyValueSize(connConfiguration.getMaxKeyValueSize())
-              .opertationTimeout(operationTimeout)
-              .rpcTimeout(writeRpcTimeout)
-      );
+      synchronized (mutatorLock) {
+        if (mutator == null) {
+          this.mutator = (BufferedMutatorImpl) connection.getBufferedMutator(
+            new BufferedMutatorParams(tableName).pool(pool).writeBufferSize(writeBufferSize)
+                .maxKeyValueSize(connConfiguration.getMaxKeyValueSize())
+                .opertationTimeout(operationTimeout).rpcTimeout(writeRpcTimeout));
+        }
+      }
     }
     return mutator;
   }
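
The change above is the standard double-checked locking idiom: making the mutator field volatile ensures the unsynchronized first read is safe and that other threads see a fully constructed BufferedMutatorImpl, while the second check under mutatorLock prevents two mutators from being created. A stripped-down sketch of the same idiom with hypothetical names (not HBase API), for reference:

    import java.util.function.Supplier;

    final class LazySingleField<T> {
      private final Object lock = new Object();
      private volatile T value; // volatile is what makes the unsynchronized read safe

      T get(Supplier<T> factory) {
        T local = value;        // one volatile read on the common, already-initialized path
        if (local == null) {
          synchronized (lock) {
            local = value;      // re-check: another thread may have initialized it first
            if (local == null) {
              local = factory.get();
              value = local;    // publish the fully constructed instance
            }
          }
        }
        return local;
      }
    }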

http://git-wip-us.apache.org/repos/asf/hbase/blob/ba4a926b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index b863b40..6f9637f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -28,6 +28,8 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -749,6 +751,56 @@ public class TestFromClientSide3 {
 
   }
 
+  @Test
+  public void testPutThenGetWithMultipleThreads() throws Exception {
+    TableName TABLE = TableName.valueOf("testParallelPutAndGet");
+    final int THREAD_NUM = 20;
+    final int ROUND_NUM = 10;
+    for (int round = 0; round < ROUND_NUM; round++) {
+      ArrayList<Thread> threads = new ArrayList<>(THREAD_NUM);
+      final AtomicInteger successCnt = new AtomicInteger(0);
+      Table ht = TEST_UTIL.createTable(TABLE, FAMILY);
+      for (int i = 0; i < THREAD_NUM; i++) {
+        final int index = i;
+        Thread t = new Thread(new Runnable() {
+
+          @Override
+          public void run() {
+            final byte[] row = Bytes.toBytes("row-" + index);
+            final byte[] value = Bytes.toBytes("v" + index);
+            try {
+              Put put = new Put(row);
+              put.addColumn(FAMILY, QUALIFIER, value);
+              ht.put(put);
+              Get get = new Get(row);
+              Result result = ht.get(get);
+              byte[] returnedValue = result.getValue(FAMILY, QUALIFIER);
+              if (Bytes.equals(value, returnedValue)) {
+                successCnt.getAndIncrement();
+              } else {
+                LOG.error("Should be equal but not, original value: " + Bytes.toString(value)
+                    + ", returned value: "
+                    + (returnedValue == null ? "null" : Bytes.toString(returnedValue)));
+              }
+            } catch (Throwable e) {
+              // do nothing
+            }
+          }
+        });
+        threads.add(t);
+      }
+      for (Thread t : threads) {
+        t.start();
+      }
+      for (Thread t : threads) {
+        t.join();
+      }
+      assertEquals("Not equal in round " + round, THREAD_NUM, successCnt.get());
+      ht.close();
+      TEST_UTIL.deleteTable(TABLE);
+    }
+  }
+
   private static void assertNoLocks(final TableName tableName) throws IOException, InterruptedException {
     HRegion region = (HRegion) find(tableName);
     assertEquals(0, region.getLockedRows().size());


[12/50] [abbrv] hbase git commit: HBASE-17081 Flush the entire CompactingMemStore content to disk - recommit (Anastasia)

Posted by el...@apache.org.
HBASE-17081 Flush the entire CompactingMemStore content to disk - recommit
(Anastasia)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b779143f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b779143f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b779143f

Branch: refs/heads/HBASE-16961
Commit: b779143fdcfb1ae3bfe04f2434d6ca3d5f11b587
Parents: 805d39f
Author: Ramkrishna <ra...@intel.com>
Authored: Wed Jan 18 14:40:47 2017 +0530
Committer: Ramkrishna <ra...@intel.com>
Committed: Wed Jan 18 14:40:47 2017 +0530

----------------------------------------------------------------------
 .../hbase/regionserver/CompactingMemStore.java  |  64 +++-
 .../hbase/regionserver/CompactionPipeline.java  |  12 +
 .../regionserver/CompositeImmutableSegment.java | 306 +++++++++++++++++++
 .../hbase/regionserver/ImmutableSegment.java    |  22 +-
 .../hbase/regionserver/MemStoreCompactor.java   |   2 +-
 .../hadoop/hbase/regionserver/MemstoreSize.java |  25 +-
 .../hadoop/hbase/regionserver/Segment.java      |  21 +-
 .../hbase/regionserver/SegmentFactory.java      |  10 +
 .../TestWalAndCompactingMemStoreFlush.java      |  11 +-
 9 files changed, 452 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b779143f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 99c1685..ed7d274 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -72,6 +72,7 @@ public class CompactingMemStore extends AbstractMemStore {
   private final AtomicBoolean inMemoryFlushInProgress = new AtomicBoolean(false);
   @VisibleForTesting
   private final AtomicBoolean allowCompaction = new AtomicBoolean(true);
+  private boolean compositeSnapshot = true;
 
   public static final long DEEP_OVERHEAD = AbstractMemStore.DEEP_OVERHEAD
       + 6 * ClassSize.REFERENCE // Store, RegionServicesForStores, CompactionPipeline,
@@ -160,7 +161,12 @@ public class CompactingMemStore extends AbstractMemStore {
       stopCompaction();
       pushActiveToPipeline(this.active);
       snapshotId = EnvironmentEdgeManager.currentTime();
-      pushTailToSnapshot();
+      // in both cases whatever is pushed to snapshot is cleared from the pipeline
+      if (compositeSnapshot) {
+        pushPipelineToSnapshot();
+      } else {
+        pushTailToSnapshot();
+      }
     }
     return new MemStoreSnapshot(snapshotId, this.snapshot);
   }
@@ -173,8 +179,13 @@ public class CompactingMemStore extends AbstractMemStore {
   public MemstoreSize getFlushableSize() {
     MemstoreSize snapshotSize = getSnapshotSize();
     if (snapshotSize.getDataSize() == 0) {
-      // if snapshot is empty the tail of the pipeline is flushed
-      snapshotSize = pipeline.getTailSize();
+      // if snapshot is empty the tail of the pipeline (or everything in the memstore) is flushed
+      if (compositeSnapshot) {
+        snapshotSize = pipeline.getPipelineSize();
+        snapshotSize.incMemstoreSize(this.active.keySize(), this.active.heapOverhead());
+      } else {
+        snapshotSize = pipeline.getTailSize();
+      }
     }
     return snapshotSize.getDataSize() > 0 ? snapshotSize
         : new MemstoreSize(this.active.keySize(), this.active.heapOverhead());
@@ -221,10 +232,20 @@ public class CompactingMemStore extends AbstractMemStore {
     List<Segment> list = new ArrayList<>(pipelineList.size() + 2);
     list.add(this.active);
     list.addAll(pipelineList);
-    list.add(this.snapshot);
+    list.addAll(this.snapshot.getAllSegments());
+
     return list;
   }
 
+  // the following methods allow manipulating the composite snapshot setting
+  public void setCompositeSnapshot(boolean useCompositeSnapshot) {
+    this.compositeSnapshot = useCompositeSnapshot;
+  }
+
+  public boolean isCompositeSnapshot() {
+    return this.compositeSnapshot;
+  }
+
   public boolean swapCompactedSegments(VersionedSegmentsList versionedList, ImmutableSegment result,
       boolean merge) {
     return pipeline.swap(versionedList, result, !merge);
@@ -265,17 +286,20 @@ public class CompactingMemStore extends AbstractMemStore {
    */
   public List<KeyValueScanner> getScanners(long readPt) throws IOException {
     List<? extends Segment> pipelineList = pipeline.getSegments();
-    long order = pipelineList.size();
+    int order = pipelineList.size() + snapshot.getNumOfSegments();
     // The list of elements in pipeline + the active element + the snapshot segment
     // TODO : This will change when the snapshot is made of more than one element
     // The order is the Segment ordinal
-    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(pipelineList.size() + 2);
+    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(order+1);
     list.add(this.active.getScanner(readPt, order + 1));
     for (Segment item : pipelineList) {
       list.add(item.getScanner(readPt, order));
       order--;
     }
-    list.add(this.snapshot.getScanner(readPt, order));
+    for (Segment item : snapshot.getAllSegments()) {
+      list.add(item.getScanner(readPt, order));
+      order--;
+    }
     return Collections.<KeyValueScanner> singletonList(new MemStoreScanner(getComparator(), list));
   }
 
@@ -382,13 +406,37 @@ public class CompactingMemStore extends AbstractMemStore {
     pipeline.swap(segments,null,false); // do not close segments as they are in snapshot now
   }
 
+  private void pushPipelineToSnapshot() {
+    int iterationsCnt = 0;
+    boolean done = false;
+    while (!done) {
+      iterationsCnt++;
+      VersionedSegmentsList segments = pipeline.getVersionedList();
+      pushToSnapshot(segments.getStoreSegments());
+      // swap can return false in case the pipeline was updated by ongoing compaction
+      // and the version increased; the chance of this happening is very low
+      done = pipeline.swap(segments, null, false); // don't close segments; they are in snapshot now
+      if (iterationsCnt>2) {
+        // practically it is impossible that this loop iterates more than two times
+        // (because compaction is stopped and nothing restarts it while a snapshot is in progress),
+        // however we stop here to guard against an infinite loop caused by any error
+        LOG.warn("Multiple unsuccessful attempts to push the compaction pipeline to snapshot," +
+            " while flushing to disk.");
+        this.snapshot = SegmentFactory.instance().createImmutableSegment(getComparator());
+        break;
+      }
+    }
+  }
+
   private void pushToSnapshot(List<ImmutableSegment> segments) {
     if(segments.isEmpty()) return;
     if(segments.size() == 1 && !segments.get(0).isEmpty()) {
       this.snapshot = segments.get(0);
       return;
+    } else { // create composite snapshot
+      this.snapshot =
+          SegmentFactory.instance().createCompositeImmutableSegment(getComparator(), segments);
     }
-    // TODO else craete composite snapshot
   }
 
   private RegionServicesForStores getRegionServices() {
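
The new pushPipelineToSnapshot() above uses an optimistic, bounded retry: it reads a versioned view of the pipeline, publishes it as the snapshot, and then swaps the pipeline only if no concurrent compaction bumped the version in between. A minimal sketch of that pattern built on an AtomicReference (all types here are hypothetical, not the HBase classes):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

class VersionedPipelineSketch {
  static final class Versioned {
    final long version;
    final List<String> segments;
    Versioned(long version, List<String> segments) {
      this.version = version;
      this.segments = segments;
    }
  }

  private final AtomicReference<Versioned> pipeline =
      new AtomicReference<>(new Versioned(0, new ArrayList<>()));
  private volatile List<String> snapshot = new ArrayList<>();

  boolean pushPipelineToSnapshot() {
    for (int attempt = 0; attempt < 3; attempt++) {     // practically never loops more than twice
      Versioned observed = pipeline.get();              // versioned read of the pipeline
      snapshot = new ArrayList<>(observed.segments);    // publish what was observed
      Versioned emptied = new Versioned(observed.version + 1, new ArrayList<>());
      if (pipeline.compareAndSet(observed, emptied)) {  // succeeds only if nothing changed meanwhile
        return true;
      }
      // a concurrent update won the race; re-read and retry
    }
    snapshot = new ArrayList<>();                       // give up safely, as the patch does
    return false;
  }
}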

http://git-wip-us.apache.org/repos/asf/hbase/blob/b779143f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index fafdbee..e533bd0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -238,6 +238,18 @@ public class CompactionPipeline {
     return new MemstoreSize(localCopy.peekLast().keySize(), localCopy.peekLast().heapOverhead());
   }
 
+  public MemstoreSize getPipelineSize() {
+    long keySize = 0;
+    long heapOverhead = 0;
+    LinkedList<? extends Segment> localCopy = readOnlyCopy;
+    if (localCopy.isEmpty()) return MemstoreSize.EMPTY_SIZE;
+    for (Segment segment : localCopy) {
+      keySize += segment.keySize();
+      heapOverhead += segment.heapOverhead();
+    }
+    return new MemstoreSize(keySize, heapOverhead);
+  }
+
   private void swapSuffix(List<? extends Segment> suffix, ImmutableSegment segment,
       boolean closeSegmentsInSuffix) {
     // During index merge we won't be closing the segments undergoing the merge. Segment#close()

http://git-wip-us.apache.org/repos/asf/hbase/blob/b779143f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
new file mode 100644
index 0000000..30d17fb
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompositeImmutableSegment.java
@@ -0,0 +1,306 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellComparator;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Scan;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.SortedSet;
+
+/**
+ * A CompositeImmutableSegment is a collection of ImmutableSegments that presents
+ * the interface of a single ImmutableSegment.
+ * The CompositeImmutableSegment is intended to be used only as a snapshot,
+ * so only the relevant parts of the interface are supported.
+ */
+@InterfaceAudience.Private
+public class CompositeImmutableSegment extends ImmutableSegment {
+
+  private final List<ImmutableSegment> segments;
+  private final CellComparator comparator;
+  // CompositeImmutableSegment is used for snapshots and snapshots should
+  // support the getTimeRangeTracker() interface.
+  // Thus we hold a constant TRT, built at construction time from the TRTs of the given segments.
+  private final TimeRangeTracker timeRangeTracker;
+
+  private long keySize = 0;
+
+  public CompositeImmutableSegment(CellComparator comparator, List<ImmutableSegment> segments) {
+    super(comparator);
+    this.comparator = comparator;
+    this.segments = segments;
+    this.timeRangeTracker = new TimeRangeTracker();
+    for (ImmutableSegment s : segments) {
+      this.timeRangeTracker.includeTimestamp(s.getTimeRangeTracker().getMax());
+      this.timeRangeTracker.includeTimestamp(s.getTimeRangeTracker().getMin());
+      this.keySize += s.keySize();
+    }
+  }
+
+  @VisibleForTesting
+  public List<Segment> getAllSegments() {
+    return new LinkedList<Segment>(segments);
+  }
+
+  public int getNumOfSegments() {
+    return segments.size();
+  }
+
+  /**
+   * Builds a special scanner for the MemStoreSnapshot object that is different than the
+   * general segment scanner.
+   * @return a special scanner for the MemStoreSnapshot object
+   */
+  public KeyValueScanner getSnapshotScanner() {
+    return getScanner(Long.MAX_VALUE, Long.MAX_VALUE);
+  }
+
+  /**
+   * @return whether the segment has any cells
+   */
+  public boolean isEmpty() {
+    for (ImmutableSegment s : segments) {
+      if (!s.isEmpty()) return false;
+    }
+    return true;
+  }
+
+  /**
+   * @return number of cells in segment
+   */
+  public int getCellsCount() {
+    int result = 0;
+    for (ImmutableSegment s : segments) {
+      result += s.getCellsCount();
+    }
+    return result;
+  }
+
+  /**
+   * @return the first cell in the segment that has equal or greater key than the given cell
+   */
+  public Cell getFirstAfter(Cell cell) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * Closing a segment before it is being discarded
+   */
+  public void close() {
+    for (ImmutableSegment s : segments) {
+      s.close();
+    }
+  }
+
+  /**
+   * If the segment has a memory allocator the cell is being cloned to this space, and returned;
+   * otherwise the given cell is returned
+   * @return either the given cell or its clone
+   */
+  public Cell maybeCloneWithAllocator(Cell cell) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public boolean shouldSeek(Scan scan, long oldestUnexpiredTS){
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public long getMinTimestamp(){
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * Creates the scanner for the given read point
+   * @return a scanner for the given read point
+   */
+  public KeyValueScanner getScanner(long readPoint) {
+    // Long.MAX_VALUE is DEFAULT_SCANNER_ORDER
+    return getScanner(readPoint,Long.MAX_VALUE);
+  }
+
+  /**
+   * Creates the scanner for the given read point, and a specific order in a list
+   * @return a scanner for the given read point
+   */
+  public KeyValueScanner getScanner(long readPoint, long order) {
+    KeyValueScanner resultScanner;
+    List<KeyValueScanner> list = new ArrayList<KeyValueScanner>(segments.size());
+    for (ImmutableSegment s : segments) {
+      list.add(s.getScanner(readPoint, order));
+    }
+
+    try {
+      resultScanner = new MemStoreScanner(getComparator(), list);
+    } catch (IOException ie) {
+      throw new IllegalStateException(ie);
+    }
+
+    return resultScanner;
+  }
+
+  public boolean isTagsPresent() {
+    for (ImmutableSegment s : segments) {
+      if (s.isTagsPresent()) return true;
+    }
+    return false;
+  }
+
+  public void incScannerCount() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public void decScannerCount() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * Setting the CellSet of the segment - used only for flat immutable segment for setting
+   * immutable CellSet after its creation in immutable segment constructor
+   * @return this object
+   */
+
+  protected CompositeImmutableSegment setCellSet(CellSet cellSetOld, CellSet cellSetNew) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * @return Sum of all cell sizes.
+   */
+  public long keySize() {
+    return this.keySize;
+  }
+
+  /**
+   * @return The heap overhead of this segment.
+   */
+  public long heapOverhead() {
+    long result = 0;
+    for (ImmutableSegment s : segments) {
+      result += s.heapOverhead();
+    }
+    return result;
+  }
+
+  /**
+   * Updates the heap size counter of the segment by the given delta
+   */
+  protected void incSize(long delta, long heapOverhead) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  protected void incHeapOverheadSize(long delta) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public long getMinSequenceId() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public TimeRangeTracker getTimeRangeTracker() {
+    return this.timeRangeTracker;
+  }
+
+  //*** Methods for SegmentsScanner
+  public Cell last() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public Iterator<Cell> iterator() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public SortedSet<Cell> headSet(Cell firstKeyOnRow) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public int compare(Cell left, Cell right) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  public int compareRows(Cell left, Cell right) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * @return a set of all cells in the segment
+   */
+  protected CellSet getCellSet() {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * Returns the Cell comparator used by this segment
+   * @return the Cell comparator used by this segment
+   */
+  protected CellComparator getComparator() {
+    return comparator;
+  }
+
+  protected void internalAdd(Cell cell, boolean mslabUsed, MemstoreSize memstoreSize) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  protected void updateMetaInfo(Cell cellToAdd, boolean succ, boolean mslabUsed,
+      MemstoreSize memstoreSize) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  protected long heapOverheadChange(Cell cell, boolean succ) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  /**
+   * Returns a subset of the segment cell set, which starts with the given cell
+   * @param firstCell a cell in the segment
+   * @return a subset of the segment cell set, which starts with the given cell
+   */
+  protected SortedSet<Cell> tailSet(Cell firstCell) {
+    throw new IllegalStateException("Not supported by CompositeImmutableScanner");
+  }
+
+  // Debug methods
+  /**
+   * Dumps all cells of the segment into the given log
+   */
+  void dump(Log log) {
+    for (ImmutableSegment s : segments) {
+      s.dump(log);
+    }
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb =
+        new StringBuilder("This is CompositeImmutableSegment and those are its segments:: ");
+    for (ImmutableSegment s : segments) {
+      sb.append(s.toString());
+    }
+    return sb.toString();
+  }
+}
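
The new class is a straightforward composite: every query that makes sense for a snapshot is answered by delegating to the child segments, and everything mutation-related throws. A stripped-down sketch of that delegation pattern (SegmentView and CompositeSegmentView are illustrative names, not HBase types):

import java.util.List;

interface SegmentView {
  boolean isEmpty();
  long keySize();
}

final class CompositeSegmentView implements SegmentView {
  private final List<SegmentView> children;

  CompositeSegmentView(List<SegmentView> children) {
    this.children = children;
  }

  @Override
  public boolean isEmpty() {
    // the composite is empty only if every child is empty
    for (SegmentView child : children) {
      if (!child.isEmpty()) {
        return false;
      }
    }
    return true;
  }

  @Override
  public long keySize() {
    // sizes simply add up across children
    long total = 0;
    for (SegmentView child : children) {
      total += child.keySize();
    }
    return total;
  }
}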

http://git-wip-us.apache.org/repos/asf/hbase/blob/b779143f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
index 0fae6c3..faa9b67 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
@@ -29,6 +29,9 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.TimeRange;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 
 /**
  * ImmutableSegment is an abstract class that extends the API supported by a {@link Segment},
@@ -68,6 +71,14 @@ public class ImmutableSegment extends Segment {
 
   /////////////////////  CONSTRUCTORS  /////////////////////
   /**------------------------------------------------------------------------
+   * Empty C-tor to be used only for CompositeImmutableSegment
+   */
+  protected ImmutableSegment(CellComparator comparator) {
+    super(comparator);
+    this.timeRange = null;
+  }
+
+  /**------------------------------------------------------------------------
    * Copy C-tor to be used when new ImmutableSegment is being built from a Mutable one.
    * This C-tor should be used when active MutableSegment is pushed into the compaction
    * pipeline and becomes an ImmutableSegment.
@@ -141,6 +152,15 @@ public class ImmutableSegment extends Segment {
     return this.timeRange.getMin();
   }
 
+  public int getNumOfSegments() {
+    return 1;
+  }
+
+  public List<Segment> getAllSegments() {
+    List<Segment> res = new ArrayList<Segment>(Arrays.asList(this));
+    return res;
+  }
+
   /**------------------------------------------------------------------------
    * Change the CellSet of this ImmutableSegment from one based on ConcurrentSkipListMap to one
    * based on CellArrayMap.
@@ -231,7 +251,7 @@ public class ImmutableSegment extends Segment {
     Cell curCell;
     int idx = 0;
     // create this segment scanner with maximal possible read point, to go over all Cells
-    SegmentScanner segmentScanner = this.getScanner(Long.MAX_VALUE);
+    KeyValueScanner segmentScanner = this.getScanner(Long.MAX_VALUE);
 
     try {
       while ((curCell = segmentScanner.next()) != null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b779143f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
index 84f88f0..2174d89 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreCompactor.java
@@ -56,7 +56,7 @@ public class MemStoreCompactor {
 
   // The upper bound for the number of segments we store in the pipeline prior to merging.
   // This constant is subject to further experimentation.
-  private static final int THRESHOLD_PIPELINE_SEGMENTS = 1;
+  private static final int THRESHOLD_PIPELINE_SEGMENTS = 30; // stands here for infinity
 
   private static final Log LOG = LogFactory.getLog(MemStoreCompactor.class);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b779143f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
index 77cea51..fa7c342 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemstoreSize.java
@@ -25,19 +25,32 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 @InterfaceAudience.Private
 public class MemstoreSize {
 
-  static final MemstoreSize EMPTY_SIZE = new MemstoreSize();
-
   private long dataSize;
   private long heapOverhead;
+  final private boolean isEmpty;
+
+  static final MemstoreSize EMPTY_SIZE = new MemstoreSize(true);
 
   public MemstoreSize() {
     dataSize = 0;
     heapOverhead = 0;
+    isEmpty = false;
+  }
+
+  public MemstoreSize(boolean isEmpty) {
+    dataSize = 0;
+    heapOverhead = 0;
+    this.isEmpty = isEmpty;
+  }
+
+  public boolean isEmpty() {
+    return isEmpty;
   }
 
   public MemstoreSize(long dataSize, long heapOverhead) {
     this.dataSize = dataSize;
     this.heapOverhead = heapOverhead;
+    this.isEmpty = false;
   }
 
   public void incMemstoreSize(long dataSize, long heapOverhead) {
@@ -61,11 +74,13 @@ public class MemstoreSize {
   }
 
   public long getDataSize() {
-    return dataSize;
+
+    return isEmpty ? 0 : dataSize;
   }
 
   public long getHeapOverhead() {
-    return heapOverhead;
+
+    return isEmpty ? 0 : heapOverhead;
   }
 
   @Override
@@ -74,7 +89,7 @@ public class MemstoreSize {
       return false;
     }
     MemstoreSize other = (MemstoreSize) obj;
-    return this.dataSize == other.dataSize && this.heapOverhead == other.heapOverhead;
+    return getDataSize() == other.dataSize && getHeapOverhead() == other.heapOverhead;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/b779143f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
index afdfe6f..8581517 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Segment.java
@@ -18,7 +18,9 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 import java.util.SortedSet;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
@@ -64,6 +66,15 @@ public abstract class Segment {
   protected final TimeRangeTracker timeRangeTracker;
   protected volatile boolean tagsPresent;
 
+  // Empty constructor to be used when Segment is used as an interface,
+  // and there is no need for a true Segment's state
+  protected Segment(CellComparator comparator) {
+    this.comparator = comparator;
+    this.dataSize = new AtomicLong(0);
+    this.heapOverhead = new AtomicLong(0);
+    this.timeRangeTracker = new TimeRangeTracker();
+  }
+
   // This constructor is used to create empty Segments.
   protected Segment(CellSet cellSet, CellComparator comparator, MemStoreLAB memStoreLAB) {
     this.cellSet.set(cellSet);
@@ -91,7 +102,7 @@ public abstract class Segment {
    * Creates the scanner for the given read point
    * @return a scanner for the given read point
    */
-  public SegmentScanner getScanner(long readPoint) {
+  public KeyValueScanner getScanner(long readPoint) {
     return new SegmentScanner(this, readPoint);
   }
 
@@ -99,10 +110,16 @@ public abstract class Segment {
    * Creates the scanner for the given read point, and a specific order in a list
    * @return a scanner for the given read point
    */
-  public SegmentScanner getScanner(long readPoint, long order) {
+  public KeyValueScanner getScanner(long readPoint, long order) {
     return new SegmentScanner(this, readPoint, order);
   }
 
+  public List<KeyValueScanner> getScanners(long readPoint, long order) {
+    List<KeyValueScanner> scanners = new ArrayList<KeyValueScanner>(1);
+    scanners.add(getScanner(readPoint, order));
+    return scanners;
+  }
+
   /**
    * @return whether the segment has any cells
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/b779143f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
index 01e07ef..7e53026 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SegmentFactory.java
@@ -47,6 +47,13 @@ public final class SegmentFactory {
     return new ImmutableSegment(comparator, iterator, MemStoreLAB.newInstance(conf));
   }
 
+  // create composite immutable segment from a list of segments
+  public CompositeImmutableSegment createCompositeImmutableSegment(
+      final CellComparator comparator, List<ImmutableSegment> segments) {
+    return new CompositeImmutableSegment(comparator, segments);
+
+  }
+
   // create new flat immutable segment from compacting old immutable segments
   public ImmutableSegment createImmutableSegmentByCompaction(final Configuration conf,
       final CellComparator comparator, MemStoreSegmentsIterator iterator, int numOfCells,
@@ -102,6 +109,9 @@ public final class SegmentFactory {
 
   private MemStoreLAB getMergedMemStoreLAB(Configuration conf, List<ImmutableSegment> segments) {
     List<MemStoreLAB> mslabs = new ArrayList<MemStoreLAB>();
+    if (!conf.getBoolean(MemStoreLAB.USEMSLAB_KEY, MemStoreLAB.USEMSLAB_DEFAULT)) {
+      return null;
+    }
     for (ImmutableSegment segment : segments) {
       mslabs.add(segment.getMemStoreLAB());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b779143f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index 133c53b..8215d53 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -622,10 +622,9 @@ public class TestWalAndCompactingMemStoreFlush {
     // Set up the configuration
     Configuration conf = HBaseConfiguration.create();
     conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 300 * 1024);
-    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY, FlushNonSloppyStoresFirstPolicy.class
-        .getName());
-    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 *
-        1024);
+    conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
+        FlushNonSloppyStoresFirstPolicy.class.getName());
+    conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN, 75 * 1024);
     conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction and not to use the speculative scan
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
@@ -648,6 +647,10 @@ public class TestWalAndCompactingMemStoreFlush {
       region.put(createPut(2, i));
     }
 
+    // in this test check the non-composite snapshot - flushing only the tail of the pipeline
+    ((CompactingMemStore) ((HStore) region.getStore(FAMILY1)).memstore).setCompositeSnapshot(false);
+    ((CompactingMemStore) ((HStore) region.getStore(FAMILY3)).memstore).setCompositeSnapshot(false);
+
     long totalMemstoreSize = region.getMemstoreSize();
 
     // Find the sizes of the memstores of each CF.


[28/50] [abbrv] hbase git commit: HBASE-17498 Implement listTables and listTableNames methods

Posted by el...@apache.org.
HBASE-17498 Implement listTables and listTableNames methods

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/79018056
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/79018056
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/79018056

Branch: refs/heads/HBASE-16961
Commit: 79018056f542cde5850b1d1fc2fe248f0007fd66
Parents: 2285c57
Author: Guanghao Zhang <zg...@apache.org>
Authored: Thu Jan 19 15:13:45 2017 +0800
Committer: Michael Stack <st...@apache.org>
Committed: Fri Jan 20 14:12:13 2017 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/client/AsyncAdmin.java  | 61 +++++++++++++++++--
 .../hadoop/hbase/client/AsyncHBaseAdmin.java    | 62 ++++++++++++++++++--
 .../hadoop/hbase/client/TestAsyncAdmin.java     | 58 ++++++++++++++++++
 3 files changed, 171 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/79018056/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 1ce23b9..20a6070 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -17,8 +17,11 @@
  */
 package org.apache.hadoop.hbase.client;
 
-import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
+import java.util.regex.Pattern;
+ 
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -30,6 +33,54 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public interface AsyncAdmin {
+  /**
+   * List all the userspace tables.
+   * @return - returns an array of HTableDescriptors wrapped by a {@link CompletableFuture}.
+   * @see #listTables(Pattern, boolean)
+   */
+  CompletableFuture<HTableDescriptor[]> listTables();
+
+  /**
+   * List all the tables matching the given pattern.
+   * @param regex The regular expression to match against
+   * @param includeSysTables False to match only against userspace tables
+   * @return - returns an array of HTableDescriptors wrapped by a {@link CompletableFuture}.
+   * @see #listTables(Pattern, boolean)
+   */
+  CompletableFuture<HTableDescriptor[]> listTables(String regex, boolean includeSysTables);
+
+  /**
+   * List all the tables matching the given pattern.
+   * @param pattern The compiled regular expression to match against
+   * @param includeSysTables False to match only against userspace tables
+   * @return - returns an array of HTableDescriptors wrapped by a {@link CompletableFuture}.
+   */
+  CompletableFuture<HTableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables);
+
+  /**
+   * List all of the names of userspace tables.
+   * @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
+   * @see #listTableNames(Pattern, boolean)
+   */
+  CompletableFuture<TableName[]> listTableNames();
+
+  /**
+   * List all of the names of userspace tables.
+   * @param regex The regular expression to match against
+   * @param includeSysTables False to match only against userspace tables
+   * @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
+   * @see #listTableNames(Pattern, boolean)
+   */
+  CompletableFuture<TableName[]> listTableNames(final String regex, final boolean includeSysTables);
+
+  /**
+   * List all of the names of userspace tables.
+   * @param pattern The regular expression to match against
+   * @param includeSysTables False to match only against userspace tables
+   * @return TableName[] an array of table names wrapped by a {@link CompletableFuture}.
+   */
+  CompletableFuture<TableName[]> listTableNames(final Pattern pattern,
+      final boolean includeSysTables);
 
   /**
    * @param tableName Table to check.
@@ -43,7 +94,7 @@ public interface AsyncAdmin {
    * @param on
    * @return Previous balancer value wrapped by a {@link CompletableFuture}.
    */
-  CompletableFuture<Boolean> setBalancerRunning(final boolean on) throws IOException;
+  CompletableFuture<Boolean> setBalancerRunning(final boolean on);
 
   /**
    * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the
@@ -51,7 +102,7 @@ public interface AsyncAdmin {
    * @return True if balancer ran, false otherwise. The return value will be wrapped by a
    *         {@link CompletableFuture}.
    */
-  CompletableFuture<Boolean> balancer() throws IOException;
+  CompletableFuture<Boolean> balancer();
 
   /**
    * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the
@@ -61,12 +112,12 @@ public interface AsyncAdmin {
    * @return True if balancer ran, false otherwise. The return value will be wrapped by a
    *         {@link CompletableFuture}.
    */
-  CompletableFuture<Boolean> balancer(boolean force) throws IOException;
+  CompletableFuture<Boolean> balancer(boolean force);
 
   /**
    * Query the current state of the balancer.
    * @return true if the balancer is enabled, false otherwise.
    *         The return value will be wrapped by a {@link CompletableFuture}.
    */
-  CompletableFuture<Boolean> isBalancerEnabled() throws IOException;
+  CompletableFuture<Boolean> isBalancerEnabled();
 }
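
All of these methods now return a CompletableFuture instead of blocking, so a caller chains continuations rather than waiting on the result. A small usage sketch against the interface above (it assumes an AsyncAdmin instance already obtained elsewhere, e.g. from an async connection; the surrounding class name is illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.regex.Pattern;

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

class ListTablesExample {
  static void printTables(AsyncAdmin admin) {
    // userspace table names only
    CompletableFuture<TableName[]> names = admin.listTableNames();
    names.thenAccept(tableNames -> {
      for (TableName name : tableNames) {
        System.out.println("user table: " + name);
      }
    });

    // a null pattern plus includeSysTables = true also returns system tables
    CompletableFuture<HTableDescriptor[]> all = admin.listTables((Pattern) null, true);
    all.thenAccept(descs -> System.out.println("tables including system tables: " + descs.length));
  }
}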

http://git-wip-us.apache.org/repos/asf/hbase/blob/79018056/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index 9398972..8682116 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;
 
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -28,9 +31,14 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
@@ -107,7 +115,51 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }
 
   @Override
-  public CompletableFuture<Boolean> setBalancerRunning(final boolean on) throws IOException {
+  public CompletableFuture<HTableDescriptor[]> listTables() {
+    return listTables((Pattern)null, false);
+  }
+
+  @Override
+  public CompletableFuture<HTableDescriptor[]> listTables(String regex, boolean includeSysTables) {
+    return listTables(Pattern.compile(regex), false);
+  }
+
+  @Override
+  public CompletableFuture<HTableDescriptor[]> listTables(Pattern pattern, boolean includeSysTables) {
+    return this
+        .<HTableDescriptor[]> newCaller()
+        .action(
+          (controller, stub) -> this
+              .<GetTableDescriptorsRequest, GetTableDescriptorsResponse, HTableDescriptor[]> call(
+                controller, stub, RequestConverter.buildGetTableDescriptorsRequest(pattern,
+                  includeSysTables), (s, c, req, done) -> s.getTableDescriptors(c, req, done), (
+                    resp) -> ProtobufUtil.getHTableDescriptorArray(resp))).call();
+  }
+
+  @Override
+  public CompletableFuture<TableName[]> listTableNames() {
+    return listTableNames((Pattern)null, false);
+  }
+
+  @Override
+  public CompletableFuture<TableName[]> listTableNames(String regex, boolean includeSysTables) {
+    return listTableNames(Pattern.compile(regex), false);
+  }
+
+  @Override
+  public CompletableFuture<TableName[]> listTableNames(Pattern pattern, boolean includeSysTables) {
+    return this
+        .<TableName[]> newCaller()
+        .action(
+          (controller, stub) -> this
+              .<GetTableNamesRequest, GetTableNamesResponse, TableName[]> call(controller, stub,
+                RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables), (s, c, req,
+                    done) -> s.getTableNames(c, req, done), (resp) -> ProtobufUtil
+                    .getTableNameArray(resp.getTableNamesList()))).call();
+  }
+
+  @Override
+  public CompletableFuture<Boolean> setBalancerRunning(final boolean on) {
     return this
         .<Boolean> newCaller()
         .action(
@@ -119,12 +171,12 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }
 
   @Override
-  public CompletableFuture<Boolean> balancer() throws IOException {
+  public CompletableFuture<Boolean> balancer() {
     return balancer(false);
   }
 
   @Override
-  public CompletableFuture<Boolean> balancer(boolean force) throws IOException {
+  public CompletableFuture<Boolean> balancer(boolean force) {
     return this
         .<Boolean> newCaller()
         .action(
@@ -134,7 +186,7 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   }
 
   @Override
-  public CompletableFuture<Boolean> isBalancerEnabled() throws IOException {
+  public CompletableFuture<Boolean> isBalancerEnabled() {
     return this
         .<Boolean> newCaller()
         .action(
@@ -148,4 +200,4 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
   public CompletableFuture<Boolean> tableExists(TableName tableName) {
     return AsyncMetaTableAccessor.tableExists(connection, tableName);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/79018056/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
index 70b4cf2..ccd6873 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
@@ -18,15 +18,21 @@
 package org.apache.hadoop.hbase.client;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.regex.Pattern;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -67,6 +73,58 @@ public class TestAsyncAdmin {
   }
 
   @Test
+  public void testListTables() throws Exception {
+    TableName t1 = TableName.valueOf("testListTables1");
+    TableName t2 = TableName.valueOf("testListTables2");
+    TableName t3 = TableName.valueOf("testListTables3");
+    TableName[] tables = new TableName[] { t1, t2, t3 };
+    for (int i = 0; i < tables.length; i++) {
+      TEST_UTIL.createTable(tables[i], FAMILY);
+    }
+
+    HTableDescriptor[] tableDescs = admin.listTables().get();
+    int size = tableDescs.length;
+    assertTrue(size >= tables.length);
+    for (int i = 0; i < tables.length && i < size; i++) {
+      boolean found = false;
+      for (int j = 0; j < tableDescs.length; j++) {
+        if (tableDescs[j].getTableName().equals(tables[i])) {
+          found = true;
+          break;
+        }
+      }
+      assertTrue("Not found: " + tables[i], found);
+    }
+
+    TableName[] tableNames = admin.listTableNames().get();
+    size = tableNames.length;
+    assertTrue(size >= tables.length);
+    for (int i = 0; i < tables.length && i < size; i++) {
+      boolean found = false;
+      for (int j = 0; j < tableNames.length; j++) {
+        if (tableNames[j].equals(tables[i])) {
+          found = true;
+          break;
+        }
+      }
+      assertTrue("Not found: " + tables[i], found);
+    }
+
+    for (int i = 0; i < tables.length; i++) {
+      TEST_UTIL.deleteTable(tables[i]);
+    }
+    tableDescs = admin.listTables().get();
+    assertEquals(0, tableDescs.length);
+    tableNames = admin.listTableNames().get();
+    assertEquals(0, tableNames.length);
+
+    tableDescs = admin.listTables((Pattern) null, true).get();
+    assertTrue("Not found system tables", tableDescs.length > 0);
+    tableNames = admin.listTableNames((Pattern) null, true).get();
+    assertTrue("Not found system tables", tableNames.length > 0);
+  }
+
+  @Test
   public void testTableExist() throws Exception {
     final TableName table = TableName.valueOf("testTableExist");
     boolean exist;


[31/50] [abbrv] hbase git commit: HBASE-10699 Set capacity on ArrayList where possible and use isEmpty instead of size() == 0

Posted by el...@apache.org.
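
The change applied throughout this commit is mechanical: replace collection.size() == 0 with collection.isEmpty(). Besides reading more directly, isEmpty() can be cheaper, because some collections (ConcurrentLinkedQueue, for example) compute size() by walking the whole structure. A tiny illustration (class and method names are made up for the example):

import java.util.concurrent.ConcurrentLinkedQueue;

class IsEmptyVsSize {
  static boolean hasPendingWork(ConcurrentLinkedQueue<Runnable> queue) {
    // preferred: constant-time and states the intent directly
    return !queue.isEmpty();
    // avoid: queue.size() != 0, which is O(n) for this queue implementation
  }
}
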
http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
index e2016ef..ee32887 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/HFileArchiver.java
@@ -229,7 +229,7 @@ public class HFileArchiver {
     }
 
     // short circuit if we don't have any files to delete
-    if (compactedFiles.size() == 0) {
+    if (compactedFiles.isEmpty()) {
       LOG.debug("No store files to dispose, done!");
       return;
     }
@@ -311,7 +311,7 @@ public class HFileArchiver {
   private static List<File> resolveAndArchive(FileSystem fs, Path baseArchiveDir,
       Collection<File> toArchive, long start) throws IOException {
     // short circuit if no files to move
-    if (toArchive.size() == 0) return Collections.emptyList();
+    if (toArchive.isEmpty()) return Collections.emptyList();
 
     if (LOG.isTraceEnabled()) LOG.trace("moving files to the archive directory: " + baseArchiveDir);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
index 3af01ac..bfcf486 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/errorhandling/ForeignException.java
@@ -181,7 +181,7 @@ public class ForeignException extends IOException {
    *         the sender).
    */
   private static StackTraceElement[] toStackTrace(List<StackTraceElementMessage> traceList) {
-    if (traceList == null || traceList.size() == 0) {
+    if (traceList == null || traceList.isEmpty()) {
       return new StackTraceElement[0]; // empty array
     }
     StackTraceElement[] trace = new StackTraceElement[traceList.size()];

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index 98e058d..625d01f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -562,7 +562,7 @@ public class FavoredNodeAssignmentHelper {
 
     // Is the rack valid? Do we recognize it?
     if (rack == null || getServersFromRack(rack) == null ||
-        getServersFromRack(rack).size() == 0) {
+        getServersFromRack(rack).isEmpty()) {
       return null;
     }
 
@@ -577,7 +577,7 @@ public class FavoredNodeAssignmentHelper {
         serversToChooseFrom.remove(StartcodeAgnosticServerName.valueOf(sn));
       }
       // Do we have any servers left to choose from?
-      if (serversToChooseFrom.size() == 0) {
+      if (serversToChooseFrom.isEmpty()) {
         return null;
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
index 5629f35..f24d9fc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodesPlan.java
@@ -55,7 +55,7 @@ public class FavoredNodesPlan {
    * @param servers
    */
   public void updateFavoredNodesMap(HRegionInfo region, List<ServerName> servers) {
-    if (region == null || servers == null || servers.size() == 0) {
+    if (region == null || servers == null || servers.isEmpty()) {
       return;
     }
     this.favoredNodesMap.put(region.getRegionNameAsString(), servers);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
index 98c29e7..84c1fea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java
@@ -360,12 +360,12 @@ public class HttpServer implements FilterContainer {
         }
       }
 
-      if (endpoints.size() == 0 && connector == null) {
+      if (endpoints.isEmpty() && connector == null) {
         throw new HadoopIllegalArgumentException("No endpoints specified");
       }
 
       if (hostName == null) {
-        hostName = endpoints.size() == 0 ? connector.getHost() : endpoints.get(
+        hostName = endpoints.isEmpty() ? connector.getHost() : endpoints.get(
             0).getHost();
       }
 
@@ -1179,7 +1179,7 @@ public class HttpServer implements FilterContainer {
    */
   @Override
   public String toString() {
-    if (listeners.size() == 0) {
+    if (listeners.isEmpty()) {
       return "Inactive HttpServer";
     } else {
       StringBuilder sb = new StringBuilder("HttpServer (")

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 770f202..7de91e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -807,7 +807,7 @@ public class TableMapReduceUtil {
       throw new IllegalArgumentException("Must provide a configuration object.");
     }
     Set<String> paths = new HashSet<String>(conf.getStringCollection("tmpjars"));
-    if (paths.size() == 0) {
+    if (paths.isEmpty()) {
       throw new IllegalArgumentException("Configuration contains no tmpjars.");
     }
     StringBuilder sb = new StringBuilder();

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index d2863e3..ef042af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -112,7 +112,7 @@ public class CatalogJanitor extends ScheduledChore {
           && !this.services.isInMaintenanceMode()
           && am != null
           && am.isFailoverCleanupDone()
-          && am.getRegionStates().getRegionsInTransition().size() == 0) {
+          && am.getRegionStates().getRegionsInTransition().isEmpty()) {
         scan();
       } else {
         LOG.warn("CatalogJanitor disabled! Not running scan.");

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 154958b..fbe8ec6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2902,7 +2902,7 @@ public class HMaster extends HRegionServer implements MasterServices {
       final String namespace, final String regex, final List<TableName> tableNameList,
       final boolean includeSysTables)
   throws IOException {
-    if (tableNameList == null || tableNameList.size() == 0) {
+    if (tableNameList == null || tableNameList.isEmpty()) {
       // request for all TableDescriptors
       Collection<HTableDescriptor> allHtds;
       if (namespace != null && namespace.length() > 0) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 7c2df61..09d5190 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -808,7 +808,7 @@ public class RegionStates {
     TableName table = hri.getTable();
     Map<String, RegionState> indexMap = regionStatesTableIndex.get(table);
     indexMap.remove(encodedName);
-    if (indexMap.size() == 0)
+    if (indexMap.isEmpty())
       regionStatesTableIndex.remove(table);
     lastAssignments.remove(encodedName);
     ServerName sn = regionAssignments.remove(hri);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 38493cd..37830f7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -554,7 +554,7 @@ public class ServerManager {
 
       try {
         List<String> servers = ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.rsZNode);
-        if (servers == null || servers.size() == 0 || (servers.size() == 1
+        if (servers == null || servers.isEmpty() || (servers.size() == 1
             && servers.contains(sn.toString()))) {
           LOG.info("ZK shows there is only the master self online, exiting now");
           // Master could have lost some ZK events, no need to wait more.

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 589da14..1c0c12f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -762,7 +762,7 @@ public class SplitLogManager {
           EnvironmentEdgeManager.currentTime()
               - getSplitLogManagerCoordination().getLastRecoveryTime();
       if (!failedRecoveringRegionDeletions.isEmpty()
-          || (tot == 0 && tasks.size() == 0 && (timeInterval > checkRecoveringTimeThreshold))) {
+          || (tot == 0 && tasks.isEmpty() && (timeInterval > checkRecoveringTimeThreshold))) {
         // inside the function there have more checks before GC anything
         if (!failedRecoveringRegionDeletions.isEmpty()) {
           List<Pair<Set<ServerName>, Boolean>> previouslyFailedDeletions =

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
index 673db95..a6a0774 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/SimpleLoadBalancer.java
@@ -484,7 +484,7 @@ public class SimpleLoadBalancer extends BaseLoadBalancer {
       setLoad(serverLoadList, i, balanceInfo.getNumRegionsAdded());
       if (balanceInfo.getHriList().size() + balanceInfo.getNumRegionsAdded() == max) {
         HRegionInfo hriToPlan;
-        if (balanceInfo.getHriList().size() == 0) {
+        if (balanceInfo.getHriList().isEmpty()) {
           LOG.debug("During balanceOverall, we found " + serverload.getServerName()
                   + " has no HRegionInfo, no operation needed");
           continue;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
index a47228b..b037c89 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java
@@ -423,7 +423,7 @@ public class CompactionTool extends Configured implements Tool {
       return 1;
     }
 
-    if (toCompactDirs.size() == 0) {
+    if (toCompactDirs.isEmpty()) {
       printUsage("No directories to compact specified.");
       return 1;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 5fc53d8..f35d788 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3698,7 +3698,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         boolean valueIsNull = comparator.getValue() == null || comparator.getValue().length == 0;
         boolean matches = false;
         long cellTs = 0;
-        if (result.size() == 0 && valueIsNull) {
+        if (result.isEmpty() && valueIsNull) {
           matches = true;
         } else if (result.size() > 0 && result.get(0).getValueLength() == 0 && valueIsNull) {
           matches = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 808cd20..425667a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -522,7 +522,7 @@ public class HStore implements Store {
   }
 
   private List<StoreFile> openStoreFiles(Collection<StoreFileInfo> files) throws IOException {
-    if (files == null || files.size() == 0) {
+    if (files == null || files.isEmpty()) {
       return new ArrayList<StoreFile>();
     }
     // initialize the thread pool for opening store files in parallel..

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
index f0cb5d2..8b5aa31 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/ExploringCompactionPolicy.java
@@ -114,7 +114,7 @@ public class ExploringCompactionPolicy extends RatioBasedCompactionPolicy {
         }
       }
     }
-    if (bestSelection.size() == 0 && mightBeStuck) {
+    if (bestSelection.isEmpty() && mightBeStuck) {
       LOG.debug("Exploring compaction algorithm has selected " + smallest.size()
           + " files of size "+ smallestSize + " because the store might be stuck");
       return new ArrayList<StoreFile>(smallest);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
index ff1dd8e..a553cf6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/StripeCompactionPolicy.java
@@ -136,7 +136,7 @@ public class StripeCompactionPolicy extends CompactionPolicy {
       return selectNewStripesCompaction(si);
     }
 
-    boolean canDropDeletesNoL0 = l0Files.size() == 0;
+    boolean canDropDeletesNoL0 = l0Files.isEmpty();
     if (shouldCompactL0) {
       if (!canDropDeletesNoL0) {
         // If we need to compact L0, see if we can add something to it, and drop deletes.

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java
index 0ec3444..4ba685f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/LegacyScanQueryMatcher.java
@@ -371,7 +371,7 @@ public class LegacyScanQueryMatcher extends ScanQueryMatcher {
     int maxVersions = Math.min(scan.getMaxVersions(), scanInfo.getMaxVersions());
     boolean hasNullColumn;
     ColumnTracker columnTracker;
-    if (columns == null || columns.size() == 0) {
+    if (columns == null || columns.isEmpty()) {
       // there is always a null column in the wildcard column query.
       hasNullColumn = true;
       // use a specialized scan for wildcard column tracker.

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
index fcda4a6..407f3fc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/querymatcher/UserScanQueryMatcher.java
@@ -198,7 +198,7 @@ public abstract class UserScanQueryMatcher extends ScanQueryMatcher {
         : Math.min(scan.getMaxVersions(), scanInfo.getMaxVersions());
     boolean hasNullColumn;
     ColumnTracker columnTracker;
-    if (columns == null || columns.size() == 0) {
+    if (columns == null || columns.isEmpty()) {
       // there is always a null column in the wildcard column query.
       hasNullColumn = true;
       // use a specialized scan for wildcard column tracker.

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
index 6cfd631..4dee9f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEditsReplaySink.java
@@ -94,7 +94,7 @@ public class WALEditsReplaySink {
    * @throws IOException on IO failure
    */
   public void replayEntries(List<Pair<HRegionLocation, Entry>> entries) throws IOException {
-    if (entries.size() == 0) {
+    if (entries.isEmpty()) {
       return;
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java
index 08888f8..bb8cbe85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/TableCFsUpdater.java
@@ -75,7 +75,7 @@ public class TableCFsUpdater extends ReplicationStateZKBase {
         String peerNode = getPeerNode(peerId);
         ReplicationPeerConfig rpc = getReplicationPeerConig(peerNode);
         // We only need to copy data from tableCFs node to rpc Node the first time hmaster start.
-        if (rpc.getTableCFsMap() == null || rpc.getTableCFsMap().size() == 0) {
+        if (rpc.getTableCFsMap() == null || rpc.getTableCFsMap().isEmpty()) {
           // we copy TableCFs node into PeerNode
           LOG.info("copy tableCFs into peerNode:" + peerId);
           ReplicationProtos.TableCF[] tableCFs =

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 4e4cf37..71f9f3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -184,7 +184,7 @@ public class ReplicationSink {
                   CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(),
                       cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(),
                       cell.getRowLength());
-              List<UUID> clusterIds = new ArrayList<UUID>();
+              List<UUID> clusterIds = new ArrayList<UUID>(entry.getKey().getClusterIdsList().size());
               for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
                 clusterIds.add(toUUID(clusterId));
               }
@@ -275,7 +275,7 @@ public class ReplicationSink {
 
   private void addFamilyAndItsHFilePathToTableInMap(byte[] family, String pathToHfileFromNS,
       List<Pair<byte[], List<String>>> familyHFilePathsList) {
-    List<String> hfilePaths = new ArrayList<String>();
+    List<String> hfilePaths = new ArrayList<String>(1);
     hfilePaths.add(pathToHfileFromNS);
     familyHFilePathsList.add(new Pair<byte[], List<String>>(family, hfilePaths));
   }
@@ -283,7 +283,7 @@ public class ReplicationSink {
   private void addNewTableEntryInMap(
       final Map<String, List<Pair<byte[], List<String>>>> bulkLoadHFileMap, byte[] family,
       String pathToHfileFromNS, String tableName) {
-    List<String> hfilePaths = new ArrayList<String>();
+    List<String> hfilePaths = new ArrayList<String>(1);
     hfilePaths.add(pathToHfileFromNS);
     Pair<byte[], List<String>> newFamilyHFilePathsPair =
         new Pair<byte[], List<String>>(family, hfilePaths);
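
The hunks above also switch new ArrayList<String>() to new ArrayList<String>(1) where exactly one element is ever added, and size the cluster-id list from getClusterIdsList().size(). Passing the known final size avoids allocating the default-capacity backing array (ten slots) and any later growth. A small sketch of the same idea, with hypothetical names and not part of the patch:

    import java.util.ArrayList;
    import java.util.List;

    final class PresizedListSketch {
      // The list only ever holds a single path, so size the backing array for
      // one element up front instead of relying on the default capacity.
      static List<String> singletonPathList(String pathToHfileFromNS) {
        List<String> hfilePaths = new ArrayList<String>(1);
        hfilePaths.add(pathToHfileFromNS);
        return hfilePaths;
      }
    }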

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
index 7a229eb..1eaa84c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSource.java
@@ -614,7 +614,7 @@ public class ReplicationSource extends Thread
         //We take the snapshot now so that we are protected against races
         //where a new file gets enqueued while the current file is being processed
         //(and where we just finished reading the current file).
-        if (!this.replicationQueueInfo.isQueueRecovered() && queue.size() == 0) {
+        if (!this.replicationQueueInfo.isQueueRecovered() && queue.isEmpty()) {
           currentWALisBeingWrittenTo = true;
         }
         // Open a reader on it
@@ -1075,7 +1075,7 @@ public class ReplicationSource extends Thread
      */
     private boolean isCurrentLogEmpty() {
       return (this.repLogReader.getPosition() == 0 &&
-          !this.replicationQueueInfo.isQueueRecovered() && queue.size() == 0);
+          !this.replicationQueueInfo.isQueueRecovered() && queue.isEmpty());
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 5b574da..5cb7d75 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -780,7 +780,7 @@ public class ReplicationSourceManager implements ReplicationListener {
     @Override
     public void run() {
       List<String> currentReplicators = replicationQueues.getListOfReplicators();
-      if (currentReplicators == null || currentReplicators.size() == 0) {
+      if (currentReplicators == null || currentReplicators.isEmpty()) {
         return;
       }
       List<String> otherRegionServers = replicationTracker.getListOfRegionServers();

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
index f1aec09..9abb3a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
@@ -553,7 +553,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
       Byte deleteTagsFormat) throws IOException {
     if ((deleteTagsFormat != null && deleteTagsFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT)
         && (putTagsFormat == null || putTagsFormat == SORTED_ORDINAL_SERIALIZATION_FORMAT)) {
-      if (putVisTags.size() == 0) {
+      if (putVisTags.isEmpty()) {
         // Early out if there are no tags in the cell
         return false;
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
index d1f5372..98afe8b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
@@ -399,7 +399,7 @@ public class RestoreSnapshotHelper {
    */
   private void removeHdfsRegions(final ThreadPoolExecutor exec, final List<HRegionInfo> regions)
       throws IOException {
-    if (regions == null || regions.size() == 0) return;
+    if (regions == null || regions.isEmpty()) return;
     ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
       @Override
       public void editRegion(final HRegionInfo hri) throws IOException {
@@ -414,7 +414,7 @@ public class RestoreSnapshotHelper {
   private void restoreHdfsRegions(final ThreadPoolExecutor exec,
       final Map<String, SnapshotRegionManifest> regionManifests,
       final List<HRegionInfo> regions) throws IOException {
-    if (regions == null || regions.size() == 0) return;
+    if (regions == null || regions.isEmpty()) return;
     ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
       @Override
       public void editRegion(final HRegionInfo hri) throws IOException {
@@ -429,7 +429,7 @@ public class RestoreSnapshotHelper {
   private void restoreHdfsMobRegions(final ThreadPoolExecutor exec,
       final Map<String, SnapshotRegionManifest> regionManifests,
       final List<HRegionInfo> regions) throws IOException {
-    if (regions == null || regions.size() == 0) return;
+    if (regions == null || regions.isEmpty()) return;
     ModifyRegionUtils.editRegions(exec, regions, new ModifyRegionUtils.RegionEditTask() {
       @Override
       public void editRegion(final HRegionInfo hri) throws IOException {
@@ -562,7 +562,7 @@ public class RestoreSnapshotHelper {
   private HRegionInfo[] cloneHdfsRegions(final ThreadPoolExecutor exec,
       final Map<String, SnapshotRegionManifest> regionManifests,
       final List<HRegionInfo> regions) throws IOException {
-    if (regions == null || regions.size() == 0) return null;
+    if (regions == null || regions.isEmpty()) return null;
 
     final Map<String, HRegionInfo> snapshotRegions =
       new HashMap<String, HRegionInfo>(regions.size());

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
index 3fb445c..85d3af3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotInfo.java
@@ -686,7 +686,7 @@ public final class SnapshotInfo extends AbstractHBaseTool {
     List<SnapshotDescription> snapshotList = getSnapshotList(conf);
 
 
-    if (snapshotList.size() == 0) {
+    if (snapshotList.isEmpty()) {
       return Collections.emptyMap();
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
index e74982d..47e3073 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
@@ -425,7 +425,7 @@ public final class SnapshotManifest {
    * This is an helper to get a map with the region encoded name
    */
   public Map<String, SnapshotRegionManifest> getRegionManifestsMap() {
-    if (regionManifests == null || regionManifests.size() == 0) return null;
+    if (regionManifests == null || regionManifests.isEmpty()) return null;
 
     HashMap<String, SnapshotRegionManifest> regionsMap =
         new HashMap<String, SnapshotRegionManifest>(regionManifests.size());

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
index b250229..8cd438e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotReferenceUtil.java
@@ -112,7 +112,7 @@ public final class SnapshotReferenceUtil {
       throws IOException {
     SnapshotManifest manifest = SnapshotManifest.open(conf, fs, snapshotDir, desc);
     List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
-    if (regionManifests == null || regionManifests.size() == 0) {
+    if (regionManifests == null || regionManifests.isEmpty()) {
       LOG.debug("No manifest files present: " + snapshotDir);
       return;
     }
@@ -184,7 +184,7 @@ public final class SnapshotReferenceUtil {
 
     final Path snapshotDir = manifest.getSnapshotDir();
     List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
-    if (regionManifests == null || regionManifests.size() == 0) {
+    if (regionManifests == null || regionManifests.isEmpty()) {
       LOG.debug("No manifest files present: " + snapshotDir);
       return;
     }
@@ -205,7 +205,7 @@ public final class SnapshotReferenceUtil {
     final Path snapshotDir = manifest.getSnapshotDir();
 
     List<SnapshotRegionManifest> regionManifests = manifest.getRegionManifests();
-    if (regionManifests == null || regionManifests.size() == 0) {
+    if (regionManifests == null || regionManifests.isEmpty()) {
       LOG.debug("No manifest files present: " + snapshotDir);
       return;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 3d77ca1..c6d3e80 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -1318,7 +1318,7 @@ public final class Canary implements Tool {
             "option, tablenames:" + foundTableNames.toString());
         this.errorCode = USAGE_EXIT_CODE;
       }
-      return foundTableNames.size() == 0;
+      return foundTableNames.isEmpty();
     }
 
     private void monitorRegionServers(Map<String, List<HRegionInfo>> rsAndRMap) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 553f756..f989d09 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -1331,7 +1331,7 @@ public class HBaseFsck extends Configured implements Closeable {
   public void fixOrphanTables() throws IOException {
     if (shouldFixTableOrphans() && !orphanTableDirs.isEmpty()) {
 
-      List<TableName> tmpList = new ArrayList<TableName>();
+      List<TableName> tmpList = new ArrayList<TableName>(orphanTableDirs.keySet().size());
       tmpList.addAll(orphanTableDirs.keySet());
       HTableDescriptor[] htds = getHTableDescriptors(tmpList);
       Iterator<Entry<TableName, Set<String>>> iter =
@@ -2531,7 +2531,7 @@ public class HBaseFsck extends Configured implements Closeable {
       // the region chain in META
       //if (hbi.foundRegionDir == null) continue;
       //if (hbi.deployedOn.size() != 1) continue;
-      if (hbi.deployedOn.size() == 0) continue;
+      if (hbi.deployedOn.isEmpty()) continue;
 
       // We should be safe here
       TableName tableName = hbi.metaEntry.getTable();
@@ -3089,7 +3089,7 @@ public class HBaseFsck extends Configured implements Closeable {
       byte[] prevKey = null;
       byte[] problemKey = null;
 
-      if (splits.size() == 0) {
+      if (splits.isEmpty()) {
         // no region for this table
         handler.handleHoleInRegionChain(HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
       }
@@ -3145,7 +3145,7 @@ public class HBaseFsck extends Configured implements Closeable {
             }
           }
 
-        } else if (ranges.size() == 0) {
+        } else if (ranges.isEmpty()) {
           if (problemKey != null) {
             LOG.warn("reached end of problem group: " + Bytes.toStringBinary(key));
           }
@@ -3377,7 +3377,7 @@ public class HBaseFsck extends Configured implements Closeable {
       }
       if (servers.size() != 1) {
         noProblem = false;
-        if (servers.size() == 0) {
+        if (servers.isEmpty()) {
           assignMetaReplica(i);
         } else if (servers.size() > 1) {
           errors
@@ -4466,7 +4466,7 @@ public class HBaseFsck extends Configured implements Closeable {
    * Empty list means all tables are included.
    */
   boolean isTableIncluded(TableName table) {
-    return (tablesIncluded.size() == 0) || tablesIncluded.contains(table);
+    return (tablesIncluded.isEmpty()) || tablesIncluded.contains(table);
   }
 
   public void includeTable(TableName table) {
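
In the first hunk of this file the new capacity comes from orphanTableDirs.keySet().size(), with the elements copied in by addAll on the following line. The ArrayList(Collection) copy constructor does the same sizing and copying in one call; a short sketch of that alternative (illustrative only, not what the patch does):

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;

    final class CopyConstructorSketch {
      // Sizes the backing array from the source collection and copies it in a
      // single step, equivalent to new ArrayList<T>(src.size()) plus addAll(src).
      static <T> List<T> snapshotOf(Collection<T> src) {
        return new ArrayList<T>(src);
      }
    }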

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
index cc84093..7f283e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
@@ -118,7 +118,7 @@ public class IdLock {
 
   /** For testing */
   void assertMapEmpty() {
-    assert map.size() == 0;
+    assert map.isEmpty();
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index 08fe076..01ee201 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -463,17 +463,17 @@ public class RegionMover extends AbstractHBaseTool {
       boolean ack, List<HRegionInfo> movedRegions) throws Exception {
     List<HRegionInfo> regionsToMove = new ArrayList<HRegionInfo>();// FindBugs: DLS_DEAD_LOCAL_STORE
     regionsToMove = getRegions(this.conf, server);
-    if (regionsToMove.size() == 0) {
+    if (regionsToMove.isEmpty()) {
       LOG.info("No Regions to move....Quitting now");
       return;
-    } else if (regionServers.size() == 0) {
+    } else if (regionServers.isEmpty()) {
       LOG.warn("No Regions were moved - no servers available");
       throw new Exception("No online region servers");
     }
     while (true) {
       regionsToMove = getRegions(this.conf, server);
       regionsToMove.removeAll(movedRegions);
-      if (regionsToMove.size() == 0) {
+      if (regionsToMove.isEmpty()) {
         break;
       }
       int counter = 0;
@@ -823,7 +823,7 @@ public class RegionMover extends AbstractHBaseTool {
   private ArrayList<String> getServers(Admin admin) throws IOException {
     ArrayList<ServerName> serverInfo =
         new ArrayList<ServerName>(admin.getClusterStatus().getServers());
-    ArrayList<String> regionServers = new ArrayList<String>();
+    ArrayList<String> regionServers = new ArrayList<String>(serverInfo.size());
     for (ServerName server : serverInfo) {
       regionServers.add(server.getServerName());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index c0ce639..820da7a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -174,7 +174,7 @@ public class HFileCorruptionChecker {
 
     List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
     // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
-    if (hfs.size() == 0 && !fs.exists(cfDir)) {
+    if (hfs.isEmpty() && !fs.exists(cfDir)) {
       LOG.warn("Colfam Directory " + cfDir +
           " does not exist.  Likely due to concurrent split/compaction. Skipping.");
       missing.add(cfDir);
@@ -207,7 +207,7 @@ public class HFileCorruptionChecker {
 
     List<FileStatus> hfs = FSUtils.filterFileStatuses(statuses, new HFileFilter(fs));
     // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
-    if (hfs.size() == 0 && !fs.exists(cfDir)) {
+    if (hfs.isEmpty() && !fs.exists(cfDir)) {
       LOG.warn("Mob colfam Directory " + cfDir +
           " does not exist.  Likely the table is deleted. Skipping.");
       missedMobFiles.add(cfDir);
@@ -311,7 +311,7 @@ public class HFileCorruptionChecker {
 
     List<FileStatus> cfs = FSUtils.filterFileStatuses(statuses, new FamilyDirFilter(fs));
     // Hadoop 1.0 listStatus does not throw an exception if the path does not exist.
-    if (cfs.size() == 0 && !fs.exists(regionDir)) {
+    if (cfs.isEmpty() && !fs.exists(regionDir)) {
       LOG.warn("Region Directory " + regionDir +
           " does not exist.  Likely due to concurrent split/compaction. Skipping.");
       missing.add(regionDir);
@@ -343,7 +343,7 @@ public class HFileCorruptionChecker {
     }
 
     // Parallelize check at the region dir level
-    List<RegionDirChecker> rdcs = new ArrayList<RegionDirChecker>();
+    List<RegionDirChecker> rdcs = new ArrayList<RegionDirChecker>(rds.size() + 1);
     List<Future<Void>> rdFutures;
 
     for (FileStatus rdFs : rds) {
@@ -541,7 +541,7 @@ public class HFileCorruptionChecker {
       out.print("      " + mq);
     }
 
-    String initialState = (corrupted.size() == 0) ? "OK" : "CORRUPTED";
+    String initialState = (corrupted.isEmpty()) ? "OK" : "CORRUPTED";
     String fixedState = (corrupted.size() == quarantined.size()) ? "OK"
         : "CORRUPTED";
 
@@ -560,7 +560,7 @@ public class HFileCorruptionChecker {
     for (Path mq : missedMobFiles) {
       out.print("      " + mq);
     }
-    String initialMobState = (corruptedMobFiles.size() == 0) ? "OK" : "CORRUPTED";
+    String initialMobState = (corruptedMobFiles.isEmpty()) ? "OK" : "CORRUPTED";
     String fixedMobState = (corruptedMobFiles.size() == quarantinedMobFiles.size()) ? "OK"
         : "CORRUPTED";
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
index 76a6415..5a801c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/AbstractFSWALProvider.java
@@ -115,7 +115,7 @@ public abstract class AbstractFSWALProvider<T extends AbstractFSWAL<?>> implemen
     if (wal == null) {
       return Collections.emptyList();
     }
-    List<WAL> wals = new ArrayList<WAL>();
+    List<WAL> wals = new ArrayList<WAL>(1);
     wals.add(wal);
     return wals;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
index 7f10d7d..337f2b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
@@ -67,7 +67,7 @@ class DisabledWALProvider implements WALProvider {
 
   @Override
   public List<WAL> getWALs() throws IOException {
-    List<WAL> wals = new ArrayList<WAL>();
+    List<WAL> wals = new ArrayList<WAL>(1);
     wals.add(disabled);
     return wals;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
index 3bf01bc..63e6649 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
@@ -229,7 +229,7 @@ public class WALKey implements SequenceId, Comparable<WALKey> {
   public WALKey(final byte[] encodedRegionName, final TableName tablename,
                 long logSeqNum,
       final long now, UUID clusterId) {
-    List<UUID> clusterIds = new ArrayList<UUID>();
+    List<UUID> clusterIds = new ArrayList<UUID>(1);
     clusterIds.add(clusterId);
     init(encodedRegionName, tablename, logSeqNum, now, clusterIds,
         HConstants.NO_NONCE, HConstants.NO_NONCE, null, null);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
index b212fe6..a6fd85f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALPrettyPrinter.java
@@ -293,7 +293,7 @@ public class WALPrettyPrinter {
             actions.add(op);
           }
         }
-        if (actions.size() == 0)
+        if (actions.isEmpty())
           continue;
         txn.put("actions", actions);
         if (outputJSON) {
@@ -381,7 +381,7 @@ public class WALPrettyPrinter {
     try {
       CommandLine cmd = parser.parse(options, args);
       files = cmd.getArgList();
-      if (files.size() == 0 || cmd.hasOption("h")) {
+      if (files.isEmpty() || cmd.hasOption("h")) {
         HelpFormatter formatter = new HelpFormatter();
         formatter.printHelp("WAL <filename...>", options, true);
         System.exit(-1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
index ab74318..ff73073 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/MiniZooKeeperCluster.java
@@ -350,7 +350,7 @@ public class MiniZooKeeperCluster {
     LOG.info("Kill the current active ZK servers in the cluster " +
         "on client port: " + clientPort);
 
-    if (standaloneServerFactoryList.size() == 0) {
+    if (standaloneServerFactoryList.isEmpty()) {
       // there is no backup servers;
       return -1;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
index b0a6137..18a4d86 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestPartialResultsFromClientSide.java
@@ -681,7 +681,7 @@ public class TestPartialResultsFromClientSide {
       LOG.info("Actual count: " + result.size());
     }
 
-    if (expKvList.size() == 0) return;
+    if (expKvList.isEmpty()) return;
 
     int i = 0;
     for (Cell kv : result.rawCells()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
index ae93e67..ff7d211 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
@@ -2233,7 +2233,7 @@ public class TestFromClientSide {
       get = new Get(bytes);
       get.addFamily(FAMILIES[0]);
       result = ht.get(get);
-      assertTrue(result.size() == 0);
+      assertTrue(result.isEmpty());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index 8862109..c48ec31 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -744,7 +744,7 @@ public class TestScannersFromClientSide {
     LOG.info(msg);
     LOG.info("Expected count: " + expKvList.size());
     LOG.info("Actual count: " + result.size());
-    if (expKvList.size() == 0)
+    if (expKvList.isEmpty())
       return;
 
     int i = 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
index 5078038..59ccbcf 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
@@ -466,7 +466,7 @@ public class TestDistributedLogSplitting {
       public boolean evaluate() throws Exception {
         List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(
             zkw.znodePaths.recoveringRegionsZNode, false);
-        return (recoveringRegions != null && recoveringRegions.size() == 0);
+        return (recoveringRegions != null && recoveringRegions.isEmpty());
       }
     });
   }
@@ -573,7 +573,7 @@ public class TestDistributedLogSplitting {
         public boolean evaluate() throws Exception {
           List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(
               zkw.znodePaths.recoveringRegionsZNode, false);
-          boolean done = recoveringRegions != null && recoveringRegions.size() == 0;
+          boolean done = recoveringRegions != null && recoveringRegions.isEmpty();
           if (!done) {
             LOG.info("Recovering regions: " + recoveringRegions);
           }
@@ -665,7 +665,7 @@ public class TestDistributedLogSplitting {
         public boolean evaluate() throws Exception {
           List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(
               zkw.znodePaths.recoveringRegionsZNode, false);
-          return (recoveringRegions != null && recoveringRegions.size() == 0);
+          return (recoveringRegions != null && recoveringRegions.isEmpty());
         }
       });
 
@@ -717,7 +717,7 @@ public class TestDistributedLogSplitting {
       TEST_UTIL.waitFor(60000, 1000, new Waiter.Predicate<Exception>() {
         @Override
         public boolean evaluate() throws Exception {
-          return (tmphrs.getRecoveringRegions().size() == 0);
+          return (tmphrs.getRecoveringRegions().isEmpty());
         }
       });
     } finally {
@@ -868,7 +868,7 @@ public class TestDistributedLogSplitting {
               zkw.znodePaths.recoveringRegionsZNode, false);
           ServerManager serverManager = master.getServerManager();
           return (!serverManager.areDeadServersInProgress() &&
-              recoveringRegions != null && recoveringRegions.size() == 0);
+              recoveringRegions != null && recoveringRegions.isEmpty());
         }
       });
 
@@ -1111,7 +1111,7 @@ public class TestDistributedLogSplitting {
         public boolean evaluate() throws Exception {
           List<String> recoveringRegions = zkw.getRecoverableZooKeeper().getChildren(
               zkw.znodePaths.recoveringRegionsZNode, false);
-          return (recoveringRegions != null && recoveringRegions.size() == 0);
+          return (recoveringRegions != null && recoveringRegions.isEmpty());
         }
       });
 
@@ -1286,7 +1286,7 @@ public class TestDistributedLogSplitting {
           it.remove();
         }
       }
-      if (regions.size() == 0) return;
+      if (regions.isEmpty()) return;
       HRegionInfo curRegionInfo = regions.get(0);
       byte[] startRow = curRegionInfo.getStartKey();
       if (startRow == null || startRow.length == 0) {
@@ -1381,7 +1381,7 @@ public class TestDistributedLogSplitting {
           it.remove();
         }
       }
-      if (regions.size() == 0) return;
+      if (regions.isEmpty()) return;
       HRegionInfo curRegionInfo = regions.get(0);
       byte[] startRow = curRegionInfo.getStartKey();
       if (startRow == null || startRow.length == 0) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
index 78572fd..b2be237 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionPlacement.java
@@ -150,13 +150,13 @@ public class TestRegionPlacement {
     rp.setTargetTableName(new String[]{tableStr});
     List<AssignmentVerificationReport> reports = rp.verifyRegionPlacement(false);
     AssignmentVerificationReport report = reports.get(0);
-    assertTrue(report.getRegionsWithoutValidFavoredNodes().size() == 0);
-    assertTrue(report.getNonFavoredAssignedRegions().size() == 0);
+    assertTrue(report.getRegionsWithoutValidFavoredNodes().isEmpty());
+    assertTrue(report.getNonFavoredAssignedRegions().isEmpty());
     assertTrue(report.getTotalFavoredAssignments() >= REGION_NUM);
     assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.PRIMARY) != 0);
     assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.SECONDARY) == 0);
     assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.TERTIARY) == 0);
-    assertTrue(report.getUnassignedRegions().size() == 0);
+    assertTrue(report.getUnassignedRegions().isEmpty());
 
     // Check when a RS stops, the regions get assigned to their secondary/tertiary
     killRandomServerAndVerifyAssignment();
@@ -164,8 +164,8 @@ public class TestRegionPlacement {
     // also verify that the AssignmentVerificationReport has the correct information
     reports = rp.verifyRegionPlacement(false);
     report = reports.get(0);
-    assertTrue(report.getRegionsWithoutValidFavoredNodes().size() == 0);
-    assertTrue(report.getNonFavoredAssignedRegions().size() == 0);
+    assertTrue(report.getRegionsWithoutValidFavoredNodes().isEmpty());
+    assertTrue(report.getNonFavoredAssignedRegions().isEmpty());
     assertTrue(report.getTotalFavoredAssignments() >= REGION_NUM);
     assertTrue(report.getNumRegionsOnFavoredNodeByPosition(FavoredNodesPlan.Position.PRIMARY) > 0);
     assertTrue("secondary " +

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
index eca7703..c59f64b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestColumnSeeking.java
@@ -147,7 +147,7 @@ public class TestColumnSeeking {
         Scan scan = new Scan();
         scan.setMaxVersions();
         if (i < numberOfTests) {
-          if (columnLists[i].size() == 0) continue; // HBASE-7700
+          if (columnLists[i].isEmpty()) continue; // HBASE-7700
           kvSet = kvMaps[i].values();
           for (String column : columnLists[i]) {
             scan.addColumn(familyBytes, Bytes.toBytes(column));
@@ -259,7 +259,7 @@ public class TestColumnSeeking {
       Scan scan = new Scan();
       scan.setMaxVersions();
       if (i < numberOfTests) {
-        if (columnLists[i].size() == 0) continue; // HBASE-7700
+        if (columnLists[i].isEmpty()) continue; // HBASE-7700
         kvSet = kvMaps[i].values();
         for (String column : columnLists[i]) {
           scan.addColumn(familyBytes, Bytes.toBytes(column));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
index 1513cd0..4fa18b8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultCompactSelection.java
@@ -176,7 +176,7 @@ public class TestDefaultCompactSelection extends TestCompactionPolicy {
     CompactionRequest result = ((RatioBasedCompactionPolicy) store.storeEngine
         .getCompactionPolicy()).selectCompaction(candidates,
         new ArrayList<StoreFile>(), false, false, false);
-    Assert.assertTrue(result.getFiles().size() == 0);
+    Assert.assertTrue(result.getFiles().isEmpty());
     store.setScanInfo(oldScanInfo);
   }
 }
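
The assertion above keeps the isEmpty() form. When such an assertion fails, a message that prints the collection makes the surviving entries visible in the JUnit report; a hypothetical helper sketching that (not part of this patch):

    import static org.junit.Assert.assertTrue;

    import java.util.Collection;

    final class EmptyAssertionSketch {
      // Fails with the leftover elements in the message instead of a bare
      // AssertionError when the collection is not empty.
      static void assertNowEmpty(String what, Collection<?> c) {
        assertTrue(what + " should be empty but was " + c, c.isEmpty());
      }
    }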

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
index e2a57eb..c722621 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEndToEndSplitTransaction.java
@@ -152,7 +152,7 @@ public class TestEndToEndSplitTransaction {
         for (int i= 0; i< 5; i++) {
           List<HRegionInfo> regions =
               MetaTableAccessor.getTableRegions(connection, tableName, true);
-          if (regions.size() == 0) {
+          if (regions.isEmpty()) {
             continue;
           }
           int regionIndex = random.nextInt(regions.size());

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index d40a684..ba27622 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -4756,7 +4756,7 @@ public class TestHRegion {
       primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
       Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionFileSystem()
           .getStoreFiles(families[0]);
-      Assert.assertTrue(storeFileInfos == null || storeFileInfos.size() == 0);
+      Assert.assertTrue(storeFileInfos == null || storeFileInfos.isEmpty());
 
       verifyData(secondaryRegion, 0, 1000, cq, families);
     } finally {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 364f355..043473a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -229,7 +229,7 @@ public class TestSplitTransactionOnCluster {
     htd.addFamily(new HColumnDescriptor(cf));
     admin.createTable(htd);
 
-    for (int i = 0; cluster.getRegions(tableName).size() == 0 && i < 100; i++) {
+    for (int i = 0; cluster.getRegions(tableName).isEmpty() && i < 100; i++) {
       Thread.sleep(100);
     }
     assertEquals(1, cluster.getRegions(tableName).size());

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java
index c23e794..aa5a20e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCompactedHFilesDischarger.java
@@ -77,7 +77,7 @@ public class TestCompactedHFilesDischarger {
     Path path = testUtil.getDataTestDir(getClass().getSimpleName());
     region = HBaseTestingUtility.createRegionAndWAL(info, path, testUtil.getConfiguration(), htd);
     rss = mock(RegionServerServices.class);
-    List<Region> regions = new ArrayList<Region>();
+    List<Region> regions = new ArrayList<Region>(1);
     regions.add(region);
     when(rss.getOnlineRegions()).thenReturn(regions);
   }
@@ -153,7 +153,7 @@ public class TestCompactedHFilesDischarger {
       assertFalse(file.isCompactedAway());
     }
     compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
-    assertTrue(compactedfiles.size() == 0);
+    assertTrue(compactedfiles.isEmpty());
     
   }
 
@@ -225,7 +225,7 @@ public class TestCompactedHFilesDischarger {
       assertFalse(file.isCompactedAway());
     }
     compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
-    assertTrue(compactedfiles.size() == 0);
+    assertTrue(compactedfiles.isEmpty());
   }
 
   @Test
@@ -333,7 +333,7 @@ public class TestCompactedHFilesDischarger {
       assertFalse(file.isCompactedAway());
     }
     compactedfiles = ((HStore) store).getStoreEngine().getStoreFileManager().getCompactedfiles();
-    assertTrue(compactedfiles.size() == 0);
+    assertTrue(compactedfiles.isEmpty());
   }
 
   protected void countDown() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
index fa6e62a..5fadee8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
@@ -666,7 +666,7 @@ public class TestStripeCompactionPolicy {
    */
   private static StripeInformationProvider createStripesWithSizes(
       int l0Count, long l0Size, Long[]... sizes) throws Exception {
-    ArrayList<List<Long>> sizeList = new ArrayList<List<Long>>();
+    ArrayList<List<Long>> sizeList = new ArrayList<List<Long>>(sizes.length);
     for (Long[] size : sizes) {
       sizeList.add(Arrays.asList(size));
     }
@@ -695,7 +695,7 @@ public class TestStripeCompactionPolicy {
       List<List<Long>> stripeSizes, List<Long> l0Sizes) throws Exception {
     List<List<StoreFile>> stripeFiles = new ArrayList<List<StoreFile>>(stripeSizes.size());
     for (List<Long> sizes : stripeSizes) {
-      List<StoreFile> sfs = new ArrayList<StoreFile>();
+      List<StoreFile> sfs = new ArrayList<StoreFile>(sizes.size());
       for (Long size : sizes) {
         sfs.add(createFile(size));
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
index 97331e8..4b82940 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactor.java
@@ -177,7 +177,7 @@ public class TestStripeCompactor {
       null, NoLimitThroughputController.INSTANCE, null);
     assertEquals(output.length, paths.size());
     writers.verifyKvs(output, true, true);
-    List<byte[]> boundaries = new ArrayList<byte[]>();
+    List<byte[]> boundaries = new ArrayList<byte[]>(output.length + 2);
     boundaries.add(left);
     for (int i = 1; i < output.length; ++i) {
       boundaries.add(CellUtil.cloneRow(output[i][0]));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java
index a7e0667..3480597 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestExplicitColumnTracker.java
@@ -49,7 +49,7 @@ public class TestExplicitColumnTracker {
     ColumnTracker exp = new ExplicitColumnTracker(trackColumns, 0, maxVersions, Long.MIN_VALUE);
 
     // Initialize result
-    List<ScanQueryMatcher.MatchCode> result = new ArrayList<ScanQueryMatcher.MatchCode>();
+    List<ScanQueryMatcher.MatchCode> result = new ArrayList<ScanQueryMatcher.MatchCode>(scannerColumns.size());
 
     long timestamp = 0;
     // "Match"
@@ -71,7 +71,7 @@ public class TestExplicitColumnTracker {
     // Looking for every other
     columns.add(col2);
     columns.add(col4);
-    List<MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>();
+    List<MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>(5);
     expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); // col1
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL); // col2
     expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL); // col3
@@ -80,7 +80,7 @@ public class TestExplicitColumnTracker {
     int maxVersions = 1;
 
     // Create "Scanner"
-    List<byte[]> scanner = new ArrayList<byte[]>();
+    List<byte[]> scanner = new ArrayList<byte[]>(5);
     scanner.add(col1);
     scanner.add(col2);
     scanner.add(col3);
@@ -98,7 +98,7 @@ public class TestExplicitColumnTracker {
     columns.add(col2);
     columns.add(col4);
 
-    List<ScanQueryMatcher.MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>();
+    List<ScanQueryMatcher.MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>(15);
     expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL);
     expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL);
     expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL);
@@ -121,7 +121,7 @@ public class TestExplicitColumnTracker {
     int maxVersions = 2;
 
     // Create "Scanner"
-    List<byte[]> scanner = new ArrayList<byte[]>();
+    List<byte[]> scanner = new ArrayList<byte[]>(15);
     scanner.add(col1);
     scanner.add(col1);
     scanner.add(col1);

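A minimal sketch of the second pattern running through these test diffs (hypothetical names, not from the patch): when the element count is known up front, passing it to the ArrayList constructor sizes the backing array once and avoids incremental resizing while the list is filled.

  import java.util.ArrayList;
  import java.util.List;

  public class PresizedListExample {
    public static void main(String[] args) {
      byte[][] qualifiers = { {1}, {2}, {3}, {4} };
      // The final size is known, so allocate the backing array once instead
      // of letting the list grow as elements are added.
      List<byte[]> copies = new ArrayList<byte[]>(qualifiers.length);
      for (byte[] qualifier : qualifiers) {
        copies.add(qualifier);
      }
      System.out.println("copied " + copies.size() + " qualifiers");
    }
  }
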
http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java
index 044988b..2852947 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestScanWildcardColumnTracker.java
@@ -44,20 +44,20 @@ public class TestScanWildcardColumnTracker {
     ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker(0, VERSIONS, Long.MIN_VALUE);
 
     // Create list of qualifiers
-    List<byte[]> qualifiers = new ArrayList<byte[]>();
+    List<byte[]> qualifiers = new ArrayList<byte[]>(4);
     qualifiers.add(Bytes.toBytes("qualifier1"));
     qualifiers.add(Bytes.toBytes("qualifier2"));
     qualifiers.add(Bytes.toBytes("qualifier3"));
     qualifiers.add(Bytes.toBytes("qualifier4"));
 
     // Setting up expected result
-    List<MatchCode> expected = new ArrayList<MatchCode>();
+    List<MatchCode> expected = new ArrayList<MatchCode>(4);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
 
-    List<ScanQueryMatcher.MatchCode> actual = new ArrayList<MatchCode>();
+    List<ScanQueryMatcher.MatchCode> actual = new ArrayList<MatchCode>(qualifiers.size());
 
     for (byte[] qualifier : qualifiers) {
       ScanQueryMatcher.MatchCode mc = ScanQueryMatcher.checkColumn(tracker, qualifier, 0,
@@ -76,20 +76,20 @@ public class TestScanWildcardColumnTracker {
     ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker(0, VERSIONS, Long.MIN_VALUE);
 
     // Create list of qualifiers
-    List<byte[]> qualifiers = new ArrayList<byte[]>();
+    List<byte[]> qualifiers = new ArrayList<byte[]>(4);
     qualifiers.add(Bytes.toBytes("qualifier1"));
     qualifiers.add(Bytes.toBytes("qualifier1"));
     qualifiers.add(Bytes.toBytes("qualifier1"));
     qualifiers.add(Bytes.toBytes("qualifier2"));
 
     // Setting up expected result
-    List<ScanQueryMatcher.MatchCode> expected = new ArrayList<MatchCode>();
+    List<ScanQueryMatcher.MatchCode> expected = new ArrayList<MatchCode>(4);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
     expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
 
-    List<MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>();
+    List<MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>(qualifiers.size());
 
     long timestamp = 0;
     for (byte[] qualifier : qualifiers) {
@@ -109,7 +109,7 @@ public class TestScanWildcardColumnTracker {
     ScanWildcardColumnTracker tracker = new ScanWildcardColumnTracker(0, VERSIONS, Long.MIN_VALUE);
 
     // Create list of qualifiers
-    List<byte[]> qualifiers = new ArrayList<byte[]>();
+    List<byte[]> qualifiers = new ArrayList<byte[]>(2);
     qualifiers.add(Bytes.toBytes("qualifier2"));
     qualifiers.add(Bytes.toBytes("qualifier1"));
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
index 04c3611..0831404 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/querymatcher/TestUserScanQueryMatcher.java
@@ -69,7 +69,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     // of just byte []
 
     // Expected result
-    List<MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>();
+    List<MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>(6);
     expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE_AND_SEEK_NEXT_COL);
     expected.add(ScanQueryMatcher.MatchCode.SEEK_NEXT_COL);
@@ -83,7 +83,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
       new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE, 0, rowComparator),
       get.getFamilyMap().get(fam2), now - ttl, now, null);
 
-    List<KeyValue> memstore = new ArrayList<KeyValue>();
+    List<KeyValue> memstore = new ArrayList<KeyValue>(6);
     memstore.add(new KeyValue(row1, fam2, col1, 1, data));
     memstore.add(new KeyValue(row1, fam2, col2, 1, data));
     memstore.add(new KeyValue(row1, fam2, col3, 1, data));
@@ -92,7 +92,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
 
     memstore.add(new KeyValue(row2, fam1, col1, data));
 
-    List<ScanQueryMatcher.MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>();
+    List<ScanQueryMatcher.MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>(memstore.size());
     KeyValue k = memstore.get(0);
     qm.setToNewRow(k);
 
@@ -113,7 +113,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     // of just byte []
 
     // Expected result
-    List<MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>();
+    List<MatchCode> expected = new ArrayList<ScanQueryMatcher.MatchCode>(6);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
     expected.add(ScanQueryMatcher.MatchCode.INCLUDE);
@@ -126,7 +126,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
       new ScanInfo(this.conf, fam2, 0, 1, ttl, KeepDeletedCells.FALSE, 0, rowComparator), null,
       now - ttl, now, null);
 
-    List<KeyValue> memstore = new ArrayList<KeyValue>();
+    List<KeyValue> memstore = new ArrayList<KeyValue>(6);
     memstore.add(new KeyValue(row1, fam2, col1, 1, data));
     memstore.add(new KeyValue(row1, fam2, col2, 1, data));
     memstore.add(new KeyValue(row1, fam2, col3, 1, data));
@@ -134,7 +134,7 @@ public class TestUserScanQueryMatcher extends AbstractTestScanQueryMatcher {
     memstore.add(new KeyValue(row1, fam2, col5, 1, data));
     memstore.add(new KeyValue(row2, fam1, col1, 1, data));
 
-    List<ScanQueryMatcher.MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>();
+    List<ScanQueryMatcher.MatchCode> actual = new ArrayList<ScanQueryMatcher.MatchCode>(memstore.size());
 
     KeyValue k = memstore.get(0);
     qm.setToNewRow(k);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
index 19759d1..b5c464e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/AbstractTestFSWAL.java
@@ -399,7 +399,7 @@ public abstract class AbstractTestFSWAL {
         }
       }
       // Add any old cluster id.
-      List<UUID> clusterIds = new ArrayList<UUID>();
+      List<UUID> clusterIds = new ArrayList<UUID>(1);
       clusterIds.add(UUID.randomUUID());
       // Now make appends run slow.
       goslow.set(true);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
index db2c241..19c534e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestLogRolling.java
@@ -245,7 +245,7 @@ public class TestLogRolling extends AbstractTestLogRolling {
       server = TEST_UTIL.getRSForFirstRegionInTable(desc.getTableName());
       HRegionInfo region = server.getOnlineRegions(desc.getTableName()).get(0).getRegionInfo();
       final WAL log = server.getWAL(region);
-      final List<Path> paths = new ArrayList<Path>();
+      final List<Path> paths = new ArrayList<Path>(1);
       final List<Integer> preLogRolledCalled = new ArrayList<Integer>();
 
       paths.add(AbstractFSWALProvider.getCurrentFileName(log));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
index b6bb7a0..52f8fe7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestWALActionsListener.java
@@ -85,7 +85,7 @@ public class TestWALActionsListener {
   @Test
   public void testActionListener() throws Exception {
     DummyWALActionsListener observer = new DummyWALActionsListener();
-    List<WALActionsListener> list = new ArrayList<WALActionsListener>();
+    List<WALActionsListener> list = new ArrayList<WALActionsListener>(1);
     list.add(observer);
     final WALFactory wals = new WALFactory(conf, list, "testActionListener");
     DummyWALActionsListener laterobserver = new DummyWALActionsListener();

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
index 7ac5e94..c1a9e25 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMasterReplication.java
@@ -195,7 +195,7 @@ public class TestMasterReplication {
         ServerLoad serverLoad = clusterStatus.getLoad(rsName);
         List<ReplicationLoadSource> replicationLoadSourceList =
             serverLoad.getReplicationLoadSourceList();
-        return replicationLoadSourceList.size() == 0;
+        return replicationLoadSourceList.isEmpty();
       }
     });
 
@@ -673,7 +673,7 @@ public class TestMasterReplication {
             + ". IsDeleteReplication:" + isDeleted);
       }
       Result res = target.get(get);
-      boolean sleep = isDeleted ? res.size() > 0 : res.size() == 0;
+      boolean sleep = isDeleted ? res.size() > 0 : res.isEmpty();
       if (sleep) {
         LOG.info("Waiting for more time for replication. Row:"
             + Bytes.toString(row) + ". IsDeleteReplication:" + isDeleted);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
index cddc055..29e7d67 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestMultiSlaveReplication.java
@@ -321,7 +321,7 @@ public class TestMultiSlaveReplication {
       boolean replicatedToAll = true;
       for (Table target : targets) {
         Result res = target.get(get);
-        if (res.size() == 0) {
+        if (res.isEmpty()) {
           LOG.info("Row not available");
           replicatedToAll = false;
           break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java
index ee9b0cb..3814562 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestNamespaceReplication.java
@@ -214,7 +214,7 @@ public class TestNamespaceReplication extends TestReplicationBase {
           fail("Waited too much time for put replication");
         }
         Result res = target.get(get);
-        if (res.size() == 0) {
+        if (res.isEmpty()) {
           LOG.info("Row not available");
         } else {
           assertEquals(res.size(), 1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
index bd70be0..b73dc80 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestPerTableCFReplication.java
@@ -569,7 +569,7 @@ public class TestPerTableCFReplication {
       boolean replicatedToAll = true;
       for (Table target : targets) {
         Result res = target.get(get);
-        if (res.size() == 0) {
+        if (res.isEmpty()) {
           LOG.info("Row not available");
           replicatedToAll = false;
           break;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
index 53aabfe..8c218d0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationChangingPeerRegionservers.java
@@ -130,7 +130,7 @@ public class TestReplicationChangingPeerRegionservers extends TestReplicationBas
         fail("Waited too much time for put replication");
       }
       Result res = htable2.get(get);
-      if (res.size() == 0) {
+      if (res.isEmpty()) {
         LOG.info("Row not available");
         Thread.sleep(SLEEP_TIME);
       } else {


[09/50] [abbrv] hbase git commit: HBASE-17470 Remove merge region code from region server (Stephen Yuan Jiang)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
index 8f5992c..f4f6a8c 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProcedureProtos.java
@@ -1716,123 +1716,6 @@ public final class MasterProcedureProtos {
   }
 
   /**
-   * Protobuf enum {@code hbase.pb.DispatchMergingRegionsState}
-   */
-  public enum DispatchMergingRegionsState
-      implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
-    /**
-     * <code>DISPATCH_MERGING_REGIONS_PREPARE = 1;</code>
-     */
-    DISPATCH_MERGING_REGIONS_PREPARE(1),
-    /**
-     * <code>DISPATCH_MERGING_REGIONS_PRE_OPERATION = 2;</code>
-     */
-    DISPATCH_MERGING_REGIONS_PRE_OPERATION(2),
-    /**
-     * <code>DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS = 3;</code>
-     */
-    DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS(3),
-    /**
-     * <code>DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS = 4;</code>
-     */
-    DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS(4),
-    /**
-     * <code>DISPATCH_MERGING_REGIONS_POST_OPERATION = 5;</code>
-     */
-    DISPATCH_MERGING_REGIONS_POST_OPERATION(5),
-    ;
-
-    /**
-     * <code>DISPATCH_MERGING_REGIONS_PREPARE = 1;</code>
-     */
-    public static final int DISPATCH_MERGING_REGIONS_PREPARE_VALUE = 1;
-    /**
-     * <code>DISPATCH_MERGING_REGIONS_PRE_OPERATION = 2;</code>
-     */
-    public static final int DISPATCH_MERGING_REGIONS_PRE_OPERATION_VALUE = 2;
-    /**
-     * <code>DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS = 3;</code>
-     */
-    public static final int DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS_VALUE = 3;
-    /**
-     * <code>DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS = 4;</code>
-     */
-    public static final int DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS_VALUE = 4;
-    /**
-     * <code>DISPATCH_MERGING_REGIONS_POST_OPERATION = 5;</code>
-     */
-    public static final int DISPATCH_MERGING_REGIONS_POST_OPERATION_VALUE = 5;
-
-
-    public final int getNumber() {
-      return value;
-    }
-
-    /**
-     * @deprecated Use {@link #forNumber(int)} instead.
-     */
-    @java.lang.Deprecated
-    public static DispatchMergingRegionsState valueOf(int value) {
-      return forNumber(value);
-    }
-
-    public static DispatchMergingRegionsState forNumber(int value) {
-      switch (value) {
-        case 1: return DISPATCH_MERGING_REGIONS_PREPARE;
-        case 2: return DISPATCH_MERGING_REGIONS_PRE_OPERATION;
-        case 3: return DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS;
-        case 4: return DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS;
-        case 5: return DISPATCH_MERGING_REGIONS_POST_OPERATION;
-        default: return null;
-      }
-    }
-
-    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<DispatchMergingRegionsState>
-        internalGetValueMap() {
-      return internalValueMap;
-    }
-    private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
-        DispatchMergingRegionsState> internalValueMap =
-          new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<DispatchMergingRegionsState>() {
-            public DispatchMergingRegionsState findValueByNumber(int number) {
-              return DispatchMergingRegionsState.forNumber(number);
-            }
-          };
-
-    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
-        getValueDescriptor() {
-      return getDescriptor().getValues().get(ordinal());
-    }
-    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
-        getDescriptorForType() {
-      return getDescriptor();
-    }
-    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(14);
-    }
-
-    private static final DispatchMergingRegionsState[] VALUES = values();
-
-    public static DispatchMergingRegionsState valueOf(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
-      if (desc.getType() != getDescriptor()) {
-        throw new java.lang.IllegalArgumentException(
-          "EnumValueDescriptor is not for this type.");
-      }
-      return VALUES[desc.getIndex()];
-    }
-
-    private final int value;
-
-    private DispatchMergingRegionsState(int value) {
-      this.value = value;
-    }
-
-    // @@protoc_insertion_point(enum_scope:hbase.pb.DispatchMergingRegionsState)
-  }
-
-  /**
    * Protobuf enum {@code hbase.pb.MergeTableRegionsState}
    */
   public enum MergeTableRegionsState
@@ -1980,7 +1863,7 @@ public final class MasterProcedureProtos {
     }
     public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(15);
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(14);
     }
 
     private static final MergeTableRegionsState[] VALUES = values();
@@ -2142,7 +2025,7 @@ public final class MasterProcedureProtos {
     }
     public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(16);
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(15);
     }
 
     private static final SplitTableRegionState[] VALUES = values();
@@ -2303,7 +2186,7 @@ public final class MasterProcedureProtos {
     }
     public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(17);
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.getDescriptor().getEnumTypes().get(16);
     }
 
     private static final ServerCrashState[] VALUES = values();
@@ -20009,8 +19892,8 @@ public final class MasterProcedureProtos {
 
   }
 
-  public interface DispatchMergingRegionsStateDataOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:hbase.pb.DispatchMergingRegionsStateData)
+  public interface MergeTableRegionsStateDataOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsStateData)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
 
     /**
@@ -20027,63 +19910,63 @@ public final class MasterProcedureProtos {
     org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
 
     /**
-     * <code>required .hbase.pb.TableName table_name = 2;</code>
-     */
-    boolean hasTableName();
-    /**
-     * <code>required .hbase.pb.TableName table_name = 2;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName();
-    /**
-     * <code>required .hbase.pb.TableName table_name = 2;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
-
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
+     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
      */
     java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> 
         getRegionInfoList();
     /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
+     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
      */
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
     /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
+     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
      */
     int getRegionInfoCount();
     /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
+     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
      */
     java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
         getRegionInfoOrBuilderList();
     /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
+     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
      */
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
         int index);
 
     /**
-     * <code>optional bool forcible = 4;</code>
+     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
+     */
+    boolean hasMergedRegionInfo();
+    /**
+     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getMergedRegionInfo();
+    /**
+     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getMergedRegionInfoOrBuilder();
+
+    /**
+     * <code>optional bool forcible = 4 [default = false];</code>
      */
     boolean hasForcible();
     /**
-     * <code>optional bool forcible = 4;</code>
+     * <code>optional bool forcible = 4 [default = false];</code>
      */
     boolean getForcible();
   }
   /**
-   * Protobuf type {@code hbase.pb.DispatchMergingRegionsStateData}
+   * Protobuf type {@code hbase.pb.MergeTableRegionsStateData}
    */
-  public  static final class DispatchMergingRegionsStateData extends
+  public  static final class MergeTableRegionsStateData extends
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
-      // @@protoc_insertion_point(message_implements:hbase.pb.DispatchMergingRegionsStateData)
-      DispatchMergingRegionsStateDataOrBuilder {
-    // Use DispatchMergingRegionsStateData.newBuilder() to construct.
-    private DispatchMergingRegionsStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsStateData)
+      MergeTableRegionsStateDataOrBuilder {
+    // Use MergeTableRegionsStateData.newBuilder() to construct.
+    private MergeTableRegionsStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
       super(builder);
     }
-    private DispatchMergingRegionsStateData() {
+    private MergeTableRegionsStateData() {
       regionInfo_ = java.util.Collections.emptyList();
       forcible_ = false;
     }
@@ -20093,7 +19976,7 @@ public final class MasterProcedureProtos {
     getUnknownFields() {
       return this.unknownFields;
     }
-    private DispatchMergingRegionsStateData(
+    private MergeTableRegionsStateData(
         org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
         org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
@@ -20130,25 +20013,25 @@ public final class MasterProcedureProtos {
               break;
             }
             case 18: {
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000002) == 0x00000002)) {
-                subBuilder = tableName_.toBuilder();
-              }
-              tableName_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(tableName_);
-                tableName_ = subBuilder.buildPartial();
+              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+                regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo>();
+                mutable_bitField0_ |= 0x00000002;
               }
-              bitField0_ |= 0x00000002;
+              regionInfo_.add(
+                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
               break;
             }
             case 26: {
-              if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
-                regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo>();
-                mutable_bitField0_ |= 0x00000004;
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = mergedRegionInfo_.toBuilder();
               }
-              regionInfo_.add(
-                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+              mergedRegionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(mergedRegionInfo_);
+                mergedRegionInfo_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
               break;
             }
             case 32: {
@@ -20164,7 +20047,7 @@ public final class MasterProcedureProtos {
         throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
             e).setUnfinishedMessage(this);
       } finally {
-        if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
           regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
         }
         this.unknownFields = unknownFields.build();
@@ -20173,14 +20056,14 @@ public final class MasterProcedureProtos {
     }
     public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_DispatchMergingRegionsStateData_descriptor;
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MergeTableRegionsStateData_descriptor;
     }
 
     protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_DispatchMergingRegionsStateData_fieldAccessorTable
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MergeTableRegionsStateData_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData.Builder.class);
+              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.Builder.class);
     }
 
     private int bitField0_;
@@ -20205,72 +20088,72 @@ public final class MasterProcedureProtos {
       return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_;
     }
 
-    public static final int TABLE_NAME_FIELD_NUMBER = 2;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_;
-    /**
-     * <code>required .hbase.pb.TableName table_name = 2;</code>
-     */
-    public boolean hasTableName() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
-    }
-    /**
-     * <code>required .hbase.pb.TableName table_name = 2;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() {
-      return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
-    }
-    /**
-     * <code>required .hbase.pb.TableName table_name = 2;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
-      return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
-    }
-
-    public static final int REGION_INFO_FIELD_NUMBER = 3;
+    public static final int REGION_INFO_FIELD_NUMBER = 2;
     private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
     /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
+     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
      */
     public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
       return regionInfo_;
     }
     /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
+     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
      */
     public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
         getRegionInfoOrBuilderList() {
       return regionInfo_;
     }
     /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
+     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
      */
     public int getRegionInfoCount() {
       return regionInfo_.size();
     }
     /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
+     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
      */
     public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
       return regionInfo_.get(index);
     }
     /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
+     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
      */
     public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
         int index) {
       return regionInfo_.get(index);
     }
 
+    public static final int MERGED_REGION_INFO_FIELD_NUMBER = 3;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo mergedRegionInfo_;
+    /**
+     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
+     */
+    public boolean hasMergedRegionInfo() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getMergedRegionInfo() {
+      return mergedRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : mergedRegionInfo_;
+    }
+    /**
+     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getMergedRegionInfoOrBuilder() {
+      return mergedRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : mergedRegionInfo_;
+    }
+
     public static final int FORCIBLE_FIELD_NUMBER = 4;
     private boolean forcible_;
     /**
-     * <code>optional bool forcible = 4;</code>
+     * <code>optional bool forcible = 4 [default = false];</code>
      */
     public boolean hasForcible() {
       return ((bitField0_ & 0x00000004) == 0x00000004);
     }
     /**
-     * <code>optional bool forcible = 4;</code>
+     * <code>optional bool forcible = 4 [default = false];</code>
      */
     public boolean getForcible() {
       return forcible_;
@@ -20286,7 +20169,7 @@ public final class MasterProcedureProtos {
         memoizedIsInitialized = 0;
         return false;
       }
-      if (!hasTableName()) {
+      if (!hasMergedRegionInfo()) {
         memoizedIsInitialized = 0;
         return false;
       }
@@ -20294,16 +20177,16 @@ public final class MasterProcedureProtos {
         memoizedIsInitialized = 0;
         return false;
       }
-      if (!getTableName().isInitialized()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
       for (int i = 0; i < getRegionInfoCount(); i++) {
         if (!getRegionInfo(i).isInitialized()) {
           memoizedIsInitialized = 0;
           return false;
         }
       }
+      if (!getMergedRegionInfo().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -20313,11 +20196,11 @@ public final class MasterProcedureProtos {
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         output.writeMessage(1, getUserInfo());
       }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeMessage(2, getTableName());
-      }
       for (int i = 0; i < regionInfo_.size(); i++) {
-        output.writeMessage(3, regionInfo_.get(i));
+        output.writeMessage(2, regionInfo_.get(i));
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(3, getMergedRegionInfo());
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         output.writeBool(4, forcible_);
@@ -20334,13 +20217,13 @@ public final class MasterProcedureProtos {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
           .computeMessageSize(1, getUserInfo());
       }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+      for (int i = 0; i < regionInfo_.size(); i++) {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(2, getTableName());
+          .computeMessageSize(2, regionInfo_.get(i));
       }
-      for (int i = 0; i < regionInfo_.size(); i++) {
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(3, regionInfo_.get(i));
+          .computeMessageSize(3, getMergedRegionInfo());
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
@@ -20357,1281 +20240,10 @@ public final class MasterProcedureProtos {
       if (obj == this) {
        return true;
       }
-      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData)) {
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData)) {
         return super.equals(obj);
       }
-      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData) obj;
-
-      boolean result = true;
-      result = result && (hasUserInfo() == other.hasUserInfo());
-      if (hasUserInfo()) {
-        result = result && getUserInfo()
-            .equals(other.getUserInfo());
-      }
-      result = result && (hasTableName() == other.hasTableName());
-      if (hasTableName()) {
-        result = result && getTableName()
-            .equals(other.getTableName());
-      }
-      result = result && getRegionInfoList()
-          .equals(other.getRegionInfoList());
-      result = result && (hasForcible() == other.hasForcible());
-      if (hasForcible()) {
-        result = result && (getForcible()
-            == other.getForcible());
-      }
-      result = result && unknownFields.equals(other.unknownFields);
-      return result;
-    }
-
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasUserInfo()) {
-        hash = (37 * hash) + USER_INFO_FIELD_NUMBER;
-        hash = (53 * hash) + getUserInfo().hashCode();
-      }
-      if (hasTableName()) {
-        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getTableName().hashCode();
-      }
-      if (getRegionInfoCount() > 0) {
-        hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
-        hash = (53 * hash) + getRegionInfoList().hashCode();
-      }
-      if (hasForcible()) {
-        hash = (37 * hash) + FORCIBLE_FIELD_NUMBER;
-        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
-            getForcible());
-      }
-      hash = (29 * hash) + unknownFields.hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parseFrom(byte[] data)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parseFrom(
-        byte[] data,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parseFrom(
-        java.io.InputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseDelimitedWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parseDelimitedFrom(
-        java.io.InputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input, extensionRegistry);
-    }
-
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder() {
-      return DEFAULT_INSTANCE.toBuilder();
-    }
-    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData prototype) {
-      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() {
-      return this == DEFAULT_INSTANCE
-          ? new Builder() : new Builder().mergeFrom(this);
-    }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.DispatchMergingRegionsStateData}
-     */
-    public static final class Builder extends
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
-        // @@protoc_insertion_point(builder_implements:hbase.pb.DispatchMergingRegionsStateData)
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateDataOrBuilder {
-      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_DispatchMergingRegionsStateData_descriptor;
-      }
-
-      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_DispatchMergingRegionsStateData_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-                .alwaysUseFieldBuilders) {
-          getUserInfoFieldBuilder();
-          getTableNameFieldBuilder();
-          getRegionInfoFieldBuilder();
-        }
-      }
-      public Builder clear() {
-        super.clear();
-        if (userInfoBuilder_ == null) {
-          userInfo_ = null;
-        } else {
-          userInfoBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        if (tableNameBuilder_ == null) {
-          tableName_ = null;
-        } else {
-          tableNameBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000002);
-        if (regionInfoBuilder_ == null) {
-          regionInfo_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000004);
-        } else {
-          regionInfoBuilder_.clear();
-        }
-        forcible_ = false;
-        bitField0_ = (bitField0_ & ~0x00000008);
-        return this;
-      }
-
-      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_DispatchMergingRegionsStateData_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData build() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData buildPartial() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        if (userInfoBuilder_ == null) {
-          result.userInfo_ = userInfo_;
-        } else {
-          result.userInfo_ = userInfoBuilder_.build();
-        }
-        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
-          to_bitField0_ |= 0x00000002;
-        }
-        if (tableNameBuilder_ == null) {
-          result.tableName_ = tableName_;
-        } else {
-          result.tableName_ = tableNameBuilder_.build();
-        }
-        if (regionInfoBuilder_ == null) {
-          if (((bitField0_ & 0x00000004) == 0x00000004)) {
-            regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
-            bitField0_ = (bitField0_ & ~0x00000004);
-          }
-          result.regionInfo_ = regionInfo_;
-        } else {
-          result.regionInfo_ = regionInfoBuilder_.build();
-        }
-        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
-          to_bitField0_ |= 0x00000004;
-        }
-        result.forcible_ = forcible_;
-        result.bitField0_ = to_bitField0_;
-        onBuilt();
-        return result;
-      }
-
-      public Builder clone() {
-        return (Builder) super.clone();
-      }
-      public Builder setField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          Object value) {
-        return (Builder) super.setField(field, value);
-      }
-      public Builder clearField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
-        return (Builder) super.clearField(field);
-      }
-      public Builder clearOneof(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
-        return (Builder) super.clearOneof(oneof);
-      }
-      public Builder setRepeatedField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          int index, Object value) {
-        return (Builder) super.setRepeatedField(field, index, value);
-      }
-      public Builder addRepeatedField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          Object value) {
-        return (Builder) super.addRepeatedField(field, value);
-      }
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData) {
-          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData other) {
-        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData.getDefaultInstance()) return this;
-        if (other.hasUserInfo()) {
-          mergeUserInfo(other.getUserInfo());
-        }
-        if (other.hasTableName()) {
-          mergeTableName(other.getTableName());
-        }
-        if (regionInfoBuilder_ == null) {
-          if (!other.regionInfo_.isEmpty()) {
-            if (regionInfo_.isEmpty()) {
-              regionInfo_ = other.regionInfo_;
-              bitField0_ = (bitField0_ & ~0x00000004);
-            } else {
-              ensureRegionInfoIsMutable();
-              regionInfo_.addAll(other.regionInfo_);
-            }
-            onChanged();
-          }
-        } else {
-          if (!other.regionInfo_.isEmpty()) {
-            if (regionInfoBuilder_.isEmpty()) {
-              regionInfoBuilder_.dispose();
-              regionInfoBuilder_ = null;
-              regionInfo_ = other.regionInfo_;
-              bitField0_ = (bitField0_ & ~0x00000004);
-              regionInfoBuilder_ = 
-                org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
-                   getRegionInfoFieldBuilder() : null;
-            } else {
-              regionInfoBuilder_.addAllMessages(other.regionInfo_);
-            }
-          }
-        }
-        if (other.hasForcible()) {
-          setForcible(other.getForcible());
-        }
-        this.mergeUnknownFields(other.unknownFields);
-        onChanged();
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        if (!hasUserInfo()) {
-          return false;
-        }
-        if (!hasTableName()) {
-          return false;
-        }
-        if (!getUserInfo().isInitialized()) {
-          return false;
-        }
-        if (!getTableName().isInitialized()) {
-          return false;
-        }
-        for (int i = 0; i < getRegionInfoCount(); i++) {
-          if (!getRegionInfo(i).isInitialized()) {
-            return false;
-          }
-        }
-        return true;
-      }
-
-      public Builder mergeFrom(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData) e.getUnfinishedMessage();
-          throw e.unwrapIOException();
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation userInfo_ = null;
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder> userInfoBuilder_;
-      /**
-       * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-       */
-      public boolean hasUserInfo() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
-        if (userInfoBuilder_ == null) {
-          return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_;
-        } else {
-          return userInfoBuilder_.getMessage();
-        }
-      }
-      /**
-       * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-       */
-      public Builder setUserInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation value) {
-        if (userInfoBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          userInfo_ = value;
-          onChanged();
-        } else {
-          userInfoBuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-       */
-      public Builder setUserInfo(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder builderForValue) {
-        if (userInfoBuilder_ == null) {
-          userInfo_ = builderForValue.build();
-          onChanged();
-        } else {
-          userInfoBuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-       */
-      public Builder mergeUserInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation value) {
-        if (userInfoBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              userInfo_ != null &&
-              userInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance()) {
-            userInfo_ =
-              org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.newBuilder(userInfo_).mergeFrom(value).buildPartial();
-          } else {
-            userInfo_ = value;
-          }
-          onChanged();
-        } else {
-          userInfoBuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-       */
-      public Builder clearUserInfo() {
-        if (userInfoBuilder_ == null) {
-          userInfo_ = null;
-          onChanged();
-        } else {
-          userInfoBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder getUserInfoBuilder() {
-        bitField0_ |= 0x00000001;
-        onChanged();
-        return getUserInfoFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
-        if (userInfoBuilder_ != null) {
-          return userInfoBuilder_.getMessageOrBuilder();
-        } else {
-          return userInfo_ == null ?
-              org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_;
-        }
-      }
-      /**
-       * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-       */
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder> 
-          getUserInfoFieldBuilder() {
-        if (userInfoBuilder_ == null) {
-          userInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-              org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder>(
-                  getUserInfo(),
-                  getParentForChildren(),
-                  isClean());
-          userInfo_ = null;
-        }
-        return userInfoBuilder_;
-      }
-
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_ = null;
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
-      /**
-       * <code>required .hbase.pb.TableName table_name = 2;</code>
-       */
-      public boolean hasTableName() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      /**
-       * <code>required .hbase.pb.TableName table_name = 2;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() {
-        if (tableNameBuilder_ == null) {
-          return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
-        } else {
-          return tableNameBuilder_.getMessage();
-        }
-      }
-      /**
-       * <code>required .hbase.pb.TableName table_name = 2;</code>
-       */
-      public Builder setTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) {
-        if (tableNameBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          tableName_ = value;
-          onChanged();
-        } else {
-          tableNameBuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.TableName table_name = 2;</code>
-       */
-      public Builder setTableName(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
-        if (tableNameBuilder_ == null) {
-          tableName_ = builderForValue.build();
-          onChanged();
-        } else {
-          tableNameBuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.TableName table_name = 2;</code>
-       */
-      public Builder mergeTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) {
-        if (tableNameBuilder_ == null) {
-          if (((bitField0_ & 0x00000002) == 0x00000002) &&
-              tableName_ != null &&
-              tableName_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
-            tableName_ =
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
-          } else {
-            tableName_ = value;
-          }
-          onChanged();
-        } else {
-          tableNameBuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.TableName table_name = 2;</code>
-       */
-      public Builder clearTableName() {
-        if (tableNameBuilder_ == null) {
-          tableName_ = null;
-          onChanged();
-        } else {
-          tableNameBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000002);
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.TableName table_name = 2;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
-        bitField0_ |= 0x00000002;
-        onChanged();
-        return getTableNameFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>required .hbase.pb.TableName table_name = 2;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
-        if (tableNameBuilder_ != null) {
-          return tableNameBuilder_.getMessageOrBuilder();
-        } else {
-          return tableName_ == null ?
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
-        }
-      }
-      /**
-       * <code>required .hbase.pb.TableName table_name = 2;</code>
-       */
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
-          getTableNameFieldBuilder() {
-        if (tableNameBuilder_ == null) {
-          tableNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
-                  getTableName(),
-                  getParentForChildren(),
-                  isClean());
-          tableName_ = null;
-        }
-        return tableNameBuilder_;
-      }
-
-      private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
-        java.util.Collections.emptyList();
-      private void ensureRegionInfoIsMutable() {
-        if (!((bitField0_ & 0x00000004) == 0x00000004)) {
-          regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
-          bitField0_ |= 0x00000004;
-         }
-      }
-
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
-
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
-        if (regionInfoBuilder_ == null) {
-          return java.util.Collections.unmodifiableList(regionInfo_);
-        } else {
-          return regionInfoBuilder_.getMessageList();
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public int getRegionInfoCount() {
-        if (regionInfoBuilder_ == null) {
-          return regionInfo_.size();
-        } else {
-          return regionInfoBuilder_.getCount();
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
-        if (regionInfoBuilder_ == null) {
-          return regionInfo_.get(index);
-        } else {
-          return regionInfoBuilder_.getMessage(index);
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public Builder setRegionInfo(
-          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
-        if (regionInfoBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureRegionInfoIsMutable();
-          regionInfo_.set(index, value);
-          onChanged();
-        } else {
-          regionInfoBuilder_.setMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public Builder setRegionInfo(
-          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
-        if (regionInfoBuilder_ == null) {
-          ensureRegionInfoIsMutable();
-          regionInfo_.set(index, builderForValue.build());
-          onChanged();
-        } else {
-          regionInfoBuilder_.setMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public Builder addRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
-        if (regionInfoBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureRegionInfoIsMutable();
-          regionInfo_.add(value);
-          onChanged();
-        } else {
-          regionInfoBuilder_.addMessage(value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public Builder addRegionInfo(
-          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
-        if (regionInfoBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          ensureRegionInfoIsMutable();
-          regionInfo_.add(index, value);
-          onChanged();
-        } else {
-          regionInfoBuilder_.addMessage(index, value);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public Builder addRegionInfo(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
-        if (regionInfoBuilder_ == null) {
-          ensureRegionInfoIsMutable();
-          regionInfo_.add(builderForValue.build());
-          onChanged();
-        } else {
-          regionInfoBuilder_.addMessage(builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public Builder addRegionInfo(
-          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
-        if (regionInfoBuilder_ == null) {
-          ensureRegionInfoIsMutable();
-          regionInfo_.add(index, builderForValue.build());
-          onChanged();
-        } else {
-          regionInfoBuilder_.addMessage(index, builderForValue.build());
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public Builder addAllRegionInfo(
-          java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> values) {
-        if (regionInfoBuilder_ == null) {
-          ensureRegionInfoIsMutable();
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll(
-              values, regionInfo_);
-          onChanged();
-        } else {
-          regionInfoBuilder_.addAllMessages(values);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public Builder clearRegionInfo() {
-        if (regionInfoBuilder_ == null) {
-          regionInfo_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000004);
-          onChanged();
-        } else {
-          regionInfoBuilder_.clear();
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public Builder removeRegionInfo(int index) {
-        if (regionInfoBuilder_ == null) {
-          ensureRegionInfoIsMutable();
-          regionInfo_.remove(index);
-          onChanged();
-        } else {
-          regionInfoBuilder_.remove(index);
-        }
-        return this;
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
-          int index) {
-        return getRegionInfoFieldBuilder().getBuilder(index);
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
-          int index) {
-        if (regionInfoBuilder_ == null) {
-          return regionInfo_.get(index);  } else {
-          return regionInfoBuilder_.getMessageOrBuilder(index);
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
-           getRegionInfoOrBuilderList() {
-        if (regionInfoBuilder_ != null) {
-          return regionInfoBuilder_.getMessageOrBuilderList();
-        } else {
-          return java.util.Collections.unmodifiableList(regionInfo_);
-        }
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
-        return getRegionInfoFieldBuilder().addBuilder(
-            org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
-          int index) {
-        return getRegionInfoFieldBuilder().addBuilder(
-            index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
-      }
-      /**
-       * <code>repeated .hbase.pb.RegionInfo region_info = 3;</code>
-       */
-      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder> 
-           getRegionInfoBuilderList() {
-        return getRegionInfoFieldBuilder().getBuilderList();
-      }
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
-          getRegionInfoFieldBuilder() {
-        if (regionInfoBuilder_ == null) {
-          regionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
-                  regionInfo_,
-                  ((bitField0_ & 0x00000004) == 0x00000004),
-                  getParentForChildren(),
-                  isClean());
-          regionInfo_ = null;
-        }
-        return regionInfoBuilder_;
-      }
-
-      private boolean forcible_ ;
-      /**
-       * <code>optional bool forcible = 4;</code>
-       */
-      public boolean hasForcible() {
-        return ((bitField0_ & 0x00000008) == 0x00000008);
-      }
-      /**
-       * <code>optional bool forcible = 4;</code>
-       */
-      public boolean getForcible() {
-        return forcible_;
-      }
-      /**
-       * <code>optional bool forcible = 4;</code>
-       */
-      public Builder setForcible(boolean value) {
-        bitField0_ |= 0x00000008;
-        forcible_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional bool forcible = 4;</code>
-       */
-      public Builder clearForcible() {
-        bitField0_ = (bitField0_ & ~0x00000008);
-        forcible_ = false;
-        onChanged();
-        return this;
-      }
-      public final Builder setUnknownFields(
-          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
-        return super.setUnknownFields(unknownFields);
-      }
-
-      public final Builder mergeUnknownFields(
-          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
-        return super.mergeUnknownFields(unknownFields);
-      }
-
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.DispatchMergingRegionsStateData)
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.DispatchMergingRegionsStateData)
-    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData DEFAULT_INSTANCE;
-    static {
-      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData();
-    }
-
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData getDefaultInstance() {
-      return DEFAULT_INSTANCE;
-    }
-
-    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DispatchMergingRegionsStateData>
-        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<DispatchMergingRegionsStateData>() {
-      public DispatchMergingRegionsStateData parsePartialFrom(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-          return new DispatchMergingRegionsStateData(input, extensionRegistry);
-      }
-    };
-
-    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DispatchMergingRegionsStateData> parser() {
-      return PARSER;
-    }
-
-    @java.lang.Override
-    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<DispatchMergingRegionsStateData> getParserForType() {
-      return PARSER;
-    }
-
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsStateData getDefaultInstanceForType() {
-      return DEFAULT_INSTANCE;
-    }
-
-  }
-
-  public interface MergeTableRegionsStateDataOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:hbase.pb.MergeTableRegionsStateData)
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
-
-    /**
-     * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-     */
-    boolean hasUserInfo();
-    /**
-     * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo();
-    /**
-     * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder();
-
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
-     */
-    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> 
-        getRegionInfoList();
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
-     */
-    int getRegionInfoCount();
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
-     */
-    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
-        getRegionInfoOrBuilderList();
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
-        int index);
-
-    /**
-     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
-     */
-    boolean hasMergedRegionInfo();
-    /**
-     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getMergedRegionInfo();
-    /**
-     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getMergedRegionInfoOrBuilder();
-
-    /**
-     * <code>optional bool forcible = 4 [default = false];</code>
-     */
-    boolean hasForcible();
-    /**
-     * <code>optional bool forcible = 4 [default = false];</code>
-     */
-    boolean getForcible();
-  }
-  /**
-   * Protobuf type {@code hbase.pb.MergeTableRegionsStateData}
-   */
-  public  static final class MergeTableRegionsStateData extends
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
-      // @@protoc_insertion_point(message_implements:hbase.pb.MergeTableRegionsStateData)
-      MergeTableRegionsStateDataOrBuilder {
-    // Use MergeTableRegionsStateData.newBuilder() to construct.
-    private MergeTableRegionsStateData(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
-      super(builder);
-    }
-    private MergeTableRegionsStateData() {
-      regionInfo_ = java.util.Collections.emptyList();
-      forcible_ = false;
-    }
-
-    @java.lang.Override
-    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
-    getUnknownFields() {
-      return this.unknownFields;
-    }
-    private MergeTableRegionsStateData(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      this();
-      int mutable_bitField0_ = 0;
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000001) == 0x00000001)) {
-                subBuilder = userInfo_.toBuilder();
-              }
-              userInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(userInfo_);
-                userInfo_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000001;
-              break;
-            }
-            case 18: {
-              if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
-                regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo>();
-                mutable_bitField0_ |= 0x00000002;
-              }
-              regionInfo_.add(
-                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
-              break;
-            }
-            case 26: {
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000002) == 0x00000002)) {
-                subBuilder = mergedRegionInfo_.toBuilder();
-              }
-              mergedRegionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(mergedRegionInfo_);
-                mergedRegionInfo_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000002;
-              break;
-            }
-            case 32: {
-              bitField0_ |= 0x00000004;
-              forcible_ = input.readBool();
-              break;
-            }
-          }
-        }
-      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
-            e).setUnfinishedMessage(this);
-      } finally {
-        if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
-          regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
-        }
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MergeTableRegionsStateData_descriptor;
-    }
-
-    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.internal_static_hbase_pb_MergeTableRegionsStateData_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData.Builder.class);
-    }
-
-    private int bitField0_;
-    public static final int USER_INFO_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation userInfo_;
-    /**
-     * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-     */
-    public boolean hasUserInfo() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    /**
-     * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation getUserInfo() {
-      return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_;
-    }
-    /**
-     * <code>required .hbase.pb.UserInformation user_info = 1;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformationOrBuilder getUserInfoOrBuilder() {
-      return userInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance() : userInfo_;
-    }
-
-    public static final int REGION_INFO_FIELD_NUMBER = 2;
-    private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
-     */
-    public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
-      return regionInfo_;
-    }
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
-     */
-    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
-        getRegionInfoOrBuilderList() {
-      return regionInfo_;
-    }
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
-     */
-    public int getRegionInfoCount() {
-      return regionInfo_.size();
-    }
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
-      return regionInfo_.get(index);
-    }
-    /**
-     * <code>repeated .hbase.pb.RegionInfo region_info = 2;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
-        int index) {
-      return regionInfo_.get(index);
-    }
-
-    public static final int MERGED_REGION_INFO_FIELD_NUMBER = 3;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo mergedRegionInfo_;
-    /**
-     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
-     */
-    public boolean hasMergedRegionInfo() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
-    }
-    /**
-     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getMergedRegionInfo() {
-      return mergedRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : mergedRegionInfo_;
-    }
-    /**
-     * <code>required .hbase.pb.RegionInfo merged_region_info = 3;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getMergedRegionInfoOrBuilder() {
-      return mergedRegionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : mergedRegionInfo_;
-    }
-
-    public static final int FORCIBLE_FIELD_NUMBER = 4;
-    private boolean forcible_;
-    /**
-     * <code>optional bool forcible = 4 [default = false];</code>
-     */
-    public boolean hasForcible() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
-    }
-    /**
-     * <code>optional bool forcible = 4 [default = false];</code>
-     */
-    public boolean getForcible() {
-      return forcible_;
-    }
-
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized == 1) return true;
-      if (isInitialized == 0) return false;
-
-      if (!hasUserInfo()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!hasMergedRegionInfo()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!getUserInfo().isInitialized()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      for (int i = 0; i < getRegionInfoCount(); i++) {
-        if (!getRegionInfo(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      if (!getMergedRegionInfo().isInitialized()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, getUserInfo());
-      }
-      for (int i = 0; i < regionInfo_.size(); i++) {
-        output.writeMessage(2, regionInfo_.get(i));
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeMessage(3, getMergedRegionInfo());
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeBool(4, forcible_);
-      }
-      unknownFields.writeTo(output);
-    }
-
-    public int getSerializedSize() {
-      int size = memoizedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, getUserInfo());
-      }
-      for (int i = 0; i < regionInfo_.size(); i++) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(2, regionInfo_.get(i));
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(3, getMergedRegionInfo());
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeBoolSize(4, forcible_);
-      }
-      size += unknownFields.getSerializedSize();
-      memoizedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData) obj;
+      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsStateData) obj;
 
       boolean result = true;
       result = result && (hasUserInfo() == other.hasUserInfo());
@@ -25422,11 +24034,6 @@ public final class MasterProcedureProtos {
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_RestoreSnapshotStateData_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_DispatchMergingRegionsStateData_descriptor;
-  private static final 
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_hbase_pb_DispatchMergingRegionsStateData_fieldAccessorTable;
-  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_MergeTableRegionsStateData_descriptor;
   private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -25524,146 +24131,135 @@ public final class MasterProcedureProtos {
       "o\0221\n\023region_info_for_add\030\006 \003(\0132\024.hbase.p" +
       "b.RegionInfo\022T\n!parent_to_child_regions_" +
       "pair_list\030\007 \003(\0132).hbase.pb.RestoreParent" +
-      "ToChildRegionsPair\"\265\001\n\037DispatchMergingRe" +
-      "gionsStateData\022,\n\tuser_info\030\001 \002(\0132\031.hbas" +
-      "e.pb.UserInformation\022\'\n\ntable_name\030\002 \002(\013" +
-      "2\023.hbase.pb.TableName\022)\n\013region_info\030\003 \003" +
-      "(\0132\024.hbase.pb.RegionInfo\022\020\n\010forcible\030\004 \001" +
-      "(\010\"\300\001\n\032MergeTableRegionsStateData\022,\n\tuse",
-      "r_info\030\001 \002(\0132\031.hbase.pb.UserInformation\022" +
-      ")\n\013region_info\030\002 \003(\0132\024.hbase.pb.RegionIn" +
-      "fo\0220\n\022merged_region_info\030\003 \002(\0132\024.hbase.p" +
-      "b.RegionInfo\022\027\n\010forcible\030\004 \001(\010:\005false\"\254\001" +
-      "\n\031SplitTableRegionStateData\022,\n\tuser_info" +
-      "\030\001 \002(\0132\031.hbase.pb.UserInformation\0220\n\022par" +
-      "ent_region_info\030\002 \002(\0132\024.hbase.pb.RegionI" +
-      "nfo\022/\n\021child_region_info\030\003 \003(\0132\024.hbase.p" +
-      "b.RegionInfo\"\201\002\n\024ServerCrashStateData\022)\n" +
-      "\013server_name\030\001 \002(\0132\024.hbase.pb.ServerName",
-      "\022\036\n\026distributed_log_replay\030\002 \001(\010\0227\n\031regi" +
-      "ons_on_crashed_server\030\003 \003(\0132\024.hbase.pb.R" +
-      "egionInfo\022.\n\020regions_assigned\030\004 \003(\0132\024.hb" +
-      "ase.pb.RegionInfo\022\025\n\rcarrying_meta\030\005 \001(\010" +
-      "\022\036\n\020should_split_wal\030\006 \001(\010:\004true*\330\001\n\020Cre" +
-      "ateTableState\022\036\n\032CREATE_TABLE_PRE_OPERAT" +
-      "ION\020\001\022 \n\034CREATE_TABLE_WRITE_FS_LAYOUT\020\002\022" +
-      "\034\n\030CREATE_TABLE_ADD_TO_META\020\003\022\037\n\033CREATE_" +
-      "TABLE_ASSIGN_REGIONS\020\004\022\"\n\036CREATE_TABLE_U" +
-      "PDATE_DESC_CACHE\020\005\022\037\n\033CREATE_TABLE_POST_",
-      "OPERATION\020\006*\207\002\n\020ModifyTableState\022\030\n\024MODI" +
-      "FY_TABLE_PREPARE\020\001\022\036\n\032MODIFY_TABLE_PRE_O" +
-      "PERATION\020\002\022(\n$MODIFY_TABLE_UPDATE_TABLE_" +
-      "DESCRIPTOR\020\003\022&\n\"MODIFY_TABLE_REMOVE_REPL" +
-      "ICA_COLUMN\020\004\022!\n\035MODIFY_TABLE_DELETE_FS_L" +
-      "AYOUT\020\005\022\037\n\033MODIFY_TABLE_POST_OPERATION\020\006" +
-      "\022#\n\037MODIFY_TABLE_REOPEN_ALL_REGIONS\020\007*\212\002" +
-      "\n\022TruncateTableState\022 \n\034TRUNCATE_TABLE_P" +
-      "RE_OPERATION\020\001\022#\n\037TRUNCATE_TABLE_REMOVE_" +
-      "FROM_META\020\002\022\"\n\036TRUNCATE_TABLE_CLEAR_FS_L",
-      "AYOUT\020\003\022#\n\037TRUNCATE_TABLE_CREATE_FS_LAYO" +
-      "UT\020\004\022\036\n\032TRUNCATE_TABLE_ADD_TO_META\020\005\022!\n\035" +
-      "TRUNCATE_TABLE_ASSIGN_REGIONS\020\006\022!\n\035TRUNC" +
-      "ATE_TABLE_POST_OPERATION\020\007*\337\001\n\020DeleteTab" +
-      "leState\022\036\n\032DELETE_TABLE_PRE_OPERATION\020\001\022" +
-      "!\n\035DELETE_TABLE_REMOVE_FROM_META\020\002\022 \n\034DE" +
-      "LETE_TABLE_CLEAR_FS_LAYOUT\020\003\022\"\n\036DELETE_T" +
-      "ABLE_UPDATE_DESC_CACHE\020\004\022!\n\035DELETE_TABLE" +
-      "_UNASSIGN_REGIONS\020\005\022\037\n\033DELETE_TABLE_POST" +
-      "_OPERATION\020\006*\320\001\n\024CreateNamespaceState\022\034\n",
-      "\030CREATE_NAMESPACE_PREPARE\020\001\022%\n!CREATE_NA" +
-      "MESPACE_CREATE_DIRECTORY\020\002\022)\n%CREATE_NAM" +
-      "ESPACE_INSERT_INTO_NS_TABLE\020\003\022\036\n\032CREATE_" +
-      "NAMESPACE_UPDATE_ZK\020\004\022(\n$CREATE_NAMESPAC" +
-      "E_SET_NAMESPACE_QUOTA\020\005*z\n\024ModifyNamespa" +
-      "ceState\022\034\n\030MODIFY_NAMESPACE_PREPARE\020\001\022$\n" +
-      " MODIFY_NAMESPACE_UPDATE_NS_TABLE\020\002\022\036\n\032M" +
-      "ODIFY_NAMESPACE_UPDATE_ZK\020\003*\332\001\n\024DeleteNa" +
-      "mespaceState\022\034\n\030DELETE_NAMESPACE_PREPARE" +
-      "\020\001\022)\n%DELETE_NAMESPACE_DELETE_FROM_NS_TA",
-      "BLE\020\002\022#\n\037DELETE_NAMESPACE_REMOVE_FROM_ZK" +
-      "\020\003\022\'\n#DELETE_NAMESPACE_DELETE_DIRECTORIE" +
-      "S\020\004\022+\n\'DELETE_NAMESPACE_REMOVE_NAMESPACE" +
-      "_QUOTA\020\005*\331\001\n\024AddColumnFamilyState\022\035\n\031ADD" +
-      "_COLUMN_FAMILY_PREPARE\020\001\022#\n\037ADD_COLUMN_F" +
-      "AMILY_PRE_OPERATION\020\002\022-\n)ADD_COLUMN_FAMI" +
-      "LY_UPDATE_TABLE_DESCRIPTOR\020\003\022$\n ADD_COLU" +
-      "MN_FAMILY_POST_OPERATION\020\004\022(\n$ADD_COLUMN" +
-      "_FAMILY_REOPEN_ALL_REGIONS\020\005*\353\001\n\027ModifyC" +
-      "olumnFamilyState\022 \n\034MODIFY_COLUMN_FAMILY",
-      "_PREPARE\020\001\022&\n\"MODIFY_COLUMN_FAMILY_PRE_O" +
-      "PERATION\020\002\0220\n,MODIFY_COLUMN_FAMILY_UPDAT" +
-      "E_TABLE_DESCRIPTOR\020\003\022\'\n#MODIFY_COLUMN_FA" +
-      "MILY_POST_OPERATION\020\004\022+\n\'MODIFY_COLUMN_F" +
-      "AMILY_REOPEN_ALL_REGIONS\020\005*\226\002\n\027DeleteCol" +
-      "umnFamilyState\022 \n\034DELETE_COLUMN_FAMILY_P" +
-      "REPARE\020\001\022&\n\"DELETE_COLUMN_FAMILY_PRE_OPE" +
-      "RATION\020\002\0220\n,DELETE_COLUMN_FAMILY_UPDATE_" +
-      "TABLE_DESCRIPTOR\020\003\022)\n%DELETE_COLUMN_FAMI" +
-      "LY_DELETE_FS_LAYOUT\020\004\022\'\n#DELETE_COLUMN_F",
-      "AMILY_POST_OPERATION\020\005\022+\n\'DELETE_COLUMN_" +
-      "FAMILY_REOPEN_ALL_REGIONS\020\006*\350\001\n\020EnableTa" +
-      "bleState\022\030\n\024ENABLE_TABLE_PREPARE\020\001\022\036\n\032EN" +
-      "ABLE_TABLE_PRE_OPERATION\020\002\022)\n%ENABLE_TAB" +
-      "LE_SET_ENABLING_TABLE_STATE\020\003\022$\n ENABLE_" +
-      "TABLE_MARK_REGIONS_ONLINE\020\004\022(\n$ENABLE_TA" +
-      "BLE_SET_ENABLED_TABLE_STATE\020\005\022\037\n\033ENABLE_" +
-      "TABLE_POST_OPERATION\020\006*\362\001\n\021DisableTableS" +
-      "tate\022\031\n\025DISABLE_TABLE_PREPARE\020\001\022\037\n\033DISAB" +
-      "LE_TABLE_PRE_OPERATION\020\002\022+\n\'DISABLE_TABL",
-      "E_SET_DISABLING_TABLE_STATE\020\003\022&\n\"DISABLE" +
-      "_TABLE_MARK_REGIONS_OFFLINE\020\004\022*\n&DISABLE" +
-      "_TABLE_SET_DISABLED_TABLE_STATE\020\005\022 \n\034DIS" +
-      "ABLE_TABLE_POST_OPERATION\020\006*\346\001\n\022CloneSna" +
-      "pshotState\022 \n\034CLONE_SNAPSHOT_PRE_OPERATI" +
-      "ON\020\001\022\"\n\036CLONE_SNAPSHOT_WRITE_FS_LAYOUT\020\002" +
-      "\022\036\n\032CLONE_SNAPSHOT_ADD_TO_META\020\003\022!\n\035CLON" +
-      "E_SNAPSHOT_ASSIGN_REGIONS\020\004\022$\n CLONE_SNA" +
-      "PSHOT_UPDATE_DESC_CACHE\020\005\022!\n\035CLONE_SNAPS" +
-      "HOT_POST_OPERATION\020\006*\260\001\n\024RestoreSnapshot",
-      "State\022\"\n\036RESTORE_SNAPSHOT_PRE_OPERATION\020" +
-      "\001\022,\n(RESTORE_SNAPSHOT_UPDATE_TABLE_DESCR" +
-      "IPTOR\020\002\022$\n RESTORE_SNAPSHOT_WRITE_FS_LAY" +
-      "OUT\020\003\022 \n\034RESTORE_SNAPSHOT_UPDATE_META\020\004*" +
-      "\376\001\n\033DispatchMergingRegionsState\022$\n DISPA" +
-      "TCH_MERGING_REGIONS_PREPARE\020\001\022*\n&DISPATC" +
-      "H_MERGING_REGIONS_PRE_OPERATION\020\002\0223\n/DIS" +
-      "PATCH_MERGING_REGIONS_MOVE_REGION_TO_SAM" +
-      "E_RS\020\003\022+\n\'DISPATCH_MERGING_REGIONS_DO_ME" +
-      "RGE_IN_RS\020\004\022+\n\'DISPATCH_MERGING_REGIONS_",
-      "POST_OPERATION\020\005*\376\003\n\026MergeTableRegionsSt" +
-      "ate\022\037\n\033MERGE_TABLE_REGIONS_PREPARE\020\001\022.\n*" +
-      "MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_" +
-      "RS\020\002\022+\n\'MERGE_TABLE_

<TRUNCATED>

[36/50] [abbrv] hbase git commit: HBASE-17407: Correct update of maxFlushedSeqId in HRegion

Posted by el...@apache.org.
HBASE-17407: Correct update of maxFlushedSeqId in HRegion

Signed-off-by: zhangduo <zh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f254e278
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f254e278
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f254e278

Branch: refs/heads/HBASE-16961
Commit: f254e278ece751e67c92570aef4b15fddab22a94
Parents: 3abd13d
Author: eshcar <es...@yahoo-inc.com>
Authored: Thu Jan 19 01:11:58 2017 +0200
Committer: zhangduo <zh...@apache.org>
Committed: Mon Jan 23 09:22:51 2017 +0800

----------------------------------------------------------------------
 .../hbase/regionserver/CompactingMemStore.java  | 24 ++++++++++++++++----
 .../hbase/regionserver/CompactionPipeline.java  |  8 +++++++
 .../hbase/regionserver/DefaultMemStore.java     |  4 +++-
 .../hadoop/hbase/regionserver/HRegion.java      | 10 ++++----
 .../hadoop/hbase/regionserver/HStore.java       |  4 ++--
 .../hadoop/hbase/regionserver/MemStore.java     | 10 ++++----
 .../hbase/regionserver/wal/AbstractFSWAL.java   |  9 ++++++++
 .../regionserver/wal/SequenceIdAccounting.java  | 21 +++++++++++++----
 .../hadoop/hbase/wal/DisabledWALProvider.java   |  6 +++++
 .../java/org/apache/hadoop/hbase/wal/WAL.java   |  2 ++
 10 files changed, 75 insertions(+), 23 deletions(-)
----------------------------------------------------------------------
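
In short, this patch replaces the post-flush finalizeFlush() callback with a pre-flush
estimate: each memstore reports, before the flush runs, a lower bound on the sequence ids
that will remain unflushed, and HRegion forwards those per-family estimates to the WAL so
that SequenceIdAccounting can derive maxFlushedSeqId from a safe lower bound instead of
forgetting the family outright. Below is a condensed sketch of that flow; the method and
variable names are taken from the HRegion and WAL hunks that follow, and the surrounding
objects (storesToFlush, encodedRegionName, wal) are assumed to exist as they do in
HRegion's flush-prepare path.

  Map<byte[], Long> flushedFamilyNamesToSeq = new HashMap<>();
  for (Store store : storesToFlush) {
    // Before the flush starts, each store estimates the lowest sequence id that will
    // still be unflushed in its memstore once the flush completes; NO_SEQNUM means
    // "the memstore is expected to be emptied".
    flushedFamilyNamesToSeq.put(store.getFamily().getName(),
        ((HStore) store).preFlushSeqIDEstimation());
  }
  // The WAL records the estimates up front; a null return still signals that the WAL
  // is closing and the flush cannot proceed, as with the old Set-based overload.
  Long earliestUnflushedSequenceIdForTheRegion =
      wal.startCacheFlush(encodedRegionName, flushedFamilyNamesToSeq);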


http://git-wip-us.apache.org/repos/asf/hbase/blob/f254e278/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index ed7d274..48dc880 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -124,13 +125,20 @@ public class CompactingMemStore extends AbstractMemStore {
   }
 
   /**
-   * This method is called when it is clear that the flush to disk is completed.
-   * The store may do any post-flush actions at this point.
-   * One example is to update the WAL with sequence number that is known only at the store level.
+   * This method is called before the flush is executed.
+   * @return an estimate (lower bound) of the lowest sequence id left unflushed in the memstore
+   * after the flush executes, or {@code HConstants.NO_SEQNUM} if the memstore will be cleared.
    */
   @Override
-  public void finalizeFlush() {
-    updateLowestUnflushedSequenceIdInWAL(false);
+  public long preFlushSeqIDEstimation() {
+    if (compositeSnapshot) {
+      return HConstants.NO_SEQNUM;
+    }
+    Segment segment = getLastSegment();
+    if (segment == null) {
+      return HConstants.NO_SEQNUM;
+    }
+    return segment.getMinSequenceId();
   }
 
   @Override
@@ -364,6 +372,12 @@ public class CompactingMemStore extends AbstractMemStore {
     }
   }
 
+  private Segment getLastSegment() {
+    Segment localActive = getActive();
+    Segment tail = pipeline.getTail();
+    return tail == null ? localActive : tail;
+  }
+
   private byte[] getFamilyNameInBytes() {
     return store.getFamily().getName();
   }
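
As the hunks above show, the estimate comes from the last segment of the memstore (the
pipeline tail, or the active segment when the pipeline is empty), and NO_SEQNUM is
returned when compositeSnapshot is set, in which case the whole memstore is expected to
be cleared by the flush. A minimal sketch of how a caller can interpret the two cases,
based only on the javadoc contract added in this patch (the 'memstore' variable is an
assumption, not taken from the diff):

  long estimate = memstore.preFlushSeqIDEstimation();
  if (estimate == HConstants.NO_SEQNUM) {
    // The flush is expected to empty the memstore, so the WAL may drop this family
    // from its lowest-unflushed accounting (the pre-patch behavior).
  } else {
    // 'estimate' is a lower bound on the sequence ids left unflushed after the flush;
    // under-estimating only makes maxFlushedSeqId more conservative, which appears to
    // be why a lower bound is sufficient here.
  }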

http://git-wip-us.apache.org/repos/asf/hbase/blob/f254e278/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
index e533bd0..9a844e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionPipeline.java
@@ -267,6 +267,14 @@ public class CompactionPipeline {
     if(segment != null) pipeline.addLast(segment);
   }
 
+  public Segment getTail() {
+    List<? extends Segment> localCopy = getSegments();
+    if (localCopy.isEmpty()) {
+      return null;
+    }
+    return localCopy.get(localCopy.size() - 1);
+  }
+
   private boolean addFirst(ImmutableSegment segment) {
     pipeline.addFirst(segment);
     return true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/f254e278/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index d4e6e12..63af570 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -169,7 +170,8 @@ public class DefaultMemStore extends AbstractMemStore {
   }
 
   @Override
-  public void finalizeFlush() {
+  public long preFlushSeqIDEstimation() {
+    return HConstants.NO_SEQNUM;
   }
 
   @Override public boolean isSloppy() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f254e278/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index f35d788..ef6239d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2412,9 +2412,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     status.setStatus("Preparing flush snapshotting stores in " + getRegionInfo().getEncodedName());
     MemstoreSize totalSizeOfFlushableStores = new MemstoreSize();
 
-    Set<byte[]> flushedFamilyNames = new HashSet<byte[]>();
+    Map<byte[], Long> flushedFamilyNamesToSeq = new HashMap<>();
     for (Store store: storesToFlush) {
-      flushedFamilyNames.add(store.getFamily().getName());
+      flushedFamilyNamesToSeq.put(store.getFamily().getName(),
+          ((HStore) store).preFlushSeqIDEstimation());
     }
 
     TreeMap<byte[], StoreFlushContext> storeFlushCtxs
@@ -2434,7 +2435,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     try {
       if (wal != null) {
         Long earliestUnflushedSequenceIdForTheRegion =
-            wal.startCacheFlush(encodedRegionName, flushedFamilyNames);
+            wal.startCacheFlush(encodedRegionName, flushedFamilyNamesToSeq);
         if (earliestUnflushedSequenceIdForTheRegion == null) {
           // This should never happen. This is how startCacheFlush signals flush cannot proceed.
           String msg = this.getRegionInfo().getEncodedName() + " flush aborted; WAL closing.";
@@ -2677,9 +2678,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
 
     // If we get to here, the HStores have been written.
-    for(Store storeToFlush :storesToFlush) {
-      ((HStore) storeToFlush).finalizeFlush();
-    }
     if (wal != null) {
       wal.completeCacheFlush(this.getRegionInfo().getEncodedNameAsBytes());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f254e278/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 425667a..ad23ce0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -2509,8 +2509,8 @@ public class HStore implements Store {
     }
   }
 
-  public void finalizeFlush() {
-    memstore.finalizeFlush();
+  public Long preFlushSeqIDEstimation() {
+    return memstore.preFlushSeqIDEstimation();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/f254e278/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
index b094476..38d3e44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStore.java
@@ -119,12 +119,12 @@ public interface MemStore {
   MemstoreSize size();
 
   /**
-   * This method is called when it is clear that the flush to disk is completed.
-   * The store may do any post-flush actions at this point.
-   * One example is to update the wal with sequence number that is known only at the store level.
+   * This method is called before the flush is executed.
+   * @return an estimate (lower bound) of the lowest sequence id left unflushed in the memstore
+   * after the flush executes, or {@code HConstants.NO_SEQNUM} if the memstore will be cleared.
    */
-  void finalizeFlush();
+  long preFlushSeqIDEstimation();
 
-  /* Return true if the memstore may need some extra memory space*/
+  /* Return true if the memstore may use some extra memory space*/
   boolean isSloppy();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f254e278/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 316e2f6..7e3bd59 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -429,6 +429,15 @@ public abstract class AbstractFSWAL<W> implements WAL {
   }
 
   @Override
+  public Long startCacheFlush(byte[] encodedRegionName, Map<byte[], Long> familyToSeq) {
+    if (!closeBarrier.beginOp()) {
+      LOG.info("Flush not started for " + Bytes.toString(encodedRegionName) + "; server closing.");
+      return null;
+    }
+    return this.sequenceIdAccounting.startCacheFlush(encodedRegionName, familyToSeq);
+  }
+
+  @Override
   public void completeCacheFlush(byte[] encodedRegionName) {
     this.sequenceIdAccounting.completeCacheFlush(encodedRegionName);
     closeBarrier.endOp();

http://git-wip-us.apache.org/repos/asf/hbase/blob/f254e278/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
index 6e7ad9b..8226b82 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceIdAccounting.java
@@ -264,6 +264,14 @@ class SequenceIdAccounting {
    * oldest/lowest outstanding edit.
    */
   Long startCacheFlush(final byte[] encodedRegionName, final Set<byte[]> families) {
+    Map<byte[], Long> familyToSeq = new HashMap<>();
+    for (byte[] familyName : families) {
+      familyToSeq.put(familyName, HConstants.NO_SEQNUM);
+    }
+    return startCacheFlush(encodedRegionName, familyToSeq);
+  }
+
+  Long startCacheFlush(final byte[] encodedRegionName, final Map<byte[], Long> familyToSeq) {
     Map<ImmutableByteArray, Long> oldSequenceIds = null;
     Long lowestUnflushedInRegion = HConstants.NO_SEQNUM;
     synchronized (tieLock) {
@@ -273,9 +281,14 @@ class SequenceIdAccounting {
         // circumstance because another concurrent thread now may add sequenceids for this family
         // (see above in getOrCreateLowestSequenceId). Make sure you are ok with this. Usually it
         // is fine because updates are blocked when this method is called. Make sure!!!
-        for (byte[] familyName : families) {
-          ImmutableByteArray familyNameWrapper = ImmutableByteArray.wrap(familyName);
-          Long seqId = m.remove(familyNameWrapper);
+        for (Map.Entry<byte[], Long> entry : familyToSeq.entrySet()) {
+          ImmutableByteArray familyNameWrapper = ImmutableByteArray.wrap(entry.getKey());
+          Long seqId = null;
+          if (entry.getValue() == HConstants.NO_SEQNUM) {
+            seqId = m.remove(familyNameWrapper);
+          } else {
+            seqId = m.replace(familyNameWrapper, entry.getValue());
+          }
           if (seqId != null) {
             if (oldSequenceIds == null) {
               oldSequenceIds = new HashMap<>();
@@ -344,7 +357,7 @@ class SequenceIdAccounting {
     if (flushing != null) {
       for (Map.Entry<ImmutableByteArray, Long> e : flushing.entrySet()) {
         Long currentId = tmpMap.get(e.getKey());
-        if (currentId != null && currentId.longValue() <= e.getValue().longValue()) {
+        if (currentId != null && currentId.longValue() < e.getValue().longValue()) {
           String errorStr = Bytes.toString(encodedRegionName) + " family "
               + e.getKey().toStringUtf8() + " acquired edits out of order current memstore seq="
               + currentId + ", previous oldest unflushed id=" + e.getValue();
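
The hunk above is the heart of the accounting change: a family flagged with NO_SEQNUM is
removed from the per-region lowest-unflushed map exactly as before, while a family that
carries a concrete pre-flush estimate stays in the map with that estimate as its new
lowest unflushed sequence id; either way the previous value is still collected into
oldSequenceIds so the accounting can be rolled back if the flush aborts. A minimal
annotated restatement of the hunk, assuming (as in the diff) that 'm' maps family name
to the lowest unflushed sequence id for one region and that oldSequenceIds has already
been created (it is lazily created in the real code):

  for (Map.Entry<byte[], Long> entry : familyToSeq.entrySet()) {
    ImmutableByteArray familyNameWrapper = ImmutableByteArray.wrap(entry.getKey());
    Long seqId;
    if (entry.getValue() == HConstants.NO_SEQNUM) {
      seqId = m.remove(familyNameWrapper);                     // memstore will be emptied
    } else {
      seqId = m.replace(familyNameWrapper, entry.getValue());  // keep the new lower bound
    }
    if (seqId != null) {
      // Remember the old value so a failed flush can restore the previous accounting.
      oldSequenceIds.put(familyNameWrapper, seqId);
    }
  }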

http://git-wip-us.apache.org/repos/asf/hbase/blob/f254e278/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
index 337f2b4..8f224fc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DisabledWALProvider.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.wal;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -195,6 +196,11 @@ class DisabledWALProvider implements WALProvider {
       sync();
     }
 
+    public Long startCacheFlush(final byte[] encodedRegionName, Map<byte[], Long>
+        flushedFamilyNamesToSeq) {
+      return startCacheFlush(encodedRegionName, flushedFamilyNamesToSeq.keySet());
+    }
+
     @Override
     public Long startCacheFlush(final byte[] encodedRegionName, Set<byte[]> flushedFamilyNames) {
       if (closed.get()) return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/f254e278/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
index 030d8b6..b7adc60 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WAL.java
@@ -161,6 +161,8 @@ public interface WAL extends Closeable {
    */
   Long startCacheFlush(final byte[] encodedRegionName, Set<byte[]> families);
 
+  Long startCacheFlush(final byte[] encodedRegionName, Map<byte[], Long> familyToSeq);
+
   /**
    * Complete the cache flush.
    * @param encodedRegionName Encoded region name.


[32/50] [abbrv] hbase git commit: HBASE-10699 Set capacity on ArrayList where possible and use isEmpty instead of size() == 0

Posted by el...@apache.org.
HBASE-10699 Set capacity on ArrayList where possible and use isEmpty instead of size() == 0

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/55a1aa1e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/55a1aa1e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/55a1aa1e

Branch: refs/heads/HBASE-16961
Commit: 55a1aa1e73b5bda877facbcfce6ff7d52b35ecec
Parents: c74cf12
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sat Jan 21 00:38:03 2017 +0100
Committer: Michael Stack <st...@apache.org>
Committed: Fri Jan 20 22:58:20 2017 -0800

----------------------------------------------------------------------
 .../classification/tools/StabilityOptions.java  |   2 +-
 .../apache/hadoop/hbase/HTableDescriptor.java   |   2 +-
 .../apache/hadoop/hbase/MetaTableAccessor.java  |   4 +-
 .../org/apache/hadoop/hbase/client/Append.java  |   2 +-
 .../hadoop/hbase/client/AsyncProcess.java       |   4 +-
 .../hbase/client/AsyncRequestFutureImpl.java    |   2 +-
 .../org/apache/hadoop/hbase/client/Delete.java  |  10 +-
 .../org/apache/hadoop/hbase/client/Get.java     |   4 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   4 +-
 .../apache/hadoop/hbase/client/Mutation.java    |   4 +-
 .../org/apache/hadoop/hbase/client/Scan.java    |   2 +-
 .../replication/ReplicationSerDeHelper.java     |   7 +-
 .../hadoop/hbase/filter/CompareFilter.java      |   2 +-
 .../hadoop/hbase/filter/TimestampsFilter.java   |   2 +-
 .../replication/ReplicationPeersZKImpl.java     |   2 +-
 .../replication/ReplicationQueuesZKImpl.java    |   8 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java     |   4 +-
 .../shaded/protobuf/ResponseConverter.java      |   2 +-
 .../org/apache/hadoop/hbase/util/Writables.java |   2 +-
 .../hbase/zookeeper/MetaTableLocator.java       |   2 +-
 .../hadoop/hbase/client/TestAsyncProcess.java   |  24 ++---
 .../hadoop/hbase/filter/TestKeyOnlyFilter.java  |   2 +-
 .../hbase/ipc/TestHBaseRpcControllerImpl.java   |   3 +-
 .../org/apache/hadoop/hbase/ChoreService.java   |   2 +-
 .../java/org/apache/hadoop/hbase/KeyValue.java  |   2 +-
 .../hadoop/hbase/util/AbstractHBaseTool.java    |   2 +-
 .../apache/hadoop/hbase/ResourceChecker.java    |   4 +-
 .../hbase/util/ClassLoaderTestHelper.java       |   4 +-
 .../org/apache/hadoop/hbase/util/TestBytes.java |   4 +-
 .../client/coprocessor/AggregationClient.java   |   4 +-
 .../coprocessor/TestRowProcessorEndpoint.java   |   6 +-
 .../SecureBulkLoadEndpointClient.java           |   2 +-
 .../apache/hadoop/hbase/thrift/DemoClient.java  |  14 +--
 .../hadoop/hbase/thrift/HttpDoAsClient.java     |   2 +-
 .../apache/hadoop/hbase/thrift2/DemoClient.java |   2 +-
 .../metrics2/util/MetricSampleQuantiles.java    |   4 +-
 .../hbase/IntegrationTestDDLMasterFailover.java |   2 +-
 .../hadoop/hbase/IntegrationTestIngest.java     |   2 +-
 ...tegrationTestIngestWithVisibilityLabels.java |  14 +--
 .../hbase/ipc/IntegrationTestRpcClient.java     |   2 +-
 .../test/IntegrationTestBigLinkedList.java      |   2 +-
 .../row/data/TestRowDataExerciseFInts.java      |   2 +-
 .../row/data/TestRowDataTrivialWithTags.java    |   2 +-
 .../prefixtree/row/data/TestRowDataUrls.java    |   2 +-
 .../timestamp/data/TestTimestampDataBasic.java  |   4 +-
 .../data/TestTimestampDataNumbers.java          |   4 +-
 .../data/TestTimestampDataRepeats.java          |   2 +-
 .../store/wal/ProcedureWALPrettyPrinter.java    |   2 +-
 .../hadoop/hbase/rest/MultiRowResource.java     |   2 +-
 .../hadoop/hbase/rest/client/RemoteHTable.java  |   2 +-
 .../rest/model/NamespacesInstanceModel.java     |   2 +-
 .../hbase/rest/model/NamespacesModel.java       |   2 +-
 .../hadoop/hbase/rest/model/ScannerModel.java   |   2 +-
 .../hadoop/hbase/rest/TestMultiRowResource.java |   2 +-
 .../hbase/rest/TestScannersWithFilters.java     |   2 +-
 .../hbase/rest/TestScannersWithLabels.java      |   2 +-
 .../hadoop/hbase/rest/TestSchemaResource.java   |   2 +-
 .../hbase/rest/client/TestRemoteTable.java      |  14 +--
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |   2 +-
 .../hadoop/hbase/rsgroup/TestRSGroupsBase.java  |   4 +-
 .../hadoop/hbase/backup/HFileArchiver.java      |   4 +-
 .../hbase/errorhandling/ForeignException.java   |   2 +-
 .../favored/FavoredNodeAssignmentHelper.java    |   4 +-
 .../hadoop/hbase/favored/FavoredNodesPlan.java  |   2 +-
 .../apache/hadoop/hbase/http/HttpServer.java    |   6 +-
 .../hbase/mapreduce/TableMapReduceUtil.java     |   2 +-
 .../hadoop/hbase/master/CatalogJanitor.java     |   2 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   2 +-
 .../hadoop/hbase/master/RegionStates.java       |   2 +-
 .../hadoop/hbase/master/ServerManager.java      |   2 +-
 .../hadoop/hbase/master/SplitLogManager.java    |   2 +-
 .../master/balancer/SimpleLoadBalancer.java     |   2 +-
 .../hbase/regionserver/CompactionTool.java      |   2 +-
 .../hadoop/hbase/regionserver/HRegion.java      |   2 +-
 .../hadoop/hbase/regionserver/HStore.java       |   2 +-
 .../compactions/ExploringCompactionPolicy.java  |   2 +-
 .../compactions/StripeCompactionPolicy.java     |   2 +-
 .../querymatcher/LegacyScanQueryMatcher.java    |   2 +-
 .../querymatcher/UserScanQueryMatcher.java      |   2 +-
 .../regionserver/wal/WALEditsReplaySink.java    |   2 +-
 .../replication/master/TableCFsUpdater.java     |   2 +-
 .../regionserver/ReplicationSink.java           |   6 +-
 .../regionserver/ReplicationSource.java         |   4 +-
 .../regionserver/ReplicationSourceManager.java  |   2 +-
 .../DefaultVisibilityLabelServiceImpl.java      |   2 +-
 .../hbase/snapshot/RestoreSnapshotHelper.java   |   8 +-
 .../hadoop/hbase/snapshot/SnapshotInfo.java     |   2 +-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |   2 +-
 .../hbase/snapshot/SnapshotReferenceUtil.java   |   6 +-
 .../org/apache/hadoop/hbase/tool/Canary.java    |   2 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  12 +--
 .../org/apache/hadoop/hbase/util/IdLock.java    |   2 +-
 .../apache/hadoop/hbase/util/RegionMover.java   |   8 +-
 .../hbase/util/hbck/HFileCorruptionChecker.java |  12 +--
 .../hadoop/hbase/wal/AbstractFSWALProvider.java |   2 +-
 .../hadoop/hbase/wal/DisabledWALProvider.java   |   2 +-
 .../org/apache/hadoop/hbase/wal/WALKey.java     |   2 +-
 .../hadoop/hbase/wal/WALPrettyPrinter.java      |   4 +-
 .../hbase/zookeeper/MiniZooKeeperCluster.java   |   2 +-
 .../hbase/TestPartialResultsFromClientSide.java |   2 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   2 +-
 .../client/TestScannersFromClientSide.java      |   2 +-
 .../master/TestDistributedLogSplitting.java     |  16 +--
 .../hbase/master/TestRegionPlacement.java       |  10 +-
 .../hbase/regionserver/TestColumnSeeking.java   |   4 +-
 .../TestDefaultCompactSelection.java            |   2 +-
 .../TestEndToEndSplitTransaction.java           |   2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |   2 +-
 .../TestSplitTransactionOnCluster.java          |   2 +-
 .../TestCompactedHFilesDischarger.java          |   8 +-
 .../compactions/TestStripeCompactionPolicy.java |   4 +-
 .../compactions/TestStripeCompactor.java        |   2 +-
 .../querymatcher/TestExplicitColumnTracker.java |  10 +-
 .../TestScanWildcardColumnTracker.java          |  14 +--
 .../querymatcher/TestUserScanQueryMatcher.java  |  12 +--
 .../regionserver/wal/AbstractTestFSWAL.java     |   2 +-
 .../hbase/regionserver/wal/TestLogRolling.java  |   2 +-
 .../wal/TestWALActionsListener.java             |   2 +-
 .../replication/TestMasterReplication.java      |   4 +-
 .../replication/TestMultiSlaveReplication.java  |   2 +-
 .../replication/TestNamespaceReplication.java   |   2 +-
 .../replication/TestPerTableCFReplication.java  |   2 +-
 ...estReplicationChangingPeerRegionservers.java |   2 +-
 .../TestReplicationDisableInactivePeer.java     |   2 +-
 .../replication/TestReplicationSmallTests.java  |  12 +--
 .../replication/TestReplicationStateBasic.java  |   2 +-
 .../replication/TestReplicationWithTags.java    |   4 +-
 .../replication/TestSerialReplication.java      |   2 +-
 .../regionserver/TestGlobalThrottler.java       |   2 +-
 .../TestReplicationSourceManager.java           |   2 +-
 .../TestReplicationWALReaderManager.java        |   2 +-
 .../security/access/TestAccessController.java   |   6 +-
 .../access/TestZKPermissionsWatcher.java        |   4 +-
 .../visibility/TestVisibilityLabels.java        |   8 +-
 ...sibilityLabelsOpWithDifferentUsersNoACL.java |   2 +-
 .../TestVisibilityLabelsReplication.java        |   4 +-
 .../visibility/TestVisibilityLabelsWithACL.java |   4 +-
 .../TestVisibilityLabelsWithDeletes.java        |  10 +-
 .../TestVisibilityLablesWithGroups.java         |   4 +-
 .../TestWithDisabledAuthorization.java          |   4 +-
 .../snapshot/TestExportSnapshotHelpers.java     |   2 +-
 .../hadoop/hbase/util/BaseTestHBaseFsck.java    |   6 +-
 .../hbase/util/HFileArchiveTestingUtil.java     |   2 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |   2 +-
 .../hbase/util/TestRegionSizeCalculator.java    |   2 +-
 .../hbase/util/TestRegionSplitCalculator.java   |   2 +-
 .../hadoop/hbase/util/TestRegionSplitter.java   |   6 +-
 .../apache/hadoop/hbase/wal/IOTestProvider.java |   2 +-
 .../hbase/zookeeper/TestZooKeeperACL.java       |   2 +-
 .../hadoop/hbase/client/AbstractTestShell.java  |   2 +-
 .../hadoop/hbase/client/TestShellNoCluster.java |   2 +-
 .../hbase/client/rsgroup/TestShellRSGroups.java |   2 +-
 .../JavaHBaseBulkDeleteExample.java             |   2 +-
 .../hbasecontext/JavaHBaseBulkGetExample.java   |   2 +-
 .../hbasecontext/JavaHBaseBulkPutExample.java   |   2 +-
 .../hbasecontext/JavaHBaseMapGetPutExample.java |   2 +-
 .../spark/datasources/HBaseTableScanRDD.scala   |   2 +-
 .../hbase/spark/TestJavaHBaseContext.java       |  10 +-
 .../hadoop/hbase/thrift/ThriftServerRunner.java |   4 +-
 .../hadoop/hbase/thrift/ThriftUtilities.java    |   2 +-
 .../hadoop/hbase/thrift2/ThriftUtilities.java   |   6 +-
 .../hbase/thrift/TestThriftHttpServer.java      |   2 +-
 .../hadoop/hbase/thrift/TestThriftServer.java   |  24 ++---
 .../thrift2/TestThriftHBaseServiceHandler.java  | 100 +++++++++----------
 ...TestThriftHBaseServiceHandlerWithLabels.java |  34 +++----
 165 files changed, 389 insertions(+), 381 deletions(-)
----------------------------------------------------------------------
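
The changes in this commit are two mechanical idioms applied across the 165 files listed
above: give an ArrayList its final capacity when the element count is known up front, and
test emptiness with isEmpty() rather than size() == 0. A small, self-contained sketch of
both (the class name, data, and println are hypothetical, not taken from the patch):

  import java.util.ArrayList;
  import java.util.List;

  public class CapacityIdioms {
    public static void main(String[] args) {
      String[] inputs = {"a", "b", "c"};

      // 1. Pre-sizing avoids the grow-and-copy cycles an ArrayList goes through
      //    once it outgrows its default capacity of 10.
      List<String> upper = new ArrayList<String>(inputs.length);
      for (String s : inputs) {
        upper.add(s.toUpperCase());
      }

      // 2. isEmpty() states the intent directly and can be cheaper than size() == 0
      //    on some collections.
      if (!upper.isEmpty()) {
        System.out.println(upper);
      }
    }
  }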


http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
----------------------------------------------------------------------
diff --git a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
index 0911fd5..6cb03a5 100644
--- a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
+++ b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
@@ -53,7 +53,7 @@ final class StabilityOptions {
   }
 
   public static String[][] filterOptions(String[][] options) {
-    List<String[]> optionsList = new ArrayList<String[]>();
+    List<String[]> optionsList = new ArrayList<String[]>(options.length);
     for (int i = 0; i < options.length; i++) {
       if (!options[i][0].equalsIgnoreCase(UNSTABLE_OPTION)
           && !options[i][0].equalsIgnoreCase(EVOLVING_OPTION)

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index be8e858..60b85fe 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -1387,7 +1387,7 @@ public class HTableDescriptor implements Comparable<HTableDescriptor> {
    * @return The list of co-processors classNames
    */
   public List<String> getCoprocessors() {
-    List<String> result = new ArrayList<String>();
+    List<String> result = new ArrayList<String>(this.values.entrySet().size());
     Matcher keyMatcher;
     for (Map.Entry<Bytes, Bytes> e : this.values.entrySet()) {
       keyMatcher = HConstants.CP_HTD_ATTR_KEY_PATTERN.matcher(Bytes.toString(e.getKey().get()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 4963355..1cc7963 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -1897,7 +1897,7 @@ public class MetaTableAccessor {
    */
   public static void updateReplicationPositions(Connection connection, String peerId,
       Map<String, Long> positions) throws IOException {
-    List<Put> puts = new ArrayList<>();
+    List<Put> puts = new ArrayList<>(positions.entrySet().size());
     for (Map.Entry<String, Long> entry : positions.entrySet()) {
       long value = Math.abs(entry.getValue());
       Put put = new Put(Bytes.toBytes(entry.getKey()));
@@ -2169,7 +2169,7 @@ public class MetaTableAccessor {
       Result result;
       while ((result = scanner.next()) != null) {
         String key = Bytes.toString(result.getRow());
-        List<Long> list = new ArrayList<>();
+        List<Long> list = new ArrayList<>(result.rawCells().length);
         for (Cell cell : result.rawCells()) {
           list.add(Bytes.toLong(cell.getQualifierArray(), cell.getQualifierOffset(),
               cell.getQualifierLength()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index 45f1e46..fd2df93 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -123,7 +123,7 @@ public class Append extends Mutation {
     byte [] family = CellUtil.cloneFamily(cell);
     List<Cell> list = this.familyMap.get(family);
     if (list == null) {
-      list  = new ArrayList<Cell>();
+      list  = new ArrayList<Cell>(1);
     }
     // find where the new entry should be placed in the List
     list.add(cell);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index d1583f5..269d316 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -287,8 +287,8 @@ class AsyncProcess {
           }
           loc = locs.getDefaultRegionLocation();
         } catch (IOException ex) {
-          locationErrors = new ArrayList<Exception>();
-          locationErrorRows = new ArrayList<Integer>();
+          locationErrors = new ArrayList<Exception>(1);
+          locationErrorRows = new ArrayList<Integer>(1);
           LOG.error("Failed to get region location ", ex);
           // This action failed before creating ars. Retain it, but do not add to submit list.
           // We will then add it to ars in an already-failed state.

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index b0a2798..c3caff8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -463,7 +463,7 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
       if (loc == null || loc.getServerName() == null) {
         if (isReplica) {
           if (unknownReplicaActions == null) {
-            unknownReplicaActions = new ArrayList<Action>();
+            unknownReplicaActions = new ArrayList<Action>(1);
           }
           unknownReplicaActions.add(action);
         } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index d61a197..9c6c1a5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -172,7 +172,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     byte [] family = CellUtil.cloneFamily(kv);
     List<Cell> list = familyMap.get(family);
     if (list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     }
     list.add(kv);
     familyMap.put(family, list);
@@ -209,7 +209,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     }
     List<Cell> list = familyMap.get(family);
     if(list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     } else if(!list.isEmpty()) {
       list.clear();
     }
@@ -229,7 +229,7 @@ public class Delete extends Mutation implements Comparable<Row> {
   public Delete addFamilyVersion(final byte [] family, final long timestamp) {
     List<Cell> list = familyMap.get(family);
     if(list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     }
     list.add(new KeyValue(row, family, null, timestamp,
           KeyValue.Type.DeleteFamilyVersion));
@@ -262,7 +262,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     }
     List<Cell> list = familyMap.get(family);
     if (list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     }
     list.add(new KeyValue(this.row, family, qualifier, timestamp,
         KeyValue.Type.DeleteColumn));
@@ -297,7 +297,7 @@ public class Delete extends Mutation implements Comparable<Row> {
     }
     List<Cell> list = familyMap.get(family);
     if(list == null) {
-      list = new ArrayList<Cell>();
+      list = new ArrayList<Cell>(1);
     }
     KeyValue kv = new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.Delete);
     list.add(kv);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index 4dc656f..947b54a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -400,7 +400,7 @@ public class Get extends Query
   @Override
   public Map<String, Object> getFingerprint() {
     Map<String, Object> map = new HashMap<String, Object>();
-    List<String> families = new ArrayList<String>();
+    List<String> families = new ArrayList<String>(this.familyMap.entrySet().size());
     map.put("families", families);
     for (Map.Entry<byte [], NavigableSet<byte[]>> entry :
       this.familyMap.entrySet()) {
@@ -428,7 +428,7 @@ public class Get extends Query
     map.put("row", Bytes.toStringBinary(this.row));
     map.put("maxVersions", this.maxVersions);
     map.put("cacheBlocks", this.cacheBlocks);
-    List<Long> timeRange = new ArrayList<Long>();
+    List<Long> timeRange = new ArrayList<Long>(2);
     timeRange.add(this.tr.getMin());
     timeRange.add(this.tr.getMax());
     map.put("timeRange", timeRange);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index db9cea5..9befc48 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -3865,7 +3865,7 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public void drainRegionServers(List<ServerName> servers) throws IOException {
-    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>();
+    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>(servers.size());
     for (ServerName server : servers) {
       // Parse to ServerName to do simple validation.
       ServerName.parseServerName(server.toString());
@@ -3902,7 +3902,7 @@ public class HBaseAdmin implements Admin {
 
   @Override
   public void removeDrainFromRegionServers(List<ServerName> servers) throws IOException {
-    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>();
+    final List<HBaseProtos.ServerName> pbServers = new ArrayList<HBaseProtos.ServerName>(servers.size());
     for (ServerName server : servers) {
       pbServers.add(ProtobufUtil.toServerName(server));
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 42c159e..53631d9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -159,7 +159,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
   @Override
   public Map<String, Object> getFingerprint() {
     Map<String, Object> map = new HashMap<String, Object>();
-    List<String> families = new ArrayList<String>();
+    List<String> families = new ArrayList<String>(this.familyMap.entrySet().size());
     // ideally, we would also include table information, but that information
     // is not stored in each Operation instance.
     map.put("families", families);
@@ -227,7 +227,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
     stringMap.put("vlen", c.getValueLength());
     List<Tag> tags = CellUtil.getTags(c);
     if (tags != null) {
-      List<String> tagsString = new ArrayList<String>();
+      List<String> tagsString = new ArrayList<String>(tags.size());
       for (Tag t : tags) {
         tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(TagUtil.cloneValue(t)));
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index a2d9037..8d53b9a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -908,7 +908,7 @@ public class Scan extends Query {
     map.put("maxResultSize", this.maxResultSize);
     map.put("cacheBlocks", this.cacheBlocks);
     map.put("loadColumnFamiliesOnDemand", this.loadColumnFamiliesOnDemand);
-    List<Long> timeRange = new ArrayList<Long>();
+    List<Long> timeRange = new ArrayList<Long>(2);
     timeRange.add(this.tr.getMin());
     timeRange.add(this.tr.getMax());
     map.put("timeRange", timeRange);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
index 93eea17..2965219 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationSerDeHelper.java
@@ -68,7 +68,7 @@ public final class ReplicationSerDeHelper {
     if (tableCfs == null) {
       return null;
     }
-    List<ReplicationProtos.TableCF> tableCFList = new ArrayList<>();
+    List<ReplicationProtos.TableCF> tableCFList = new ArrayList<>(tableCfs.entrySet().size());
     ReplicationProtos.TableCF.Builder tableCFBuilder =  ReplicationProtos.TableCF.newBuilder();
     for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
       tableCFBuilder.clear();
@@ -100,10 +100,11 @@ public final class ReplicationSerDeHelper {
     if (tableCFsConfig == null || tableCFsConfig.trim().length() == 0) {
       return null;
     }
-    List<ReplicationProtos.TableCF> tableCFList = new ArrayList<>();
-    ReplicationProtos.TableCF.Builder tableCFBuilder = ReplicationProtos.TableCF.newBuilder();
 
+    ReplicationProtos.TableCF.Builder tableCFBuilder = ReplicationProtos.TableCF.newBuilder();
     String[] tables = tableCFsConfig.split(";");
+    List<ReplicationProtos.TableCF> tableCFList = new ArrayList<>(tables.length);
+
     for (String tab : tables) {
       // 1 ignore empty table config
       tab = tab.trim();

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
index 6b64e0d..e74797d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/CompareFilter.java
@@ -177,7 +177,7 @@ public abstract class CompareFilter extends FilterBase {
                                             " can only be used with EQUAL and NOT_EQUAL");
       }
     }
-    ArrayList<Object> arguments = new ArrayList<Object>();
+    ArrayList<Object> arguments = new ArrayList<Object>(2);
     arguments.add(compareOp);
     arguments.add(comparator);
     return arguments;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
index 2f10d9a..921b7b4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java
@@ -157,7 +157,7 @@ public class TimestampsFilter extends FilterBase {
   }
 
   public static Filter createFilterFromArguments(ArrayList<byte []> filterArguments) {
-    ArrayList<Long> timestamps = new ArrayList<Long>();
+    ArrayList<Long> timestamps = new ArrayList<Long>(filterArguments.size());
     for (int i = 0; i<filterArguments.size(); i++) {
       long timestamp = ParseFilter.convertByteArrayToLong(filterArguments.get(i));
       timestamps.add(timestamp);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index a4b09c3..cf5be83 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -128,7 +128,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
 
       ZKUtil.createWithParents(this.zookeeper, this.peersZNode);
 
-      List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
+      List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>(2);
       ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(getPeerNode(id),
         ReplicationSerDeHelper.toByteArray(peerConfig));
       // b/w PeerWatcher and ReplicationZookeeper#add method to create the

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 1de1315..484084e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -327,8 +327,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
     if (debugEnabled) {
       LOG.debug("Adding hfile references " + pairs + " in queue " + peerZnode);
     }
-    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
+
     int size = pairs.size();
+    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>(size);
+
     for (int i = 0; i < size; i++) {
       listOfOps.add(ZKUtilOp.createAndFailSilent(
         ZKUtil.joinZNode(peerZnode, pairs.get(i).getSecond().getName()),
@@ -352,8 +354,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
     if (debugEnabled) {
       LOG.debug("Removing hfile references " + files + " from queue " + peerZnode);
     }
-    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
+
     int size = files.size();
+    List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>(size);
+
     for (int i = 0; i < size; i++) {
       listOfOps.add(ZKUtilOp.deleteNodeFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i))));
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index d862d5f..7764f65 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -1875,7 +1875,7 @@ public final class ProtobufUtil {
    */
   static List<HRegionInfo> getRegionInfos(final GetOnlineRegionResponse proto) {
     if (proto == null) return null;
-    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>();
+    List<HRegionInfo> regionInfos = new ArrayList<HRegionInfo>(proto.getRegionInfoList().size());
     for (RegionInfo regionInfo: proto.getRegionInfoList()) {
       regionInfos.add(HRegionInfo.convert(regionInfo));
     }
@@ -2691,7 +2691,7 @@ public final class ProtobufUtil {
 
   public static List<ReplicationLoadSource> toReplicationLoadSourceList(
       List<ClusterStatusProtos.ReplicationLoadSource> clsList) {
-    ArrayList<ReplicationLoadSource> rlsList = new ArrayList<ReplicationLoadSource>();
+    ArrayList<ReplicationLoadSource> rlsList = new ArrayList<ReplicationLoadSource>(clsList.size());
     for (ClusterStatusProtos.ReplicationLoadSource cls : clsList) {
       rlsList.add(toReplicationLoadSource(cls));
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
index 760f630..a83667d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
@@ -232,7 +232,7 @@ public final class ResponseConverter {
   public static List<RegionOpeningState> getRegionOpeningStateList(
       final OpenRegionResponse proto) {
     if (proto == null) return null;
-    List<RegionOpeningState> regionOpeningStates = new ArrayList<RegionOpeningState>();
+    List<RegionOpeningState> regionOpeningStates = new ArrayList<RegionOpeningState>(proto.getOpeningStateCount());
     for (int i = 0; i < proto.getOpeningStateCount(); i++) {
       regionOpeningStates.add(RegionOpeningState.valueOf(
           proto.getOpeningState(i).name()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
index e04d789..940d523 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
@@ -68,7 +68,7 @@ public class Writables {
    * @throws IOException e
    */
   public static byte [] getBytes(final Writable... ws) throws IOException {
-    List<byte []> bytes = new ArrayList<byte []>();
+    List<byte []> bytes = new ArrayList<byte []>(ws.length);
     int size = 0;
     for (Writable w: ws) {
       byte [] b = getBytes(w);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
index c18811e..e8431a2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaTableLocator.java
@@ -106,7 +106,7 @@ public class MetaTableLocator {
   public List<Pair<HRegionInfo, ServerName>> getMetaRegionsAndLocations(ZooKeeperWatcher zkw,
       int replicaId) {
     ServerName serverName = getMetaRegionLocation(zkw, replicaId);
-    List<Pair<HRegionInfo, ServerName>> list = new ArrayList<Pair<HRegionInfo, ServerName>>();
+    List<Pair<HRegionInfo, ServerName>> list = new ArrayList<Pair<HRegionInfo, ServerName>>(1);
     list.add(new Pair<HRegionInfo, ServerName>(RegionReplicaUtil.getRegionInfoForReplica(
         HRegionInfo.FIRST_META_REGIONINFO, replicaId), serverName));
     return list;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index ed7202a..9a09aeb 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -682,7 +682,7 @@ public class TestAsyncProcess {
     ClusterConnection hc = createHConnection();
     MyAsyncProcess ap = new MyAsyncProcess(hc, CONF);
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     puts.add(createPut(1, true));
 
     ap.submit(null, DUMMY_TABLE, puts, false, null, false);
@@ -701,7 +701,7 @@ public class TestAsyncProcess {
     };
     MyAsyncProcess ap = new MyAsyncProcess(hc, CONF);
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     puts.add(createPut(1, true));
 
     final AsyncRequestFuture ars = ap.submit(null, DUMMY_TABLE, puts, false, cb, false);
@@ -718,7 +718,7 @@ public class TestAsyncProcess {
       SimpleRequestController.class.getName());
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF);
     SimpleRequestController controller = (SimpleRequestController) ap.requestController;
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     puts.add(createPut(1, true));
 
     for (int i = 0; i != controller.maxConcurrentTasksPerRegion; ++i) {
@@ -747,7 +747,7 @@ public class TestAsyncProcess {
     SimpleRequestController controller = (SimpleRequestController) ap.requestController;
     controller.taskCounterPerServer.put(sn2, new AtomicInteger(controller.maxConcurrentTasksPerServer));
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(4);
     puts.add(createPut(1, true));
     puts.add(createPut(3, true)); // <== this one won't be taken, the rs is busy
     puts.add(createPut(1, true)); // <== this one will make it, the region is already in
@@ -769,7 +769,7 @@ public class TestAsyncProcess {
   public void testFail() throws Exception {
     MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false);
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     Put p = createPut(1, false);
     puts.add(p);
 
@@ -817,7 +817,7 @@ public class TestAsyncProcess {
       }
     };
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     Put p = createPut(1, true);
     puts.add(p);
 
@@ -843,7 +843,7 @@ public class TestAsyncProcess {
   public void testFailAndSuccess() throws Exception {
     MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false);
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(3);
     puts.add(createPut(1, false));
     puts.add(createPut(1, true));
     puts.add(createPut(1, true));
@@ -870,7 +870,7 @@ public class TestAsyncProcess {
   public void testFlush() throws Exception {
     MyAsyncProcess ap = new MyAsyncProcess(createHConnection(), CONF, false);
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(3);
     puts.add(createPut(1, false));
     puts.add(createPut(1, true));
     puts.add(createPut(1, true));
@@ -955,7 +955,7 @@ public class TestAsyncProcess {
       }
     };
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     puts.add(createPut(1, true));
 
     t.start();
@@ -1171,7 +1171,7 @@ public class TestAsyncProcess {
     HTable ht = new HTable(conn, mutator);
     ht.multiAp = new MyAsyncProcess(conn, CONF, false);
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(7);
     puts.add(createPut(1, true));
     puts.add(createPut(1, true));
     puts.add(createPut(1, true));
@@ -1517,7 +1517,7 @@ public class TestAsyncProcess {
   }
 
   private static List<Get> makeTimelineGets(byte[]... rows) {
-    List<Get> result = new ArrayList<Get>();
+    List<Get> result = new ArrayList<Get>(rows.length);
     for (byte[] row : rows) {
       Get get = new Get(row);
       get.setConsistency(Consistency.TIMELINE);
@@ -1611,7 +1611,7 @@ public class TestAsyncProcess {
             new LinkedBlockingQueue<Runnable>(200));
     AsyncProcess ap = new AsyncProcessForThrowableCheck(hc, CONF);
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(1);
     puts.add(createPut(1, true));
     AsyncProcessTask task = AsyncProcessTask.newBuilder()
             .setPool(myPool)

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
index 0e88c4c..e93319a 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java
@@ -48,7 +48,7 @@ public class TestKeyOnlyFilter {
 
   @Parameters
   public static Collection<Object[]> parameters() {
-    List<Object[]> paramList = new ArrayList<Object[]>();
+    List<Object[]> paramList = new ArrayList<Object[]>(2);
     {
       paramList.add(new Object[] { false });
       paramList.add(new Object[] { true });

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java
index d9535a6..0659f30 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java
@@ -39,8 +39,9 @@ public class TestHBaseRpcControllerImpl {
 
   @Test
   public void testListOfCellScannerables() throws IOException {
-    List<CellScannable> cells = new ArrayList<CellScannable>();
     final int count = 10;
+    List<CellScannable> cells = new ArrayList<CellScannable>(count);
+
     for (int i = 0; i < count; i++) {
       cells.add(createCell(i));
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
index 1623c10..99dc163 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ChoreService.java
@@ -348,7 +348,7 @@ public class ChoreService implements ChoreServicer {
   }
 
   private void cancelAllChores(final boolean mayInterruptIfRunning) {
-    ArrayList<ScheduledChore> choresToCancel = new ArrayList<ScheduledChore>();
+    ArrayList<ScheduledChore> choresToCancel = new ArrayList<ScheduledChore>(scheduledChores.keySet().size());
     // Build list of chores to cancel so we can iterate through a set that won't change
     // as chores are cancelled. If we tried to cancel each chore while iterating through
     // keySet the results would be undefined because the keySet would be changing

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 51038ed..0434820 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -1184,7 +1184,7 @@ public class KeyValue implements ExtendedCell {
     stringMap.put("vlen", getValueLength());
     List<Tag> tags = getTags();
     if (tags != null) {
-      List<String> tagsString = new ArrayList<String>();
+      List<String> tagsString = new ArrayList<String>(tags.size());
       for (Tag t : tags) {
         tagsString.add(t.toString());
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
index a790920..a51a80f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractHBaseTool.java
@@ -116,7 +116,7 @@ public abstract class AbstractHBaseTool implements Tool, Configurable {
     }
 
     CommandLine cmd;
-    List<String> argsList = new ArrayList<>();
+    List<String> argsList = new ArrayList<>(args.length);
     for (String arg : args) {
       argsList.add(arg);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java
index ee0380a..310a2fb 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ResourceChecker.java
@@ -180,7 +180,7 @@ public class ResourceChecker {
    * - logs them.
    */
   public void start() {
-    if (ras.size() == 0) {
+    if (ras.isEmpty()) {
       LOG.info("No resource analyzer");
       return;
     }
@@ -197,7 +197,7 @@ public class ResourceChecker {
    * - logs them.
    */
   public void end() {
-    if (ras.size() == 0) {
+    if (ras.isEmpty()) {
       LOG.info("No resource analyzer");
       return;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java
index ef213ee..30e33d9 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/ClassLoaderTestHelper.java
@@ -133,13 +133,13 @@ public class ClassLoaderTestHelper {
 
     // compile it by JavaCompiler
     JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
-    ArrayList<String> srcFileNames = new ArrayList<String>();
+    ArrayList<String> srcFileNames = new ArrayList<String>(1);
     srcFileNames.add(sourceCodeFile.toString());
     StandardJavaFileManager fm = compiler.getStandardFileManager(null, null,
       null);
     Iterable<? extends JavaFileObject> cu =
       fm.getJavaFileObjects(sourceCodeFile);
-    List<String> options = new ArrayList<String>();
+    List<String> options = new ArrayList<String>(2);
     options.add("-classpath");
     // only add hbase classes to classpath. This is a little bit tricky: assume
     // the classpath is {hbaseSrc}/target/classes.

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
index 42afb28..e145642 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestBytes.java
@@ -498,7 +498,7 @@ public class TestBytes extends TestCase {
   }
   
   public void testToFromHex() {
-    List<String> testStrings = new ArrayList<String>();
+    List<String> testStrings = new ArrayList<String>(8);
     testStrings.addAll(Arrays.asList(new String[] {
         "",
         "00",
@@ -517,7 +517,7 @@ public class TestBytes extends TestCase {
       Assert.assertTrue(testString.equalsIgnoreCase(result));
     }
     
-    List<byte[]> testByteData = new ArrayList<byte[]>();
+    List<byte[]> testByteData = new ArrayList<byte[]>(5);
     testByteData.addAll(Arrays.asList(new byte[][] {
       new byte[0],
       new byte[1],

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
index d236342..1eda730 100644
--- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
+++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java
@@ -587,7 +587,7 @@ public class AggregationClient implements Closeable {
       S sumVal = null, sumSqVal = null;
 
       public synchronized Pair<List<S>, Long> getStdParams() {
-        List<S> l = new ArrayList<S>();
+        List<S> l = new ArrayList<S>(2);
         l.add(sumVal);
         l.add(sumSqVal);
         Pair<List<S>, Long> p = new Pair<List<S>, Long>(l, rowCountVal);
@@ -704,7 +704,7 @@ public class AggregationClient implements Closeable {
       S sumVal = null, sumWeights = null;
 
       public synchronized Pair<NavigableMap<byte[], List<S>>, List<S>> getMedianParams() {
-        List<S> l = new ArrayList<S>();
+        List<S> l = new ArrayList<S>(2);
         l.add(sumVal);
         l.add(sumWeights);
         Pair<NavigableMap<byte[], List<S>>, List<S>> p =

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java
index ade738e..7bb6e2b 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java
@@ -353,7 +353,7 @@ public class TestRowProcessorEndpoint {
         Scan scan = new Scan(row, row);
         scan.addColumn(FAM, COUNTER);
         doScan(region, scan, kvs);
-        counter = kvs.size() == 0 ? 0 :
+        counter = kvs.isEmpty() ? 0 :
           Bytes.toInt(CellUtil.cloneValue(kvs.iterator().next()));
 
         // Assert counter value
@@ -497,7 +497,7 @@ public class TestRowProcessorEndpoint {
 
       @Override
       public Collection<byte[]> getRowsToLock() {
-        List<byte[]> rows = new ArrayList<byte[]>();
+        List<byte[]> rows = new ArrayList<byte[]>(2);
         rows.add(row1);
         rows.add(row2);
         return rows;
@@ -538,7 +538,7 @@ public class TestRowProcessorEndpoint {
         swapped = !swapped;
 
         // Add and delete keyvalues
-        List<List<Cell>> kvs = new ArrayList<List<Cell>>();
+        List<List<Cell>> kvs = new ArrayList<List<Cell>>(2);
         kvs.add(kvs1);
         kvs.add(kvs2);
         byte[][] rows = new byte[][]{row1, row2};

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
----------------------------------------------------------------------
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
index a82900d..06e45eb 100644
--- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
+++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java
@@ -133,7 +133,7 @@ public class SecureBulkLoadEndpointClient {
       }
 
       List<ClientProtos.BulkLoadHFileRequest.FamilyPath> protoFamilyPaths =
-          new ArrayList<ClientProtos.BulkLoadHFileRequest.FamilyPath>();
+          new ArrayList<ClientProtos.BulkLoadHFileRequest.FamilyPath>(familyPaths.size());
       for(Pair<byte[], String> el: familyPaths) {
         protoFamilyPaths.add(ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder()
           .setFamily(ByteStringer.wrap(el.getFirst()))

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
index 64adc93..b16ef7b 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/DemoClient.java
@@ -154,7 +154,7 @@ public class DemoClient {
         //
         // Create the demo table with two column families, entry: and unused:
         //
-        ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>();
+        ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>(2);
         ColumnDescriptor col;
         col = new ColumnDescriptor();
         col.name = ByteBuffer.wrap(bytes("entry:"));
@@ -194,7 +194,7 @@ public class DemoClient {
 
         ArrayList<Mutation> mutations;
         // non-utf8 is fine for data
-        mutations = new ArrayList<Mutation>();
+        mutations = new ArrayList<Mutation>(1);
         mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")),
             ByteBuffer.wrap(invalid), writeToWal));
         client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(bytes("foo")),
@@ -202,13 +202,13 @@ public class DemoClient {
 
 
         // this row name is valid utf8
-        mutations = new ArrayList<Mutation>();
+        mutations = new ArrayList<Mutation>(1);
         mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(valid), writeToWal));
         client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(valid), mutations, dummyAttributes);
 
         // non-utf8 is now allowed in row names because HBase stores values as binary
 
-        mutations = new ArrayList<Mutation>();
+        mutations = new ArrayList<Mutation>(1);
         mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(invalid), writeToWal));
         client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(invalid), mutations, dummyAttributes);
 
@@ -238,7 +238,7 @@ public class DemoClient {
             nf.setGroupingUsed(false);
             byte[] row = bytes(nf.format(i));
 
-            mutations = new ArrayList<Mutation>();
+            mutations = new ArrayList<Mutation>(1);
             mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("unused:")), ByteBuffer.wrap(bytes("DELETE_ME")), writeToWal));
             client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes);
             printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes));
@@ -251,14 +251,14 @@ public class DemoClient {
                 // no-op
             }
 
-            mutations = new ArrayList<Mutation>();
+            mutations = new ArrayList<Mutation>(2);
             mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:num")), ByteBuffer.wrap(bytes("0")), writeToWal));
             mutations.add(new Mutation(false, ByteBuffer.wrap(bytes("entry:foo")), ByteBuffer.wrap(bytes("FOO")), writeToWal));
             client.mutateRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), mutations, dummyAttributes);
             printRow(client.getRow(ByteBuffer.wrap(t), ByteBuffer.wrap(row), dummyAttributes));
 
             Mutation m;
-            mutations = new ArrayList<Mutation>();
+            mutations = new ArrayList<Mutation>(2);
             m = new Mutation();
             m.column = ByteBuffer.wrap(bytes("entry:foo"));
             m.isDelete = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java
index df18fed..666891c 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift/HttpDoAsClient.java
@@ -151,7 +151,7 @@ public class HttpDoAsClient {
     //
     // Create the demo table with two column families, entry: and unused:
     //
-    ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>();
+    ArrayList<ColumnDescriptor> columns = new ArrayList<ColumnDescriptor>(2);
     ColumnDescriptor col;
     col = new ColumnDescriptor();
     col.name = ByteBuffer.wrap(bytes("entry:"));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java
index e96688e..4083792 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/thrift2/DemoClient.java
@@ -126,7 +126,7 @@ public class DemoClient {
     columnValue.setFamily("family1".getBytes());
     columnValue.setQualifier("qualifier1".getBytes());
     columnValue.setValue("value1".getBytes());
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(columnValue);
     put.setColumnValues(columnValues);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
index 96c79ae..a968aca 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java
@@ -134,7 +134,7 @@ public class MetricSampleQuantiles {
 
     // Base case: no samples
     int start = 0;
-    if (samples.size() == 0) {
+    if (samples.isEmpty()) {
       SampleItem newItem = new SampleItem(buffer[0], 1, 0);
       samples.add(newItem);
       start++;
@@ -203,7 +203,7 @@ public class MetricSampleQuantiles {
    * @return Estimated value at that quantile.
    */
   private long query(double quantile) throws IOException {
-    if (samples.size() == 0) {
+    if (samples.isEmpty()) {
       throw new IOException("No samples present");
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java
index 59cb8dd..5d79722 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestDDLMasterFailover.java
@@ -924,7 +924,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
     String numThreadKey = String.format(NUM_THREADS_KEY, this.getClass().getSimpleName());
     numThreads = util.getConfiguration().getInt(numThreadKey, DEFAULT_NUM_THREADS);
 
-    ArrayList<Worker> workers = new ArrayList<>();
+    ArrayList<Worker> workers = new ArrayList<>(numThreads);
     for (int i = 0; i < numThreads; i++) {
       checkException(workers);
       Worker worker = new Worker();

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
index 0df4927..5c41fa0 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngest.java
@@ -225,7 +225,7 @@ public class IntegrationTestIngest extends IntegrationTestBase {
 
   protected String[] getArgsForLoadTestTool(String mode, String modeSpecificArg, long startKey,
       long numKeys) {
-    List<String> args = new ArrayList<String>();
+    List<String> args = new ArrayList<String>(11);
     args.add("-tn");
     args.add(getTablename().getNameAsString());
     args.add("-families");

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithVisibilityLabels.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithVisibilityLabels.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithVisibilityLabels.java
index b942918..133be1a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithVisibilityLabels.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithVisibilityLabels.java
@@ -45,28 +45,28 @@ public class IntegrationTestIngestWithVisibilityLabels extends IntegrationTestIn
   private static final List<List<String>> AUTHS = new ArrayList<List<String>>();
 
   static {
-    ArrayList<String> tmp = new ArrayList<String>();
+    ArrayList<String> tmp = new ArrayList<String>(2);
     tmp.add("secret");
     tmp.add("confidential");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(1);
     tmp.add("topsecret");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(2);
     tmp.add("confidential");
     tmp.add("private");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(1);
     tmp.add("public");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(2);
     tmp.add("topsecret");
     tmp.add("private");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(1);
     tmp.add("confidential");
     AUTHS.add(tmp);
-    tmp = new ArrayList<String>();
+    tmp = new ArrayList<String>(2);
     tmp.add("topsecret");
     tmp.add("private");
     AUTHS.add(tmp);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
index 219a4e0..df83731 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
@@ -392,7 +392,7 @@ public class IntegrationTestRpcClient {
       cluster.startServer();
     }
 
-    ArrayList<SimpleClient> clients = new ArrayList<>();
+    ArrayList<SimpleClient> clients = new ArrayList<>(30);
 
     // all threads should share the same rpc client
     AbstractRpcClient<?> rpcClient = createRpcClient(conf, isSyncClient);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
index 135bea7..841d7d0 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestBigLinkedList.java
@@ -1181,7 +1181,7 @@ public class IntegrationTestBigLinkedList extends IntegrationTestBase {
             // useless for debugging.
             context.getCounter("undef", keyString).increment(1);
           }
-        } else if (defCount > 0 && refs.size() == 0) {
+        } else if (defCount > 0 && refs.isEmpty()) {
           // node is defined but not referenced
           context.write(key, UNREF);
           context.getCounter(Counts.UNREFERENCED).increment(1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataExerciseFInts.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataExerciseFInts.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataExerciseFInts.java
index c49db13..39140a3 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataExerciseFInts.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataExerciseFInts.java
@@ -43,7 +43,7 @@ public class TestRowDataExerciseFInts extends BaseTestRowData{
 
   static List<ByteRange> rows;
   static{
-    List<String> rowStrings = new ArrayList<String>();
+    List<String> rowStrings = new ArrayList<String>(16);
     rowStrings.add("com.edsBlog/directoryAa/pageAaa");
     rowStrings.add("com.edsBlog/directoryAa/pageBbb");
     rowStrings.add("com.edsBlog/directoryAa/pageCcc");

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java
index a615155..2d3901f 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataTrivialWithTags.java
@@ -46,7 +46,7 @@ public class TestRowDataTrivialWithTags extends BaseTestRowData{
 
   static List<KeyValue> d = Lists.newArrayList();
   static {
-    List<Tag> tagList = new ArrayList<Tag>();
+    List<Tag> tagList = new ArrayList<Tag>(2);
     Tag t = new ArrayBackedTag((byte) 1, "visisbility");
     tagList.add(t);
     t = new ArrayBackedTag((byte) 2, "ACL");

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataUrls.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataUrls.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataUrls.java
index 74ac85a..a71daaa 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataUrls.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/row/data/TestRowDataUrls.java
@@ -41,7 +41,7 @@ public class TestRowDataUrls extends BaseTestRowData{
 
   static List<ByteRange> rows;
   static{
-    List<String> rowStrings = new ArrayList<String>();
+    List<String> rowStrings = new ArrayList<String>(16);
     rowStrings.add("com.edsBlog/directoryAa/pageAaa");
     rowStrings.add("com.edsBlog/directoryAa/pageBbb");
     rowStrings.add("com.edsBlog/directoryAa/pageCcc");

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataBasic.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataBasic.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataBasic.java
index f11fab4..bccff6d 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataBasic.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataBasic.java
@@ -27,7 +27,7 @@ public class TestTimestampDataBasic implements TestTimestampData {
 
   @Override
   public List<Long> getInputs() {
-    List<Long> d = new ArrayList<Long>();
+    List<Long> d = new ArrayList<Long>(5);
     d.add(5L);
     d.add(3L);
     d.add(0L);
@@ -43,7 +43,7 @@ public class TestTimestampDataBasic implements TestTimestampData {
 
   @Override
   public List<Long> getOutputs() {
-    List<Long> d = new ArrayList<Long>();
+    List<Long> d = new ArrayList<Long>(4);
     d.add(0L);
     d.add(1L);
     d.add(3L);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataNumbers.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataNumbers.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataNumbers.java
index f5ed89d..2a5dcae 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataNumbers.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataNumbers.java
@@ -29,7 +29,7 @@ public class TestTimestampDataNumbers implements TestTimestampData {
 
   @Override
   public List<Long> getInputs() {
-    List<Long> d = new ArrayList<Long>();
+    List<Long> d = new ArrayList<Long>(5);
     d.add(5L << shift);
     d.add(3L << shift);
     d.add(7L << shift);
@@ -45,7 +45,7 @@ public class TestTimestampDataNumbers implements TestTimestampData {
 
   @Override
   public List<Long> getOutputs() {
-    List<Long> d = new ArrayList<Long>();
+    List<Long> d = new ArrayList<Long>(4);
     d.add(1L << shift);
     d.add(3L << shift);
     d.add(5L << shift);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataRepeats.java
----------------------------------------------------------------------
diff --git a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataRepeats.java b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataRepeats.java
index 69548d2..2186528 100644
--- a/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataRepeats.java
+++ b/hbase-prefix-tree/src/test/java/org/apache/hadoop/hbase/codec/prefixtree/timestamp/data/TestTimestampDataRepeats.java
@@ -29,7 +29,7 @@ public class TestTimestampDataRepeats implements TestTimestampData {
 
   @Override
   public List<Long> getInputs() {
-    List<Long> d = new ArrayList<Long>();
+    List<Long> d = new ArrayList<Long>(5);
     d.add(t);
     d.add(t);
     d.add(t);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
index 5f7db95..ec59607 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java
@@ -163,7 +163,7 @@ public class ProcedureWALPrettyPrinter extends Configured implements Tool {
         files.add(new Path(cmd.getOptionValue("f")));
       }
 
-      if (files.size() == 0 || cmd.hasOption("h")) {
+      if (files.isEmpty() || cmd.hasOption("h")) {
         HelpFormatter formatter = new HelpFormatter();
         formatter.printHelp("ProcedureWALPrettyPrinter ", options, true);
         return(-1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index b952c00..f7200cf 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -105,7 +105,7 @@ public class MultiRowResource extends ResourceBase implements Constants {
         }
       }
 
-      if (model.getRows().size() == 0) {
+      if (model.getRows().isEmpty()) {
       //If no rows found.
         servlet.getMetrics().incrementFailedGetRequests(1);
         return Response.status(Response.Status.NOT_FOUND)

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
index 5debf39..51a75d7 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteHTable.java
@@ -175,7 +175,7 @@ public class RemoteHTable implements Table {
   protected Result[] buildResultFromModel(final CellSetModel model) {
     List<Result> results = new ArrayList<Result>();
     for (RowModel row: model.getRows()) {
-      List<Cell> kvs = new ArrayList<Cell>();
+      List<Cell> kvs = new ArrayList<Cell>(row.getCells().size());
       for (CellModel cell: row.getCells()) {
         byte[][] split = KeyValue.parseColumn(cell.getColumn());
         byte[] column = split[0];

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
index b31ecf9..0c5af3c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java
@@ -87,7 +87,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan
     NamespaceDescriptor nd = admin.getNamespaceDescriptor(namespaceName);
 
     // For properly formed JSON, if no properties, field has to be null (not just no elements).
-    if(nd.getConfiguration().size() == 0){ return; }
+    if(nd.getConfiguration().isEmpty()){ return; }
 
     properties = new HashMap<String,String>();
     properties.putAll(nd.getConfiguration());

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
index d6a5685..aed80aa 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java
@@ -66,7 +66,7 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler {
    */
   public NamespacesModel(Admin admin) throws IOException {
     NamespaceDescriptor[] nds = admin.listNamespaceDescriptors();
-    namespaces = new ArrayList<String>();
+    namespaces = new ArrayList<String>(nds.length);
     for (NamespaceDescriptor nd : nds) {
       namespaces.add(nd.getName());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
index 024fec8..d484633 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
@@ -384,7 +384,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
         filter = new FamilyFilter(CompareOp.valueOf(op), comparator.build());
         break;
       case FilterList: {
-        List<Filter> list = new ArrayList<Filter>();
+        List<Filter> list = new ArrayList<Filter>(filters.size());
         for (FilterModel model: filters) {
           list.add(model.build());
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
index 84d1855..c1087b5 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestMultiRowResource.java
@@ -82,7 +82,7 @@ public class TestMultiRowResource {
 
   @Parameterized.Parameters
   public static Collection<Object[]> data() {
-    List<Object[]> params = new ArrayList<Object[]>();
+    List<Object[]> params = new ArrayList<Object[]>(2);
     params.add(new Object[] {Boolean.TRUE});
     params.add(new Object[] {Boolean.FALSE});
     return params;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
index eed4f1a..bedd08e 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithFilters.java
@@ -957,7 +957,7 @@ public class TestScannersWithFilters {
     // Test getting a single row, single key using Row, Qualifier, and Value
     // regular expression and substring filters
     // Use must pass all
-    List<Filter> filters = new ArrayList<Filter>();
+    List<Filter> filters = new ArrayList<Filter>(3);
     filters.add(new RowFilter(CompareOp.EQUAL,
       new RegexStringComparator(".+-2")));
     filters.add(new QualifierFilter(CompareOp.EQUAL,

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
index cb235aa..8b0b5b2 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestScannersWithLabels.java
@@ -94,7 +94,7 @@ public class TestScannersWithLabels {
     byte[] k = new byte[3];
     byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));
 
-    List<Put> puts = new ArrayList<>();
+    List<Put> puts = new ArrayList<>(9);
     for (int i = 0; i < 9; i++) {
       Put put = new Put(Bytes.toBytes("row" + i));
       put.setDurability(Durability.SKIP_WAL);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
index 19fdaf0..96106e9 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/TestSchemaResource.java
@@ -73,7 +73,7 @@ public class TestSchemaResource {
 
   @Parameterized.Parameters
   public static Collection<Object[]> data() {
-    List<Object[]> params = new ArrayList<Object[]>();
+    List<Object[]> params = new ArrayList<Object[]>(2);
     params.add(new Object[] {Boolean.TRUE});
     params.add(new Object[] {Boolean.FALSE});
     return params;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
index 6d367c7..57f20fd 100644
--- a/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
+++ b/hbase-rest/src/test/java/org/apache/hadoop/hbase/rest/client/TestRemoteTable.java
@@ -262,7 +262,7 @@ public class TestRemoteTable {
 
   @Test
   public void testMultiGet() throws Exception {
-    ArrayList<Get> gets = new ArrayList<Get>();
+    ArrayList<Get> gets = new ArrayList<Get>(2);
     gets.add(new Get(ROW_1));
     gets.add(new Get(ROW_2));
     Result[] results = remoteTable.get(gets);
@@ -272,7 +272,7 @@ public class TestRemoteTable {
     assertEquals(2, results[1].size());
 
     //Test Versions
-    gets = new ArrayList<Get>();
+    gets = new ArrayList<Get>(2);
     Get g = new Get(ROW_1);
     g.setMaxVersions(3);
     gets.add(g);
@@ -284,13 +284,13 @@ public class TestRemoteTable {
     assertEquals(3, results[1].size());
 
     //404
-    gets = new ArrayList<Get>();
+    gets = new ArrayList<Get>(1);
     gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
     results = remoteTable.get(gets);
     assertNotNull(results);
     assertEquals(0, results.length);
 
-    gets = new ArrayList<Get>();
+    gets = new ArrayList<Get>(3);
     gets.add(new Get(Bytes.toBytes("RESALLYREALLYNOTTHERE")));
     gets.add(new Get(ROW_1));
     gets.add(new Get(ROW_2));
@@ -314,7 +314,7 @@ public class TestRemoteTable {
 
     // multiput
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(3);
     put = new Put(ROW_3);
     put.addColumn(COLUMN_2, QUALIFIER_2, VALUE_2);
     puts.add(put);
@@ -408,7 +408,7 @@ public class TestRemoteTable {
    */
   @Test
   public void testScanner() throws IOException {
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(4);
     Put put = new Put(ROW_1);
     put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
     puts.add(put);
@@ -499,7 +499,7 @@ public class TestRemoteTable {
    */
   @Test
   public void testIteratorScaner() throws IOException {
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(4);
     Put put = new Put(ROW_1);
     put.addColumn(COLUMN_1, QUALIFIER_1, VALUE_1);
     puts.add(put);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 47d0c0e..a8de198 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -309,7 +309,7 @@ public class RSGroupInfoManagerImpl implements RSGroupInfoManager, ServerListene
 
     List<TableName> specialTables;
     if(!master.isInitialized()) {
-      specialTables = new ArrayList<TableName>();
+      specialTables = new ArrayList<TableName>(4);
       specialTables.add(AccessControlLists.ACL_TABLE_NAME);
       specialTables.add(TableName.META_TABLE_NAME);
       specialTables.add(TableName.NAMESPACE_TABLE_NAME);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
index a725134..50302bc 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/TestRSGroupsBase.java
@@ -589,7 +589,7 @@ public abstract class TestRSGroupsBase {
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return cluster.getClusterStatus().getRegionsInTransition().size() == 0;
+        return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
       }
     });
     Set<HostAndPort> newServers = Sets.newHashSet();
@@ -606,7 +606,7 @@ public abstract class TestRSGroupsBase {
     TEST_UTIL.waitFor(WAIT_TIMEOUT, new Waiter.Predicate<Exception>() {
       @Override
       public boolean evaluate() throws Exception {
-        return cluster.getClusterStatus().getRegionsInTransition().size() == 0;
+        return cluster.getClusterStatus().getRegionsInTransition().isEmpty();
       }
     });
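
The change above is mechanical throughout: collections whose final size is known up front are now constructed with an explicit initial capacity, and size() == 0 checks become isEmpty(). A minimal sketch of the pattern follows; the class and method names are illustrative, not taken from the patch.

import java.util.ArrayList;
import java.util.List;

public class PresizeSketch {
  // When the element count is known, passing it to the ArrayList constructor
  // allocates the backing array once instead of growing it in steps as items
  // are added.
  static List<String> copyOf(String... items) {
    List<String> out = new ArrayList<>(items.length); // presized, no resizes
    for (String item : items) {
      out.add(item);
    }
    return out;
  }

  public static void main(String[] args) {
    List<String> names = copyOf("entry:", "unused:");
    // isEmpty() states the intent more directly than size() == 0.
    System.out.println(names.isEmpty() ? "empty" : names.size() + " columns");
  }
}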
 


[37/50] [abbrv] hbase git commit: HBASE-17462 Use sliding window for read/write request costs in StochasticLoadBalancer (Tim Brown)

Posted by el...@apache.org.
HBASE-17462 Use sliding window for read/write request costs in StochasticLoadBalancer (Tim Brown)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7754a962
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7754a962
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7754a962

Branch: refs/heads/HBASE-16961
Commit: 7754a9620eff44d1d570fda534f9159a756310cd
Parents: f254e27
Author: tedyu <yu...@gmail.com>
Authored: Sun Jan 22 18:35:38 2017 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Sun Jan 22 18:35:38 2017 -0800

----------------------------------------------------------------------
 .../master/balancer/StochasticLoadBalancer.java | 47 ++++++++++++++------
 .../balancer/TestStochasticLoadBalancer.java    | 24 ++++++++++
 2 files changed, 57 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7754a962/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index b02aac1..4fbae6e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
-import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
@@ -1300,21 +1299,42 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
 
     protected double getRegionLoadCost(Collection<RegionLoad> regionLoadList) {
       double cost = 0;
-
       for (RegionLoad rl : regionLoadList) {
-        double toAdd = getCostFromRl(rl);
+        cost += getCostFromRl(rl);
+      }
+      return cost / regionLoadList.size();
+    }
+
+    protected abstract double getCostFromRl(RegionLoad rl);
+  }
 
-        if (cost == 0) {
-          cost = toAdd;
+  /**
+   * Class to be used for the subset of RegionLoad costs that should be treated as rates.
+   * We do not care about the actual rate in requests per second but rather the rate relative
+   * to the rest of the regions.
+   */
+  abstract static class CostFromRegionLoadAsRateFunction extends CostFromRegionLoadFunction {
+
+    CostFromRegionLoadAsRateFunction(Configuration conf) {
+      super(conf);
+    }
+
+    @Override
+    protected double getRegionLoadCost(Collection<RegionLoad> regionLoadList) {
+      double cost = 0;
+      double previous = 0;
+      boolean isFirst = true;
+      for (RegionLoad rl : regionLoadList) {
+        double current = getCostFromRl(rl);
+        if (isFirst) {
+          isFirst = false;
         } else {
-          cost = (.5 * cost) + (.5 * toAdd);
+          cost += current - previous;
         }
+        previous = current;
       }
-
-      return cost;
+      return Math.max(0, cost / (regionLoadList.size() - 1));
     }
-
-    protected abstract double getCostFromRl(RegionLoad rl);
   }
 
   /**
@@ -1322,7 +1342,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
    * computed cost will be.  This uses a rolling average of regionload.
    */
 
-  static class ReadRequestCostFunction extends CostFromRegionLoadFunction {
+  static class ReadRequestCostFunction extends CostFromRegionLoadAsRateFunction {
 
     private static final String READ_REQUEST_COST_KEY =
         "hbase.master.balancer.stochastic.readRequestCost";
@@ -1333,7 +1353,6 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
       this.setMultiplier(conf.getFloat(READ_REQUEST_COST_KEY, DEFAULT_READ_REQUEST_COST));
     }
 
-
     @Override
     protected double getCostFromRl(RegionLoad rl) {
       return rl.getReadRequestsCount();
@@ -1344,7 +1363,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
    * Compute the cost of total number of write requests.  The more unbalanced the higher the
    * computed cost will be.  This uses a rolling average of regionload.
    */
-  static class WriteRequestCostFunction extends CostFromRegionLoadFunction {
+  static class WriteRequestCostFunction extends CostFromRegionLoadAsRateFunction {
 
     private static final String WRITE_REQUEST_COST_KEY =
         "hbase.master.balancer.stochastic.writeRequestCost";
@@ -1522,7 +1541,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
    * Compute the cost of total memstore size.  The more unbalanced the higher the
    * computed cost will be.  This uses a rolling average of regionload.
    */
-  static class MemstoreSizeCostFunction extends CostFromRegionLoadFunction {
+  static class MemstoreSizeCostFunction extends CostFromRegionLoadAsRateFunction {
 
     private static final String MEMSTORE_SIZE_COST_KEY =
         "hbase.master.balancer.stochastic.memstoreSizeCost";

http://git-wip-us.apache.org/repos/asf/hbase/blob/7754a962/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
index 094687b..3d975b8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java
@@ -231,6 +231,30 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
   }
 
   @Test
+  public void testRegionLoadCost() {
+    List<RegionLoad> regionLoads = new ArrayList<>();
+    for (int i = 1; i < 5; i++) {
+      RegionLoad regionLoad = mock(RegionLoad.class);
+      when(regionLoad.getReadRequestsCount()).thenReturn(new Long(i));
+      when(regionLoad.getStorefileSizeMB()).thenReturn(i);
+      regionLoads.add(regionLoad);
+    }
+
+    Configuration conf = HBaseConfiguration.create();
+    StochasticLoadBalancer.ReadRequestCostFunction readCostFunction =
+        new StochasticLoadBalancer.ReadRequestCostFunction(conf);
+    double rateResult = readCostFunction.getRegionLoadCost(regionLoads);
+    // read requests are treated as a rate so the average rate here is simply 1
+    assertEquals(1, rateResult, 0.01);
+
+    StochasticLoadBalancer.StoreFileCostFunction storeFileCostFunction =
+        new StochasticLoadBalancer.StoreFileCostFunction(conf);
+    double result = storeFileCostFunction.getRegionLoadCost(regionLoads);
+    // storefile size cost is simply an average of its value over time
+    assertEquals(2.5, result, 0.01);
+  }
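
For reference, the expected values in this test fall out of simple arithmetic: the mocked read
counts form the cumulative series 1, 2, 3, 4, so the three consecutive deltas are 1, 1, 1 and the
rate cost is 3 / (4 - 1) = 1. The storefile sizes are the same series 1, 2, 3, 4, and the non-rate
cost function averages them directly: (1 + 2 + 3 + 4) / 4 = 2.5.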
+
+  @Test
   public void testCostFromArray() {
     Configuration conf = HBaseConfiguration.create();
     StochasticLoadBalancer.CostFromRegionLoadFunction


[35/50] [abbrv] hbase git commit: HBASE-17489 ClientScanner may send a next request to a RegionScanner which has been exhausted

Posted by el...@apache.org.
HBASE-17489 ClientScanner may send a next request to a RegionScanner which has been exhausted


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3abd13da
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3abd13da
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3abd13da

Branch: refs/heads/HBASE-16961
Commit: 3abd13dacb57927bd44a47632f4bd0c2e2bb87ea
Parents: 9a9e3df
Author: zhangduo <zh...@apache.org>
Authored: Sun Jan 22 10:02:29 2017 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Sun Jan 22 16:39:00 2017 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/client/ClientScanner.java      | 162 ++--
 .../hbase/client/ReversedClientScanner.java     |   8 +-
 .../hadoop/hbase/client/TestClientScanner.java  |  29 +-
 .../hbase/regionserver/RSRpcServices.java       | 963 ++++++++++---------
 4 files changed, 613 insertions(+), 549 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3abd13da/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 283272a..ea91100 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -54,9 +54,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 
 /**
- * Implements the scanner interface for the HBase client.
- * If there are multiple regions in a table, this scanner will iterate
- * through them all.
+ * Implements the scanner interface for the HBase client. If there are multiple regions in a table,
+ * this scanner will iterate through them all.
  */
 @InterfaceAudience.Private
 public abstract class ClientScanner extends AbstractClientScanner {
@@ -229,15 +228,13 @@ public abstract class ClientScanner extends AbstractClientScanner {
     return false; // unlikely.
   }
 
-  private boolean possiblyNextScanner(int nbRows, final boolean done) throws IOException {
-    // If we have just switched replica, don't go to the next scanner yet. Rather, try
-    // the scanner operations on the new replica, from the right point in the scan
-    // Note that when we switched to a different replica we left it at a point
-    // where we just did the "openScanner" with the appropriate startrow
-    if (callable != null && callable.switchedToADifferentReplica()) return true;
-    return nextScanner(nbRows, done);
+  protected final void closeScanner() throws IOException {
+    if (this.callable != null) {
+      this.callable.setClose();
+      call(callable, caller, scannerTimeout);
+      this.callable = null;
+    }
   }
-
   /*
    * Gets a scanner for the next region. If this.currentRegion != null, then we will move to the
    * endrow of this.currentRegion. Else we will get scanner at the scan.getStartRow(). We will go no
@@ -248,11 +245,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
    */
   protected boolean nextScanner(int nbRows, final boolean done) throws IOException {
     // Close the previous scanner if it's open
-    if (this.callable != null) {
-      this.callable.setClose();
-      call(callable, caller, scannerTimeout);
-      this.callable = null;
-    }
+    closeScanner();
 
     // Where to start the next scanner
     byte[] localStartKey;
@@ -371,6 +364,37 @@ public abstract class ClientScanner extends AbstractClientScanner {
     return cache != null ? cache.size() : 0;
   }
 
+  private boolean regionExhausted(Result[] values) {
+    // This means the server tells us the whole scan operation is done. Usually decided by filter.
+    if (values == null) {
+      return true;
+    }
+    // Not a heartbeat message and we get nothing, this means the region is exhausted
+    if (values.length == 0 && !callable.isHeartbeatMessage()) {
+      return true;
+    }
+    // Server tells us that it has no more results for this region. Notice that this flag comes
+    // from ScanResponse.getMoreResultsInRegion, not ScanResponse.getMoreResults. If the latter
+    // one is false then we will get null values and quit in the first condition of this method.
+    if (callable.hasMoreResultsContext() && !callable.getServerHasMoreResults()) {
+      return true;
+    }
+    return false;
+  }
+
+  private void closeScannerIfExhausted(boolean exhausted) throws IOException {
+    if (exhausted) {
+      if (!partialResults.isEmpty()) {
+        // XXX: continue if there are partial results. But in fact server should not set
+        // hasMoreResults to false if there are partial results.
+        LOG.warn("Server tells us there is no more results for this region but we still have"
+            + " partialResults, this should not happen, retry on the current scanner anyway");
+      } else {
+        closeScanner();
+      }
+    }
+  }
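
The exhaustion check above reduces to a small pure function over the scan response flags. Here is
a standalone restatement (not part of the patch; the boolean parameters stand in for what
ScannerCallable exposes via isHeartbeatMessage, hasMoreResultsContext and getServerHasMoreResults):

// Illustrative restatement of the regionExhausted logic shown above.
final class RegionExhaustedCheck {

  static boolean regionExhausted(boolean valuesNull, int valueCount, boolean heartbeat,
      boolean hasMoreResultsContext, boolean serverHasMoreResults) {
    // The whole scan is done (usually a filter decided so): the server returned null.
    if (valuesNull) {
      return true;
    }
    // An empty response that is not a heartbeat: nothing left in this region.
    if (valueCount == 0 && !heartbeat) {
      return true;
    }
    // The server explicitly said there are no more results in this region.
    return hasMoreResultsContext && !serverHasMoreResults;
  }

  public static void main(String[] args) {
    System.out.println(regionExhausted(true, 0, false, false, false));  // true: filter ended the scan
    System.out.println(regionExhausted(false, 0, true, false, false));  // false: heartbeat only
    System.out.println(regionExhausted(false, 3, false, true, false));  // true: server said so
  }
}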
+
   /**
    * Contact the servers to load more {@link Result}s in the cache.
    */
@@ -380,17 +404,18 @@ public abstract class ClientScanner extends AbstractClientScanner {
     Result[] values = null;
     long remainingResultSize = maxScannerResultSize;
     int countdown = this.caching;
+    // This is possible if we just stopped at the boundary of a region in the previous call.
+    if (callable == null) {
+      if (!nextScanner(countdown, false)) {
+        return;
+      }
+    }
     // We need to reset it if it's a new callable that was created with a countdown in nextScanner
     callable.setCaching(this.caching);
     // This flag is set when we want to skip the result returned. We do
     // this when we reset scanner because it split under us.
     boolean retryAfterOutOfOrderException = true;
-    // We don't expect that the server will have more results for us if
-    // it doesn't tell us otherwise. We rely on the size or count of results
-    boolean serverHasMoreResults = false;
-    boolean allResultsSkipped = false;
-    do {
-      allResultsSkipped = false;
+    for (;;) {
       try {
         // Server returns a null values if scanning is to stop. Else,
         // returns an empty array if scanning is to go on and we've just
@@ -436,7 +461,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
           // Reset the startRow to the row we've seen last so that the new scanner starts at
           // the correct row. Otherwise we may see previously returned rows again.
           // (ScannerCallable by now has "relocated" the correct region)
-          if (!this.lastResult.isPartial() && scan.getBatch() < 0 ) {
+          if (!this.lastResult.isPartial() && scan.getBatch() < 0) {
             if (scan.isReversed()) {
               scan.setStartRow(createClosestRowBefore(lastResult.getRow()));
             } else {
@@ -461,7 +486,10 @@ public abstract class ClientScanner extends AbstractClientScanner {
         // Set this to zero so we don't try and do an rpc and close on remote server when
         // the exception we got was UnknownScanner or the Server is going down.
         callable = null;
-        // This continue will take us to while at end of loop where we will set up new scanner.
+        // reopen the scanner
+        if (!nextScanner(countdown, false)) {
+          break;
+        }
         continue;
       }
       long currentTime = System.currentTimeMillis();
@@ -487,61 +515,58 @@ public abstract class ClientScanner extends AbstractClientScanner {
           remainingResultSize -= estimatedHeapSizeOfResult;
           addEstimatedSize(estimatedHeapSizeOfResult);
           this.lastResult = rs;
-          if (this.lastResult.isPartial() || scan.getBatch() > 0 ) {
+          if (this.lastResult.isPartial() || scan.getBatch() > 0) {
             updateLastCellLoadedToCache(this.lastResult);
           } else {
             this.lastCellLoadedToCache = null;
           }
         }
-        if (cache.isEmpty()) {
-          // all result has been seen before, we need scan more.
-          allResultsSkipped = true;
-          continue;
-        }
       }
+      boolean exhausted = regionExhausted(values);
       if (callable.isHeartbeatMessage()) {
-        if (cache.size() > 0) {
+        if (!cache.isEmpty()) {
           // Caller of this method just wants a Result. If we see a heartbeat message, it means
           // processing of the scan is taking a long time server side. Rather than continue to
           // loop until a limit (e.g. size or caching) is reached, break out early to avoid causing
           // unnecessary delays to the caller
           if (LOG.isTraceEnabled()) {
             LOG.trace("Heartbeat message received and cache contains Results."
-                    + " Breaking out of scan loop");
+                + " Breaking out of scan loop");
           }
+          // we know that the region has not been exhausted yet so just break without calling
+          // closeScannerIfExhausted
           break;
         }
-        continue;
       }
-
-      // We expect that the server won't have more results for us when we exhaust
-      // the size (bytes or count) of the results returned. If the server *does* inform us that
-      // there are more results, we want to avoid possiblyNextScanner(...). Only when we actually
-      // get results is the moreResults context valid.
-      if (null != values && values.length > 0 && callable.hasMoreResultsContext()) {
-        // Only adhere to more server results when we don't have any partialResults
-        // as it keeps the outer loop logic the same.
-        serverHasMoreResults = callable.getServerHasMoreResults() && partialResults.isEmpty();
+      if (countdown <= 0) {
+        // we have enough result.
+        closeScannerIfExhausted(exhausted);
+        break;
       }
-      // Values == null means server-side filter has determined we must STOP
-      // !partialResults.isEmpty() means that we are still accumulating partial Results for a
-      // row. We should not change scanners before we receive all the partial Results for that
-      // row.
-    } while (allResultsSkipped || (callable != null && callable.isHeartbeatMessage())
-        || (doneWithRegion(remainingResultSize, countdown, serverHasMoreResults)
-        && (!partialResults.isEmpty() || possiblyNextScanner(countdown, values == null))));
-  }
-
-  /**
-   * @param remainingResultSize
-   * @param remainingRows
-   * @param regionHasMoreResults
-   * @return true when the current region has been exhausted. When the current region has been
-   *         exhausted, the region must be changed before scanning can continue
-   */
-  private boolean doneWithRegion(long remainingResultSize, int remainingRows,
-      boolean regionHasMoreResults) {
-    return remainingResultSize > 0 && remainingRows > 0 && !regionHasMoreResults;
+      if (remainingResultSize <= 0) {
+        if (!cache.isEmpty()) {
+          closeScannerIfExhausted(exhausted);
+          break;
+        } else {
+          // we have reached the max result size but we still can not find anything to return to the
+          // user. Reset the maxResultSize and try again.
+          remainingResultSize = maxScannerResultSize;
+        }
+      }
+      // we are done with the current region
+      if (exhausted) {
+        if (!partialResults.isEmpty()) {
+          // XXX: continue if there are partial results. But in fact server should not set
+          // hasMoreResults to false if there are partial results.
+          LOG.warn("Server tells us there is no more results for this region but we still have"
+              + " partialResults, this should not happen, retry on the current scanner anyway");
+          continue;
+        }
+        if (!nextScanner(countdown, values == null)) {
+          break;
+        }
+      }
+    }
   }
 
   protected void addEstimatedSize(long estimatedHeapSizeOfResult) {
@@ -566,9 +591,8 @@ public abstract class ClientScanner extends AbstractClientScanner {
    * @return the list of results that should be added to the cache.
    * @throws IOException
    */
-  protected List<Result>
-      getResultsToAddToCache(Result[] resultsFromServer, boolean heartbeatMessage)
-          throws IOException {
+  protected List<Result> getResultsToAddToCache(Result[] resultsFromServer,
+      boolean heartbeatMessage) throws IOException {
     int resultSize = resultsFromServer != null ? resultsFromServer.length : 0;
     List<Result> resultsToAddToCache = new ArrayList<Result>(resultSize);
 
@@ -583,7 +607,7 @@ public abstract class ClientScanner extends AbstractClientScanner {
     // the batch size even though it may not be the last group of cells for that row.
     if (allowPartials || isBatchSet) {
       addResultsToList(resultsToAddToCache, resultsFromServer, 0,
-          (null == resultsFromServer ? 0 : resultsFromServer.length));
+        (null == resultsFromServer ? 0 : resultsFromServer.length));
       return resultsToAddToCache;
     }
 
@@ -769,12 +793,12 @@ public abstract class ClientScanner extends AbstractClientScanner {
   }
 
   /**
-   * Compare two Cells considering reversed scanner.
-   * ReversedScanner only reverses rows, not columns.
+   * Compare two Cells considering reversed scanner. ReversedScanner only reverses rows, not
+   * columns.
    */
   private int compare(Cell a, Cell b) {
-    CellComparator comparator = currentRegion != null && currentRegion.isMetaRegion() ?
-        CellComparator.META_COMPARATOR : CellComparator.COMPARATOR;
+    CellComparator comparator = currentRegion != null && currentRegion.isMetaRegion()
+        ? CellComparator.META_COMPARATOR : CellComparator.COMPARATOR;
     int r = comparator.compareRows(a, b);
     if (r != 0) {
       return this.scan.isReversed() ? -r : r;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3abd13da/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
index 390e236..e1a522a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedClientScanner.java
@@ -63,13 +63,7 @@ public class ReversedClientScanner extends ClientSimpleScanner {
   protected boolean nextScanner(int nbRows, final boolean done)
       throws IOException {
     // Close the previous scanner if it's open
-    if (this.callable != null) {
-      this.callable.setClose();
-      // callWithoutRetries is at this layer. Within the ScannerCallableWithReplicas,
-      // we do a callWithRetries
-      this.caller.callWithoutRetries(callable, scannerTimeout);
-      this.callable = null;
-    }
+    closeScanner();
 
     // Where to start the next scanner
     byte[] localStartKey;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3abd13da/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java
index fd2a393..4319b9a 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java
@@ -158,7 +158,8 @@ public class TestClientScanner {
                 ScannerCallableWithReplicas.class);
           switch (count) {
             case 0: // initialize
-            case 2: // close
+            case 2: // detect no more results
+            case 3: // close
               count++;
               return null;
             case 1:
@@ -184,8 +185,10 @@ public class TestClientScanner {
 
       scanner.loadCache();
 
-      // One more call due to initializeScannerInConstruction()
-      inOrder.verify(caller, Mockito.times(2)).callWithoutRetries(
+      // One for initializeScannerInConstruction()
+      // One for fetching the results
+      // One for fetching null results and quit as we do not have moreResults hint.
+      inOrder.verify(caller, Mockito.times(3)).callWithoutRetries(
           Mockito.any(RetryingCallable.class), Mockito.anyInt());
 
       assertEquals(1, scanner.cache.size());
@@ -224,7 +227,8 @@ public class TestClientScanner {
             case 1:
               count++;
               callable.setHasMoreResultsContext(true);
-              callable.setServerHasMoreResults(false);
+              // if we set false here the implementation will trigger a close
+              callable.setServerHasMoreResults(true);
               return results;
             default:
               throw new RuntimeException("Expected only 2 invocations");
@@ -291,7 +295,8 @@ public class TestClientScanner {
             case 1:
               count++;
               callable.setHasMoreResultsContext(true);
-              callable.setServerHasMoreResults(false);
+              // if we set false here the implementation will trigger a close
+              callable.setServerHasMoreResults(true);
               return results;
             default:
               throw new RuntimeException("Expected only 2 invocations");
@@ -470,13 +475,14 @@ public class TestClientScanner {
           Mockito.anyInt());
 
       InOrder inOrder = Mockito.inOrder(caller);
+      scanner.setRpcFinished(true);
 
       scanner.loadCache();
 
-      inOrder.verify(caller, Mockito.times(2)).callWithoutRetries(
+      inOrder.verify(caller, Mockito.times(3)).callWithoutRetries(
           Mockito.any(RetryingCallable.class), Mockito.anyInt());
 
-      assertEquals(1, scanner.cache.size());
+      assertEquals(2, scanner.cache.size());
       Result r = scanner.cache.poll();
       assertNotNull(r);
       CellScanner cs = r.cellScanner();
@@ -484,15 +490,6 @@ public class TestClientScanner {
       assertEquals(kv1, cs.current());
       assertFalse(cs.advance());
 
-      scanner.setRpcFinished(true);
-
-      inOrder = Mockito.inOrder(caller);
-
-      scanner.loadCache();
-
-      inOrder.verify(caller, Mockito.times(3)).callWithoutRetries(
-          Mockito.any(RetryingCallable.class), Mockito.anyInt());
-
       r = scanner.cache.poll();
       assertNotNull(r);
       cs = r.cellScanner();

http://git-wip-us.apache.org/repos/asf/hbase/blob/3abd13da/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 1e9f16b..a072dce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -38,9 +38,11 @@ import java.util.NavigableMap;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.LongAdder;
 
+import org.apache.commons.lang.mutable.MutableObject;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -77,7 +79,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.VersionInfoUtil;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
-import org.apache.hadoop.hbase.exceptions.MergeRegionException;
 import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
 import org.apache.hadoop.hbase.exceptions.ScannerResetException;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
@@ -96,6 +97,27 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.master.MasterRpcServices;
+import org.apache.hadoop.hbase.quotas.OperationQuota;
+import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
+import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
+import org.apache.hadoop.hbase.regionserver.Leases.Lease;
+import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
+import org.apache.hadoop.hbase.regionserver.Region.Operation;
+import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
+import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
+import org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler;
+import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionForSplitOrMergeResponse;
@@ -107,10 +129,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegion
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
@@ -134,9 +156,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavor
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
@@ -177,18 +196,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.BulkLoadDescr
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescriptor;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
-import org.apache.hadoop.hbase.quotas.OperationQuota;
-import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
-import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-import org.apache.hadoop.hbase.regionserver.Leases.Lease;
-import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-import org.apache.hadoop.hbase.regionserver.Region.Operation;
-import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
-import org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler;
-import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.DNS;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -201,13 +208,6 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.zookeeper.KeeperException;
 
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
-
 /**
  * Implements the regionserver RPC services.
  */
@@ -260,8 +260,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   private final PriorityFunction priority;
 
   private final AtomicLong scannerIdGen = new AtomicLong(0L);
-  private final ConcurrentHashMap<String, RegionScannerHolder> scanners =
-    new ConcurrentHashMap<String, RegionScannerHolder>();
+  private final ConcurrentMap<String, RegionScannerHolder> scanners = new ConcurrentHashMap<>();
 
   /**
    * The lease timeout period for client scanners (milliseconds).
@@ -281,11 +280,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   /**
    * An Rpc callback for closing a RegionScanner.
    */
-   static class RegionScannerCloseCallBack implements RpcCallback {
+  private static final class RegionScannerCloseCallBack implements RpcCallback {
 
     private final RegionScanner scanner;
 
-    public RegionScannerCloseCallBack(RegionScanner scanner){
+    public RegionScannerCloseCallBack(RegionScanner scanner) {
       this.scanner = scanner;
     }
 
@@ -347,27 +346,31 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   /**
    * Holder class which holds the RegionScanner, nextCallSeq and RpcCallbacks together.
    */
-  private static class RegionScannerHolder {
-    private AtomicLong nextCallSeq = new AtomicLong(0);
-    private RegionScanner s;
-    private Region r;
-    final RpcCallback closeCallBack;
-    final RpcCallback shippedCallback;
-
-    public RegionScannerHolder(RegionScanner s, Region r, RpcCallback closeCallBack,
-        RpcCallback shippedCallback) {
+  private static final class RegionScannerHolder {
+
+    private final AtomicLong nextCallSeq = new AtomicLong(0);
+    private final String scannerName;
+    private final RegionScanner s;
+    private final Region r;
+    private final RpcCallback closeCallBack;
+    private final RpcCallback shippedCallback;
+
+    public RegionScannerHolder(String scannerName, RegionScanner s, Region r,
+        RpcCallback closeCallBack, RpcCallback shippedCallback) {
+      this.scannerName = scannerName;
       this.s = s;
       this.r = r;
       this.closeCallBack = closeCallBack;
       this.shippedCallback = shippedCallback;
     }
 
-    private long getNextCallSeq() {
+    public long getNextCallSeq() {
       return nextCallSeq.get();
     }
 
-    private void incNextCallSeq() {
-      nextCallSeq.incrementAndGet();
+    public boolean incNextCallSeq(long currentSeq) {
+      // Use CAS to prevent multiple scan requests from running on the same scanner.
+      return nextCallSeq.compareAndSet(currentSeq, currentSeq + 1);
     }
   }
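
The compareAndSet above is the heart of the concurrency fix: only the one ScanRequest carrying the
expected sequence number wins the CAS, so a duplicate or concurrent request on the same scanner is
rejected (the checkScanNextCallSeq helper further down turns that rejection into an
OutOfOrderScannerNextException). A minimal standalone sketch of the idea, with illustrative names:

import java.util.concurrent.atomic.AtomicLong;

// Toy holder demonstrating the CAS-guarded call sequence used by RegionScannerHolder above.
final class SeqGuard {

  private final AtomicLong nextCallSeq = new AtomicLong(0);

  long getNextCallSeq() {
    return nextCallSeq.get();
  }

  // Returns true only for the single caller presenting the expected sequence number; a retry with
  // a stale sequence, or a concurrent duplicate, fails the CAS and is rejected.
  boolean incNextCallSeq(long currentSeq) {
    return nextCallSeq.compareAndSet(currentSeq, currentSeq + 1);
  }

  public static void main(String[] args) {
    SeqGuard guard = new SeqGuard();
    System.out.println(guard.incNextCallSeq(0)); // true  - first request with seq 0 wins
    System.out.println(guard.incNextCallSeq(0)); // false - duplicate of seq 0 is rejected
    System.out.println(guard.incNextCallSeq(1)); // true  - next in-order request proceeds
  }
}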
 
@@ -476,19 +479,18 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
   }
 
-  private void addResults(final ScanResponse.Builder builder, final List<Result> results,
-      final RpcController controller, boolean isDefaultRegion, boolean clientCellBlockSupported) {
+  private void addResults(ScanResponse.Builder builder, List<Result> results,
+      HBaseRpcController controller, boolean isDefaultRegion, boolean clientCellBlockSupported) {
     builder.setStale(!isDefaultRegion);
-    if (results == null || results.isEmpty()) return;
+    if (results.isEmpty()) return;
     if (clientCellBlockSupported) {
       for (Result res : results) {
         builder.addCellsPerResult(res.size());
         builder.addPartialFlagPerResult(res.isPartial());
       }
-      ((HBaseRpcController)controller).
-        setCellScanner(CellUtil.createCellScanner(results));
+      controller.setCellScanner(CellUtil.createCellScanner(results));
     } else {
-      for (Result res: results) {
+      for (Result res : results) {
         ClientProtos.Result pbr = ProtobufUtil.toResult(res);
         builder.addResults(pbr);
       }
@@ -1131,6 +1133,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
   }
 
+  public
   RegionScanner getScanner(long scannerId) {
     String scannerIdString = Long.toString(scannerId);
     RegionScannerHolder scannerHolder = scanners.get(scannerIdString);
@@ -1202,10 +1205,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     return lastBlock;
   }
 
-  RegionScannerHolder addScanner(String scannerName, RegionScanner s, Region r)
+  private RegionScannerHolder addScanner(String scannerName, RegionScanner s, Region r)
       throws LeaseStillHeldException {
     Lease lease = regionServer.leases.createLease(scannerName, this.scannerLeaseTimeoutPeriod,
-        new ScannerListener(scannerName));
+      new ScannerListener(scannerName));
     RpcCallback shippedCallback = new RegionScannerShippedCallBack(scannerName, s, lease);
     RpcCallback closeCallback;
     if (s instanceof RpcCallback) {
@@ -1213,7 +1216,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     } else {
       closeCallback = new RegionScannerCloseCallBack(s);
     }
-    RegionScannerHolder rsh = new RegionScannerHolder(s, r, closeCallback, shippedCallback);
+    RegionScannerHolder rsh =
+        new RegionScannerHolder(scannerName, s, r, closeCallback, shippedCallback);
     RegionScannerHolder existing = scanners.putIfAbsent(scannerName, rsh);
     assert existing == null : "scannerId must be unique within regionserver's whole lifecycle!";
     return rsh;
@@ -2610,444 +2614,498 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
   }
 
-  /**
-   * Scan data in a table.
-   *
-   * @param controller the RPC controller
-   * @param request the scan request
-   * @throws ServiceException
-   */
-  @Override
-  public ScanResponse scan(final RpcController controller, final ScanRequest request)
-  throws ServiceException {
-    OperationQuota quota = null;
-    Leases.Lease lease = null;
-    String scannerName = null;
-    try {
-      if (!request.hasScannerId() && !request.hasScan()) {
-        throw new DoNotRetryIOException(
-          "Missing required input: scannerId or scan");
-      }
-      long scannerId = -1;
-      if (request.hasScannerId()) {
-        scannerId = request.getScannerId();
-        scannerName = String.valueOf(scannerId);
-      }
-      try {
-        checkOpen();
-      } catch (IOException e) {
-        // If checkOpen failed, server not running or filesystem gone,
-        // cancel this lease; filesystem is gone or we're closing or something.
-        if (scannerName != null) {
-          LOG.debug("Server shutting down and client tried to access missing scanner "
-            + scannerName);
-          if (regionServer.leases != null) {
-            try {
-              regionServer.leases.cancelLease(scannerName);
-            } catch (LeaseException le) {
-              // No problem, ignore
-              if (LOG.isTraceEnabled()) {
-                LOG.trace("Un-able to cancel lease of scanner. It could already be closed.");
-              }
-             }
-          }
-        }
-        throw e;
-      }
-      requestCount.increment();
-      rpcScanRequestCount.increment();
-
-      int ttl = 0;
-      Region region = null;
-      RegionScanner scanner = null;
-      RegionScannerHolder rsh = null;
-      boolean moreResults = true;
-      boolean closeScanner = false;
-      boolean isSmallScan = false;
-      ScanResponse.Builder builder = ScanResponse.newBuilder();
-      if (request.hasCloseScanner()) {
-        closeScanner = request.getCloseScanner();
-      }
-      int rows = closeScanner ? 0 : 1;
-      if (request.hasNumberOfRows()) {
-        rows = request.getNumberOfRows();
-      }
-      if (request.hasScannerId()) {
-        rsh = scanners.get(scannerName);
-        if (rsh == null) {
-          LOG.warn("Client tried to access missing scanner " + scannerName);
-          throw new UnknownScannerException(
+  // This is used to keep compatibility with the old client implementation. Consider removing it
+  // if we decide to drop support for clients that still send a close request to a region scanner
+  // which has already been exhausted.
+  @Deprecated
+  private static final IOException SCANNER_ALREADY_CLOSED = new IOException() {
+
+    private static final long serialVersionUID = -4305297078988180130L;
+
+    @Override
+    public Throwable fillInStackTrace() {
+      return this;
+    }
+  };
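
The constant above is a pre-allocated, stack-trace-free exception: overriding fillInStackTrace()
to return this avoids capturing a (misleading) stack at class-initialization time and makes the
shared instance cheap to throw on a hot path. A minimal standalone illustration of the same idiom
(nothing HBase-specific):

import java.io.IOException;

// Pre-allocated singleton exception without a stack trace, the same idiom as
// SCANNER_ALREADY_CLOSED above.
final class StacklessExceptionDemo {

  private static final IOException ALREADY_CLOSED = new IOException("scanner already closed") {

    private static final long serialVersionUID = 1L;

    @Override
    public Throwable fillInStackTrace() {
      return this; // skip stack capture so the shared instance stays cheap and reusable
    }
  };

  static void close(boolean alreadyClosed) throws IOException {
    if (alreadyClosed) {
      throw ALREADY_CLOSED;
    }
  }

  public static void main(String[] args) {
    try {
      close(true);
    } catch (IOException e) {
      // No stack frames were recorded for the shared instance, so the length printed is 0.
      System.out.println(e.getMessage() + ", stack frames: " + e.getStackTrace().length);
    }
  }
}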
+
+  private RegionScannerHolder getRegionScanner(ScanRequest request) throws IOException {
+    String scannerName = Long.toString(request.getScannerId());
+    RegionScannerHolder rsh = scanners.get(scannerName);
+    if (rsh == null) {
+      // just ignore the close request if the scanner does not exist.
+      if (request.hasCloseScanner() && request.getCloseScanner()) {
+        throw SCANNER_ALREADY_CLOSED;
+      } else {
+        LOG.warn("Client tried to access missing scanner " + scannerName);
+        throw new UnknownScannerException(
             "Unknown scanner '" + scannerName + "'. This can happen due to any of the following "
                 + "reasons: a) Scanner id given is wrong, b) Scanner lease expired because of "
                 + "long wait between consecutive client checkins, c) Server may be closing down, "
                 + "d) RegionServer restart during upgrade.\nIf the issue is due to reason (b), a "
                 + "possible fix would be increasing the value of"
                 + "'hbase.client.scanner.timeout.period' configuration.");
+      }
+    }
+    HRegionInfo hri = rsh.s.getRegionInfo();
+    // Yes, should be the same instance
+    if (regionServer.getOnlineRegion(hri.getRegionName()) != rsh.r) {
+      String msg = "Region was re-opened after the scanner" + scannerName + " was created: "
+          + hri.getRegionNameAsString();
+      LOG.warn(msg + ", closing...");
+      scanners.remove(scannerName);
+      try {
+        rsh.s.close();
+      } catch (IOException e) {
+        LOG.warn("Getting exception closing " + scannerName, e);
+      } finally {
+        try {
+          regionServer.leases.cancelLease(scannerName);
+        } catch (LeaseException e) {
+          LOG.warn("Getting exception closing " + scannerName, e);
         }
-        scanner = rsh.s;
-        HRegionInfo hri = scanner.getRegionInfo();
-        region = regionServer.getRegion(hri.getRegionName());
-        if (region != rsh.r) { // Yes, should be the same instance
-          throw new NotServingRegionException("Region was re-opened after the scanner"
-            + scannerName + " was created: " + hri.getRegionNameAsString());
-        }
-      } else {
-        region = getRegion(request.getRegion());
-        ClientProtos.Scan protoScan = request.getScan();
-        boolean isLoadingCfsOnDemandSet = protoScan.hasLoadColumnFamiliesOnDemand();
-        Scan scan = ProtobufUtil.toScan(protoScan);
-        // if the request doesn't set this, get the default region setting.
-        if (!isLoadingCfsOnDemandSet) {
-          scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
-        }
+      }
+      throw new NotServingRegionException(msg);
+    }
+    return rsh;
+  }
 
-        isSmallScan = scan.isSmall();
-        if (!scan.hasFamilies()) {
-          // Adding all families to scanner
-          for (byte[] family: region.getTableDesc().getFamiliesKeys()) {
-            scan.addFamily(family);
-          }
-        }
+  private Pair<RegionScannerHolder, Boolean> newRegionScanner(ScanRequest request,
+      ScanResponse.Builder builder) throws IOException {
+    Region region = getRegion(request.getRegion());
+    ClientProtos.Scan protoScan = request.getScan();
+    boolean isLoadingCfsOnDemandSet = protoScan.hasLoadColumnFamiliesOnDemand();
+    Scan scan = ProtobufUtil.toScan(protoScan);
+    // if the request doesn't set this, get the default region setting.
+    if (!isLoadingCfsOnDemandSet) {
+      scan.setLoadColumnFamiliesOnDemand(region.isLoadingCfsOnDemandDefault());
+    }
 
-        if (region.getCoprocessorHost() != null) {
-          scanner = region.getCoprocessorHost().preScannerOpen(scan);
-        }
-        if (scanner == null) {
-          scanner = region.getScanner(scan);
-        }
-        if (region.getCoprocessorHost() != null) {
-          scanner = region.getCoprocessorHost().postScannerOpen(scan, scanner);
-        }
-        scannerId = this.scannerIdGen.incrementAndGet();
-        scannerName = String.valueOf(scannerId);
-        rsh = addScanner(scannerName, scanner, region);
-        ttl = this.scannerLeaseTimeoutPeriod;
-        builder.setMvccReadPoint(scanner.getMvccReadPoint());
-      }
-      if (request.hasRenew() && request.getRenew()) {
-        rsh = scanners.get(scannerName);
-        lease = regionServer.leases.removeLease(scannerName);
-        if (lease != null && rsh != null) {
-          regionServer.leases.addLease(lease);
-          // Increment the nextCallSeq value which is the next expected from client.
-          rsh.incNextCallSeq();
-        }
-        return builder.build();
+    if (!scan.hasFamilies()) {
+      // Adding all families to scanner
+      for (byte[] family : region.getTableDesc().getFamiliesKeys()) {
+        scan.addFamily(family);
       }
-      RpcCallContext context = RpcServer.getCurrentCall();
-      Object lastBlock = null;
+    }
+    RegionScanner scanner = null;
+    if (region.getCoprocessorHost() != null) {
+      scanner = region.getCoprocessorHost().preScannerOpen(scan);
+    }
+    if (scanner == null) {
+      scanner = region.getScanner(scan);
+    }
+    if (region.getCoprocessorHost() != null) {
+      scanner = region.getCoprocessorHost().postScannerOpen(scan, scanner);
+    }
+    long scannerId = this.scannerIdGen.incrementAndGet();
+    builder.setScannerId(scannerId);
+    builder.setMvccReadPoint(scanner.getMvccReadPoint());
+    builder.setTtl(scannerLeaseTimeoutPeriod);
+    String scannerName = String.valueOf(scannerId);
+    return Pair.newPair(addScanner(scannerName, scanner, region), scan.isSmall());
+  }
 
-      quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
-      long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
+  private void checkScanNextCallSeq(ScanRequest request, RegionScannerHolder rsh)
+      throws OutOfOrderScannerNextException {
+    // if nextCallSeq does not match throw Exception straight away. This needs to be
+    // performed even before checking of Lease.
+    // See HBASE-5974
+    if (request.hasNextCallSeq()) {
+      long callSeq = request.getNextCallSeq();
+      if (!rsh.incNextCallSeq(callSeq)) {
+        throw new OutOfOrderScannerNextException("Expected nextCallSeq: " + rsh.getNextCallSeq()
+            + " But the nextCallSeq got from client: " + request.getNextCallSeq() + "; request="
+            + TextFormat.shortDebugString(request));
+      }
+    }
+  }
 
-      if (rows > 0) {
-        // if nextCallSeq does not match throw Exception straight away. This needs to be
-        // performed even before checking of Lease.
-        // See HBASE-5974
-        if (request.hasNextCallSeq()) {
-          if (rsh != null) {
-            if (request.getNextCallSeq() != rsh.getNextCallSeq()) {
-              throw new OutOfOrderScannerNextException(
-                "Expected nextCallSeq: " + rsh.getNextCallSeq()
-                + " But the nextCallSeq got from client: " + request.getNextCallSeq() +
-                "; request=" + TextFormat.shortDebugString(request));
-            }
-            // Increment the nextCallSeq value which is the next expected from client.
-            rsh.incNextCallSeq();
+  private void addScannerLeaseBack(Leases.Lease lease) {
+    try {
+      regionServer.leases.addLease(lease);
+    } catch (LeaseStillHeldException e) {
+      // should not happen as the scanner id is unique.
+      throw new AssertionError(e);
+    }
+  }
+
+  private long getTimeLimit(HBaseRpcController controller, boolean allowHeartbeatMessages) {
+    // Set the time limit to be half of the more restrictive timeout value (one of the
+    // timeout values must be positive). In the event that both values are positive, the
+    // more restrictive of the two is used to calculate the limit.
+    if (allowHeartbeatMessages && (scannerLeaseTimeoutPeriod > 0 || rpcTimeout > 0)) {
+      long timeLimitDelta;
+      if (scannerLeaseTimeoutPeriod > 0 && rpcTimeout > 0) {
+        timeLimitDelta = Math.min(scannerLeaseTimeoutPeriod, rpcTimeout);
+      } else {
+        timeLimitDelta = scannerLeaseTimeoutPeriod > 0 ? scannerLeaseTimeoutPeriod : rpcTimeout;
+      }
+      if (controller != null && controller.getCallTimeout() > 0) {
+        timeLimitDelta = Math.min(timeLimitDelta, controller.getCallTimeout());
+      }
+      // Use half of whichever timeout value was more restrictive... But don't allow
+      // the time limit to be less than the allowable minimum (could cause an
+      // immediatate timeout before scanning any data).
+      timeLimitDelta = Math.max(timeLimitDelta / 2, minimumScanTimeLimitDelta);
+      // XXX: Cannot use EnvironmentEdge here because TestIncrementTimeRange uses a
+      // ManualEnvironmentEdge. Consider using System.nanoTime instead.
+      return System.currentTimeMillis() + timeLimitDelta;
+    }
+    // Default value of timeLimit is negative to indicate no timeLimit should be
+    // enforced.
+    return -1L;
+  }
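
The arithmetic in getTimeLimit above is easy to check by hand: take the smaller of the positive
lease and RPC timeouts, cap it by the per-call timeout if one is set, halve it, and never go below
the configured minimum; the real method then adds the delta to the current wall-clock time, or
returns -1 when heartbeats are not allowed or neither timeout is positive. A standalone sketch
with made-up parameter names (values in milliseconds):

// Restatement of the time-limit-delta calculation; assumes at least one of leaseTimeout and
// rpcTimeout is positive, as the caller of getTimeLimit guarantees.
final class ScanTimeLimit {

  static long timeLimitDelta(long leaseTimeout, long rpcTimeout, long callTimeout,
      long minimumDelta) {
    long delta;
    if (leaseTimeout > 0 && rpcTimeout > 0) {
      delta = Math.min(leaseTimeout, rpcTimeout);   // both positive: pick the stricter one
    } else {
      delta = leaseTimeout > 0 ? leaseTimeout : rpcTimeout; // only one is positive
    }
    if (callTimeout > 0) {
      delta = Math.min(delta, callTimeout);         // a per-call timeout tightens it further
    }
    // Use half of the most restrictive timeout, but never less than the allowed minimum.
    return Math.max(delta / 2, minimumDelta);
  }

  public static void main(String[] args) {
    // 60s lease, 30s rpc timeout, 20s call timeout, 10ms minimum -> 10000ms time limit delta
    System.out.println(timeLimitDelta(60_000, 30_000, 20_000, 10));
  }
}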
+
+  // return whether we have more results in region.
+  private boolean scan(HBaseRpcController controller, ScanRequest request, RegionScannerHolder rsh,
+      boolean isSmallScan, long maxQuotaResultSize, int rows, List<Result> results,
+      ScanResponse.Builder builder, MutableObject lastBlock, RpcCallContext context)
+      throws IOException {
+    Region region = rsh.r;
+    RegionScanner scanner = rsh.s;
+    long maxResultSize;
+    if (scanner.getMaxResultSize() > 0) {
+      maxResultSize = Math.min(scanner.getMaxResultSize(), maxQuotaResultSize);
+    } else {
+      maxResultSize = maxQuotaResultSize;
+    }
+    // This is cells inside a row. Default size is 10 so if many versions or many cfs,
+    // then we'll resize. Resizings show in profiler. Set it higher than 10. For now
+    // arbitrary 32. TODO: keep record of general size of results being returned.
+    List<Cell> values = new ArrayList<Cell>(32);
+    region.startRegionOperation(Operation.SCAN);
+    try {
+      int i = 0;
+      long before = EnvironmentEdgeManager.currentTime();
+      synchronized (scanner) {
+        boolean stale = (region.getRegionInfo().getReplicaId() != 0);
+        boolean clientHandlesPartials =
+            request.hasClientHandlesPartials() && request.getClientHandlesPartials();
+        boolean clientHandlesHeartbeats =
+            request.hasClientHandlesHeartbeats() && request.getClientHandlesHeartbeats();
+
+        // On the server side we must ensure that the correct ordering of partial results is
+        // returned to the client to allow them to properly reconstruct the partial results.
+        // If the coprocessor host is adding to the result list, we cannot guarantee the
+        // correct ordering of partial results and so we prevent partial results from being
+        // formed.
+        boolean serverGuaranteesOrderOfPartials = results.isEmpty();
+        boolean allowPartialResults =
+            clientHandlesPartials && serverGuaranteesOrderOfPartials && !isSmallScan;
+        boolean moreRows = false;
+
+        // Heartbeat messages occur when the processing of the ScanRequest exceeds a
+        // certain time threshold on the server. When the time threshold is exceeded, the
+        // server stops the scan and sends back whatever Results it has accumulated within
+        // that time period (may be empty). Since heartbeat messages have the potential to
+        // create partial Results (in the event that the timeout occurs in the middle of a
+        // row), we must only generate heartbeat messages when the client can handle both
+        // heartbeats AND partials
+        boolean allowHeartbeatMessages = clientHandlesHeartbeats && allowPartialResults;
+
+        long timeLimit = getTimeLimit(controller, allowHeartbeatMessages);
+
+        final LimitScope sizeScope =
+            allowPartialResults ? LimitScope.BETWEEN_CELLS : LimitScope.BETWEEN_ROWS;
+        final LimitScope timeScope =
+            allowHeartbeatMessages ? LimitScope.BETWEEN_CELLS : LimitScope.BETWEEN_ROWS;
+
+        boolean trackMetrics = request.hasTrackScanMetrics() && request.getTrackScanMetrics();
+
+        // Configure with limits for this RPC. Set keep progress true since size progress
+        // towards size limit should be kept between calls to nextRaw
+        ScannerContext.Builder contextBuilder = ScannerContext.newBuilder(true);
+        contextBuilder.setSizeLimit(sizeScope, maxResultSize);
+        contextBuilder.setBatchLimit(scanner.getBatch());
+        contextBuilder.setTimeLimit(timeScope, timeLimit);
+        contextBuilder.setTrackMetrics(trackMetrics);
+        ScannerContext scannerContext = contextBuilder.build();
+        boolean limitReached = false;
+        while (i < rows) {
+          // Reset the batch progress to 0 before every call to RegionScanner#nextRaw. The
+          // batch limit is a limit on the number of cells per Result. Thus, if progress is
+          // being tracked (i.e. scannerContext.keepProgress() is true) then we need to
+          // reset the batch progress between nextRaw invocations since we don't want the
+          // batch progress from previous calls to affect future calls
+          scannerContext.setBatchProgress(0);
+
+          // Collect values to be returned here
+          moreRows = scanner.nextRaw(values, scannerContext);
+
+          if (!values.isEmpty()) {
+            final boolean partial = scannerContext.partialResultFormed();
+            Result r = Result.create(values, null, stale, partial);
+            lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
+            results.add(r);
+            i++;
           }
-        }
-        boolean scannerClosed = false;
-        try {
-          // Remove lease while its being processed in server; protects against case
-          // where processing of request takes > lease expiration time.
-          lease = regionServer.leases.removeLease(scannerName);
-          List<Result> results = new ArrayList<Result>();
 
-          boolean done = false;
-          // Call coprocessor. Get region info from scanner.
-          if (region != null && region.getCoprocessorHost() != null) {
-            Boolean bypass = region.getCoprocessorHost().preScannerNext(
-              scanner, results, rows);
-            if (!results.isEmpty()) {
-              for (Result r : results) {
-                lastBlock = addSize(context, r, lastBlock);
-              }
+          boolean sizeLimitReached = scannerContext.checkSizeLimit(LimitScope.BETWEEN_ROWS);
+          boolean timeLimitReached = scannerContext.checkTimeLimit(LimitScope.BETWEEN_ROWS);
+          boolean rowLimitReached = i >= rows;
+          limitReached = sizeLimitReached || timeLimitReached || rowLimitReached;
+
+          if (limitReached || !moreRows) {
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("Done scanning. limitReached: " + limitReached + " moreRows: " + moreRows
+                  + " scannerContext: " + scannerContext);
             }
-            if (bypass != null && bypass.booleanValue()) {
-              done = true;
+            // We only want to mark a ScanResponse as a heartbeat message in the event that
+            // there are more values to be read server side. If there aren't more values,
+            // marking it as a heartbeat is wasteful because the client will need to issue
+            // another ScanRequest only to realize that they already have all the values
+            if (moreRows) {
+              // Heartbeat messages occur when the time limit has been reached.
+              builder.setHeartbeatMessage(timeLimitReached);
             }
+            break;
           }
+          values.clear();
+        }
+        if (limitReached || moreRows) {
+          // We stopped prematurely
+          builder.setMoreResultsInRegion(true);
+        } else {
+          // We didn't get a single batch
+          builder.setMoreResultsInRegion(false);
+        }
 
-          if (!done) {
-            long maxResultSize = Math.min(scanner.getMaxResultSize(), maxQuotaResultSize);
-            if (maxResultSize <= 0) {
-              maxResultSize = maxQuotaResultSize;
-            }
-            // This is cells inside a row. Default size is 10 so if many versions or many cfs,
-            // then we'll resize. Resizings show in profiler. Set it higher than 10. For now
-            // arbitrary 32. TODO: keep record of general size of results being returned.
-            List<Cell> values = new ArrayList<Cell>(32);
-            region.startRegionOperation(Operation.SCAN);
-            try {
-              int i = 0;
-              long before = EnvironmentEdgeManager.currentTime();
-              synchronized(scanner) {
-                boolean stale = (region.getRegionInfo().getReplicaId() != 0);
-                boolean clientHandlesPartials =
-                    request.hasClientHandlesPartials() && request.getClientHandlesPartials();
-                boolean clientHandlesHeartbeats =
-                    request.hasClientHandlesHeartbeats() && request.getClientHandlesHeartbeats();
-
-                // On the server side we must ensure that the correct ordering of partial results is
-                // returned to the client to allow them to properly reconstruct the partial results.
-                // If the coprocessor host is adding to the result list, we cannot guarantee the
-                // correct ordering of partial results and so we prevent partial results from being
-                // formed.
-                boolean serverGuaranteesOrderOfPartials = results.isEmpty();
-                boolean allowPartialResults =
-                    clientHandlesPartials && serverGuaranteesOrderOfPartials && !isSmallScan;
-                boolean moreRows = false;
-
-                // Heartbeat messages occur when the processing of the ScanRequest is exceeds a
-                // certain time threshold on the server. When the time threshold is exceeded, the
-                // server stops the scan and sends back whatever Results it has accumulated within
-                // that time period (may be empty). Since heartbeat messages have the potential to
-                // create partial Results (in the event that the timeout occurs in the middle of a
-                // row), we must only generate heartbeat messages when the client can handle both
-                // heartbeats AND partials
-                boolean allowHeartbeatMessages = clientHandlesHeartbeats && allowPartialResults;
-
-                // Default value of timeLimit is negative to indicate no timeLimit should be
-                // enforced.
-                long timeLimit = -1;
-
-                // Set the time limit to be half of the more restrictive timeout value (one of the
-                // timeout values must be positive). In the event that both values are positive, the
-                // more restrictive of the two is used to calculate the limit.
-                if (allowHeartbeatMessages && (scannerLeaseTimeoutPeriod > 0 || rpcTimeout > 0)) {
-                  long timeLimitDelta;
-                  if (scannerLeaseTimeoutPeriod > 0 && rpcTimeout > 0) {
-                    timeLimitDelta = Math.min(scannerLeaseTimeoutPeriod, rpcTimeout);
-                  } else {
-                    timeLimitDelta =
-                        scannerLeaseTimeoutPeriod > 0 ? scannerLeaseTimeoutPeriod : rpcTimeout;
-                  }
-                  if (controller != null) {
-                    if (controller instanceof HBaseRpcController) {
-                      HBaseRpcController pRpcController =
-                          (HBaseRpcController)controller;
-                      if (pRpcController.getCallTimeout() > 0) {
-                        timeLimitDelta = Math.min(timeLimitDelta, pRpcController.getCallTimeout());
-                      }
-                    } else {
-                      throw new UnsupportedOperationException("We only do " +
-                        "HBaseRpcControllers! FIX IF A PROBLEM: " + controller);
-                    }
-                  }
-                  // Use half of whichever timeout value was more restrictive... But don't allow
-                  // the time limit to be less than the allowable minimum (could cause an
-                  // immediatate timeout before scanning any data).
-                  timeLimitDelta = Math.max(timeLimitDelta / 2, minimumScanTimeLimitDelta);
-                  timeLimit = System.currentTimeMillis() + timeLimitDelta;
-                }
-
-                final LimitScope sizeScope =
-                    allowPartialResults ? LimitScope.BETWEEN_CELLS : LimitScope.BETWEEN_ROWS;
-                final LimitScope timeScope =
-                    allowHeartbeatMessages ? LimitScope.BETWEEN_CELLS : LimitScope.BETWEEN_ROWS;
-
-                boolean trackMetrics =
-                    request.hasTrackScanMetrics() && request.getTrackScanMetrics();
-
-                // Configure with limits for this RPC. Set keep progress true since size progress
-                // towards size limit should be kept between calls to nextRaw
-                ScannerContext.Builder contextBuilder = ScannerContext.newBuilder(true);
-                contextBuilder.setSizeLimit(sizeScope, maxResultSize);
-                contextBuilder.setBatchLimit(scanner.getBatch());
-                contextBuilder.setTimeLimit(timeScope, timeLimit);
-                contextBuilder.setTrackMetrics(trackMetrics);
-                ScannerContext scannerContext = contextBuilder.build();
-                boolean limitReached = false;
-                while (i < rows) {
-                  // Reset the batch progress to 0 before every call to RegionScanner#nextRaw. The
-                  // batch limit is a limit on the number of cells per Result. Thus, if progress is
-                  // being tracked (i.e. scannerContext.keepProgress() is true) then we need to
-                  // reset the batch progress between nextRaw invocations since we don't want the
-                  // batch progress from previous calls to affect future calls
-                  scannerContext.setBatchProgress(0);
-
-                  // Collect values to be returned here
-                  moreRows = scanner.nextRaw(values, scannerContext);
-
-                  if (!values.isEmpty()) {
-                    final boolean partial = scannerContext.partialResultFormed();
-                    Result r = Result.create(values, null, stale, partial);
-                    lastBlock = addSize(context, r, lastBlock);
-                    results.add(r);
-                    i++;
-                  }
-
-                  boolean sizeLimitReached = scannerContext.checkSizeLimit(LimitScope.BETWEEN_ROWS);
-                  boolean timeLimitReached = scannerContext.checkTimeLimit(LimitScope.BETWEEN_ROWS);
-                  boolean rowLimitReached = i >= rows;
-                  limitReached = sizeLimitReached || timeLimitReached || rowLimitReached;
-
-                  if (limitReached || !moreRows) {
-                    if (LOG.isTraceEnabled()) {
-                      LOG.trace("Done scanning. limitReached: " + limitReached + " moreRows: "
-                          + moreRows + " scannerContext: " + scannerContext);
-                    }
-                    // We only want to mark a ScanResponse as a heartbeat message in the event that
-                    // there are more values to be read server side. If there aren't more values,
-                    // marking it as a heartbeat is wasteful because the client will need to issue
-                    // another ScanRequest only to realize that they already have all the values
-                    if (moreRows) {
-                      // Heartbeat messages occur when the time limit has been reached.
-                      builder.setHeartbeatMessage(timeLimitReached);
-                    }
-                    break;
-                  }
-                  values.clear();
-                }
-
-                if (limitReached || moreRows) {
-                  // We stopped prematurely
-                  builder.setMoreResultsInRegion(true);
-                } else {
-                  // We didn't get a single batch
-                  builder.setMoreResultsInRegion(false);
-                }
-
-                // Check to see if the client requested that we track metrics server side. If the
-                // client requested metrics, retrieve the metrics from the scanner context.
-                if (trackMetrics) {
-                  Map<String, Long> metrics = scannerContext.getMetrics().getMetricsMap();
-                  ScanMetrics.Builder metricBuilder = ScanMetrics.newBuilder();
-                  NameInt64Pair.Builder pairBuilder = NameInt64Pair.newBuilder();
-
-                  for (Entry<String, Long> entry : metrics.entrySet()) {
-                    pairBuilder.setName(entry.getKey());
-                    pairBuilder.setValue(entry.getValue());
-                    metricBuilder.addMetrics(pairBuilder.build());
-                  }
-
-                  builder.setScanMetrics(metricBuilder.build());
-                }
-              }
-              region.updateReadRequestsCount(i);
-              long end = EnvironmentEdgeManager.currentTime();
-              long responseCellSize = context != null ? context.getResponseCellSize() : 0;
-              region.getMetrics().updateScanTime(end - before);
-              if (regionServer.metricsRegionServer != null) {
-                regionServer.metricsRegionServer.updateScanSize(responseCellSize);
-                regionServer.metricsRegionServer.updateScanTime(end - before);
-              }
-            } finally {
-              region.closeRegionOperation();
-            }
-            // coprocessor postNext hook
-            if (region != null && region.getCoprocessorHost() != null) {
-              region.getCoprocessorHost().postScannerNext(scanner, results, rows, true);
-            }
+        // Check to see if the client requested that we track metrics server side. If the
+        // client requested metrics, retrieve the metrics from the scanner context.
+        if (trackMetrics) {
+          Map<String, Long> metrics = scannerContext.getMetrics().getMetricsMap();
+          ScanMetrics.Builder metricBuilder = ScanMetrics.newBuilder();
+          NameInt64Pair.Builder pairBuilder = NameInt64Pair.newBuilder();
+
+          for (Entry<String, Long> entry : metrics.entrySet()) {
+            pairBuilder.setName(entry.getKey());
+            pairBuilder.setValue(entry.getValue());
+            metricBuilder.addMetrics(pairBuilder.build());
           }
 
-          quota.addScanResult(results);
+          builder.setScanMetrics(metricBuilder.build());
+        }
+      }
+      region.updateReadRequestsCount(i);
+      long end = EnvironmentEdgeManager.currentTime();
+      long responseCellSize = context != null ? context.getResponseCellSize() : 0;
+      region.getMetrics().updateScanTime(end - before);
+      if (regionServer.metricsRegionServer != null) {
+        regionServer.metricsRegionServer.updateScanSize(responseCellSize);
+        regionServer.metricsRegionServer.updateScanTime(end - before);
+      }
+    } finally {
+      region.closeRegionOperation();
+    }
+    // coprocessor postNext hook
+    if (region.getCoprocessorHost() != null) {
+      region.getCoprocessorHost().postScannerNext(scanner, results, rows, true);
+    }
+    return builder.getMoreResultsInRegion();
+  }
 
-          // If the scanner's filter - if any - is done with the scan
-          // and wants to tell the client to stop the scan. This is done by passing
-          // a null result, and setting moreResults to false.
-          if (scanner.isFilterDone() && results.isEmpty()) {
-            moreResults = false;
-            results = null;
-          } else {
-            addResults(builder, results, controller,
-                RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()),
-                isClientCellBlockSupport(context));
-          }
-        } catch (IOException e) {
-          // The scanner state might be left in a dirty state, so we will tell the Client to
-          // fail this RPC and close the scanner while opening up another one from the start of
-          // row that the client has last seen.
-          closeScanner(region, scanner, scannerName, context);
-          // scanner is closed here
-          scannerClosed = true;
-
-          // If it is a CorruptHFileException or a FileNotFoundException, throw the
-          // DoNotRetryIOException. This can avoid the retry in ClientScanner.
-          if (e instanceof CorruptHFileException || e instanceof FileNotFoundException) {
-            throw new DoNotRetryIOException(e);
-          }
-          // We closed the scanner already. Instead of throwing the IOException, and client
-          // retrying with the same scannerId only to get USE on the next RPC, we directly throw
-          // a special exception to save an RPC.
-          if (VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 4)) {
-            // 1.4.0+ clients know how to handle
-            throw new ScannerResetException("Scanner is closed on the server-side", e);
-          } else {
-            // older clients do not know about SRE. Just throw USE, which they will handle
-            throw new UnknownScannerException("Throwing UnknownScannerException to reset the client"
-                + " scanner state for clients older than 1.3.", e);
-          }
-        } finally {
-          // If the scanner is not closed, set the shipped callback
-          if (!scannerClosed) {
-            if (context != null) {
-              context.setCallBack(rsh.shippedCallback);
+  /**
+   * Scan data in a table.
+   *
+   * @param controller the RPC controller
+   * @param request the scan request
+   * @throws ServiceException
+   */
+  @Override
+  public ScanResponse scan(final RpcController controller, final ScanRequest request)
+      throws ServiceException {
+    if (controller != null && !(controller instanceof HBaseRpcController)) {
+      throw new UnsupportedOperationException(
+          "We only do " + "HBaseRpcControllers! FIX IF A PROBLEM: " + controller);
+    }
+    if (!request.hasScannerId() && !request.hasScan()) {
+      throw new ServiceException(
+          new DoNotRetryIOException("Missing required input: scannerId or scan"));
+    }
+    try {
+      checkOpen();
+    } catch (IOException e) {
+      if (request.hasScannerId()) {
+        String scannerName = Long.toString(request.getScannerId());
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(
+            "Server shutting down and client tried to access missing scanner " + scannerName);
+        }
+        if (regionServer.leases != null) {
+          try {
+            regionServer.leases.cancelLease(scannerName);
+          } catch (LeaseException le) {
+            // No problem, ignore
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("Un-able to cancel lease of scanner. It could already be closed.");
             }
-
-            // Adding resets expiration time on lease.
-            if (scanners.containsKey(scannerName)) {
-              ttl = this.scannerLeaseTimeoutPeriod;
-              // When context != null, adding back the lease will be done in callback set above.
-              if (context == null) {
-                if (lease != null) regionServer.leases.addLease(lease);
-              }
+          }
+        }
+      }
+      throw new ServiceException(e);
+    }
+    requestCount.increment();
+    rpcScanRequestCount.increment();
+    RegionScannerHolder rsh;
+    ScanResponse.Builder builder = ScanResponse.newBuilder();
+    boolean isSmallScan;
+    try {
+      if (request.hasScannerId()) {
+        rsh = getRegionScanner(request);
+        isSmallScan = false;
+      } else {
+        Pair<RegionScannerHolder, Boolean> pair = newRegionScanner(request, builder);
+        rsh = pair.getFirst();
+        isSmallScan = pair.getSecond().booleanValue();
+      }
+    } catch (IOException e) {
+      if (e == SCANNER_ALREADY_CLOSED) {
+        // Now we will close the scanner automatically if there are no more results for this region,
+        // but the old client will still send a close request to us. Just ignore it and return.
+        return builder.build();
+      }
+      throw new ServiceException(e);
+    }
+    Region region = rsh.r;
+    String scannerName = rsh.scannerName;
+    Leases.Lease lease;
+    try {
+      // Remove the lease while it is being processed in the server; protects against the case
+      // where processing of the request takes > lease expiration time.
+      lease = regionServer.leases.removeLease(scannerName);
+    } catch (LeaseException e) {
+      throw new ServiceException(e);
+    }
+    if (request.hasRenew() && request.getRenew()) {
+      // add back and return
+      addScannerLeaseBack(lease);
+      try {
+        checkScanNextCallSeq(request, rsh);
+      } catch (OutOfOrderScannerNextException e) {
+        throw new ServiceException(e);
+      }
+      return builder.build();
+    }
+    OperationQuota quota;
+    try {
+      quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
+    } catch (IOException e) {
+      addScannerLeaseBack(lease);
+      throw new ServiceException(e);
+    };
+    try {
+      checkScanNextCallSeq(request, rsh);
+    } catch (OutOfOrderScannerNextException e) {
+      addScannerLeaseBack(lease);
+      throw new ServiceException(e);
+    }
+    // Now we have increased the next call sequence. If we give the client an error, the retry
+    // will never succeed. So we'd better close the scanner and return a DoNotRetryIOException to
+    // the client, which will then try to open a new scanner.
+    boolean closeScanner = request.hasCloseScanner() ? request.getCloseScanner() : false;
+    int rows; // this is scan.getCaching
+    if (request.hasNumberOfRows()) {
+      rows = request.getNumberOfRows();
+    } else {
+      rows = closeScanner ? 0 : 1;
+    }
+    RpcCallContext context = RpcServer.getCurrentCall();
+    // now let's do the real scan.
+    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
+    RegionScanner scanner = rsh.s;
+    boolean moreResults = true;
+    boolean moreResultsInRegion = true;
+    MutableObject lastBlock = new MutableObject();
+    boolean scannerClosed = false;
+    try {
+      List<Result> results = new ArrayList<>();
+      if (rows > 0) {
+        boolean done = false;
+        // Call coprocessor. Get region info from scanner.
+        if (region.getCoprocessorHost() != null) {
+          Boolean bypass = region.getCoprocessorHost().preScannerNext(scanner, results, rows);
+          if (!results.isEmpty()) {
+            for (Result r : results) {
+              lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
             }
           }
+          if (bypass != null && bypass.booleanValue()) {
+            done = true;
+          }
+        }
+        if (!done) {
+          moreResultsInRegion = scan((HBaseRpcController) controller, request, rsh, isSmallScan,
+            maxQuotaResultSize, rows, results, builder, lastBlock, context);
         }
       }
 
-      if (!moreResults || closeScanner) {
-        ttl = 0;
+      quota.addScanResult(results);
+
+      if (scanner.isFilterDone() && results.isEmpty()) {
+        // If the scanner's filter - if any - is done with the scan, only set moreResults to
+        // false when the result list is empty. This keeps compatibility with the old scan
+        // implementation, where the returned results are simply ignored if moreResults is false.
+        // The isEmpty check can be removed once we get rid of the old implementation.
         moreResults = false;
-        if (closeScanner(region, scanner, scannerName, context)) {
-          return builder.build(); // bypass
-        }
       }
-
-      if (ttl > 0) {
-        builder.setTtl(ttl);
+      addResults(builder, results, (HBaseRpcController) controller,
+        RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()),
+        isClientCellBlockSupport(context));
+      if (!moreResults || !moreResultsInRegion || closeScanner) {
+        scannerClosed = true;
+        closeScanner(region, scanner, scannerName, context);
       }
-      builder.setScannerId(scannerId);
       builder.setMoreResults(moreResults);
       return builder.build();
-    } catch (IOException ie) {
-      if (scannerName != null && ie instanceof NotServingRegionException) {
-        RegionScannerHolder rsh = scanners.remove(scannerName);
-        if (rsh != null) {
-          try {
-            RegionScanner scanner = rsh.s;
-            LOG.warn(scannerName + " encountered " + ie.getMessage() + ", closing ...");
-            scanner.close();
-            regionServer.leases.cancelLease(scannerName);
-          } catch (IOException e) {
-           LOG.warn("Getting exception closing " + scannerName, e);
-          }
+    } catch (Exception e) {
+      try {
+        // scanner is closed here
+        scannerClosed = true;
+        // The scanner state might be left in a dirty state, so we will tell the Client to
+        // fail this RPC and close the scanner while opening up another one from the start of
+        // row that the client has last seen.
+        closeScanner(region, scanner, scannerName, context);
+
+        // If it is a CorruptHFileException or a FileNotFoundException, throw the
+        // DoNotRetryIOException. This can avoid the retry in ClientScanner.
+        if (e instanceof CorruptHFileException || e instanceof FileNotFoundException) {
+          throw new DoNotRetryIOException(e);
         }
+        // We closed the scanner already. Instead of throwing the IOException, and client
+        // retrying with the same scannerId only to get USE on the next RPC, we directly throw
+        // a special exception to save an RPC.
+        if (VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 4)) {
+          // 1.4.0+ clients know how to handle
+          throw new ScannerResetException("Scanner is closed on the server-side", e);
+        } else {
+          // older clients do not know about SRE. Just throw USE, which they will handle
+          throw new UnknownScannerException("Throwing UnknownScannerException to reset the client"
+              + " scanner state for clients older than 1.3.", e);
+        }
+      } catch (IOException ioe) {
+        throw new ServiceException(ioe);
       }
-      throw new ServiceException(ie);
     } finally {
-      if (quota != null) {
-        quota.close();
+      if (!scannerClosed) {
+        // Adding resets expiration time on lease.
+        // the closeCallBack will be set in closeScanner so here we only care about shippedCallback
+        if (context != null) {
+          context.setCallBack(rsh.shippedCallback);
+        } else {
+          // When context != null, adding back the lease will be done in callback set above.
+          addScannerLeaseBack(lease);
+        }
       }
+      quota.close();
     }
   }
 
-  private boolean closeScanner(Region region, RegionScanner scanner, String scannerName,
+  private void closeScanner(Region region, RegionScanner scanner, String scannerName,
       RpcCallContext context) throws IOException {
-    if (region != null && region.getCoprocessorHost() != null) {
+    if (region.getCoprocessorHost() != null) {
       if (region.getCoprocessorHost().preScannerClose(scanner)) {
-        return true; // bypass
+        // bypass the actual close.
+        return;
       }
     }
     RegionScannerHolder rsh = scanners.remove(scannerName);
@@ -3057,19 +3115,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       } else {
         rsh.s.close();
       }
-      try {
-        regionServer.leases.cancelLease(scannerName);
-      } catch (LeaseException le) {
-        // No problem, ignore
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Un-able to cancel lease of scanner. It could already be closed.");
-        }
-      }
-      if (region != null && region.getCoprocessorHost() != null) {
+      if (region.getCoprocessorHost() != null) {
         region.getCoprocessorHost().postScannerClose(scanner);
       }
     }
-    return false;
   }
 
   @Override
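
For readers following the refactor above, here is a rough, illustrative sketch (not HBase source) of how a caller might react to the ScanResponse flags the new scan() sets: moreResults, moreResultsInRegion and the heartbeat flag. The scannerId/caching variables and the helpers sendScanRequest, handleResults, openScannerOnNextRegion and closeCurrentScanner are hypothetical placeholders, and ScanResponse is assumed to be the shaded ClientProtos.ScanResponse.

    // Illustrative only: reacting to the flags set by the refactored RSRpcServices.scan().
    // sendScanRequest(...), handleResults(...), openScannerOnNextRegion() and
    // closeCurrentScanner() are hypothetical helpers, not HBase APIs.
    ScanResponse resp = sendScanRequest(scannerId, caching);
    handleResults(resp); // consume whatever Results came back (possibly none on a pure heartbeat)
    if (!resp.getMoreResults()) {
      // The scan is finished, e.g. the filter reported it is done and no results remained.
      closeCurrentScanner();
    } else if (resp.hasMoreResultsInRegion() && !resp.getMoreResultsInRegion()) {
      // This region is exhausted; the server already closed the scanner, so move to the next region.
      openScannerOnNextRegion();
    } else {
      // More data remains in this region (or this response was only a heartbeat,
      // i.e. resp.getHeartbeatMessage() is true): issue another ScanRequest for the same scanner.
    }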


[20/50] [abbrv] hbase git commit: HBASE-16786 Procedure V2 - Move ZK-lock's uses to Procedure framework locks (LockProcedure) - Matteo Bertozzi Locks are no longer hosted up in zookeeper but instead by the Master.

Posted by el...@apache.org.
HBASE-16786 Procedure V2 - Move ZK-lock's uses to Procedure framework locks (LockProcedure) - Matteo Bertozzi
Locks are no longer hosted up in zookeeper but instead by the Master.
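
For orientation before reading the diff, the pattern callers are converted to looks roughly like the sketch below. It is illustrative only: master is assumed to be a MasterServices/HMaster reference, tableName and the description string are placeholders, and the structure mirrors what RSGroupAdminServer and the mob chores do in this change.

    import org.apache.hadoop.hbase.master.locking.LockManager;
    import org.apache.hadoop.hbase.master.locking.LockProcedure;

    // Minimal sketch of the master-hosted lock pattern introduced by this change.
    final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
        tableName, LockProcedure.LockType.EXCLUSIVE,
        getClass().getName() + ": example table operation");
    try {
      lock.acquire();
    } catch (InterruptedException e) {
      throw new IOException("Interrupted when waiting for table lock", e);
    }
    try {
      // ... work that must not run concurrently with other holders of this lock ...
    } finally {
      lock.release();
    }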


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/76dc957f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/76dc957f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/76dc957f

Branch: refs/heads/HBASE-16961
Commit: 76dc957f64fa38ce88694054db7dbf590f368ae7
Parents: bff7c4f
Author: Michael Stack <st...@apache.org>
Authored: Mon Jan 16 22:18:53 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Thu Jan 19 09:34:17 2017 -0800

----------------------------------------------------------------------
 .../hbase/procedure2/ProcedureExecutor.java     |   2 +-
 .../procedure2/TestProcedureSuspended.java      |  10 +-
 .../hbase/rsgroup/RSGroupAdminServer.java       |  14 +-
 .../hadoop/hbase/client/locking/EntityLock.java |  25 +-
 .../hadoop/hbase/ipc/RpcServerFactory.java      |  13 +-
 .../hadoop/hbase/ipc/SimpleRpcServer.java       |   4 +-
 .../hadoop/hbase/master/AssignmentManager.java  |   8 +-
 .../master/ExpiredMobFileCleanerChore.java      |  36 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   8 +-
 .../hbase/master/MasterMobCompactionThread.java |  19 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |  86 +++-
 .../hadoop/hbase/master/MasterServices.java     |   5 -
 .../hadoop/hbase/master/MobCompactionChore.java |   9 +-
 .../hadoop/hbase/master/TableLockManager.java   | 453 -------------------
 .../hbase/master/locking/LockManager.java       |  22 +-
 .../hbase/master/locking/LockProcedure.java     |   1 +
 .../master/procedure/MasterProcedureEnv.java    |   3 +-
 .../procedure/MasterProcedureScheduler.java     | 183 ++------
 .../master/procedure/MasterProcedureUtil.java   |   2 +-
 .../master/snapshot/TakeSnapshotHandler.java    |  40 +-
 .../org/apache/hadoop/hbase/mob/MobUtils.java   |  30 +-
 .../hbase/regionserver/CompactSplitThread.java  |   2 +-
 .../hadoop/hbase/regionserver/HMobStore.java    |  70 ++-
 .../hbase/regionserver/HRegionServer.java       |  45 +-
 .../hadoop/hbase/regionserver/HStore.java       |   1 -
 .../regionserver/RegionServerServices.java      |  14 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  33 +-
 .../hbase/util/hbck/TableLockChecker.java       |  87 ----
 .../hadoop/hbase/MockRegionServerServices.java  |  14 +-
 .../hbase/coprocessor/TestMasterObserver.java   |   3 +-
 .../hbase/master/MockNoopMasterServices.java    |   5 -
 .../hadoop/hbase/master/MockRegionServer.java   |  13 +-
 .../hbase/master/TestTableLockManager.java      | 433 ------------------
 .../hbase/master/locking/TestLockProcedure.java |   9 -
 ...ProcedureSchedulerPerformanceEvaluation.java |   4 +-
 .../procedure/TestMasterProcedureScheduler.java |  42 +-
 ...TestMasterProcedureSchedulerConcurrency.java |   4 +-
 .../TestMergeTableRegionsProcedure.java         | 231 ----------
 .../regionserver/TestMobStoreCompaction.java    |  19 +-
 .../regionserver/TestRegionServerMetrics.java   |   7 +-
 .../security/token/TestTokenAuthentication.java |   1 -
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |  80 ----
 .../hadoop/hbase/util/hbck/HbckTestingUtil.java |   1 -
 43 files changed, 325 insertions(+), 1766 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index d3b65e8..0912cb7 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -776,7 +776,7 @@ public class ProcedureExecutor<TEnvironment> {
     if (nonceKey != null) {
       currentProcId = nonceKeysToProcIdsMap.get(nonceKey);
       Preconditions.checkArgument(currentProcId != null,
-        "expected nonceKey=" + nonceKey + " to be reserved, use registerNonce()");
+        "Expected nonceKey=" + nonceKey + " to be reserved, use registerNonce(); proc=" + proc);
     } else {
       currentProcId = nextProcId();
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java
index 9a108a8..0a8b0e4 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hbase.procedure2;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -28,20 +30,16 @@ import java.util.concurrent.atomic.AtomicLong;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
-import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import org.apache.hadoop.hbase.procedure2.store.NoopProcedureStore;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Threads;
-
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 @Category({MasterTests.class, SmallTests.class})
 public class TestProcedureSuspended {
   private static final Log LOG = LogFactory.getLog(TestProcedureSuspended.class);

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index dc28f7d..bf0feab 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -53,7 +53,8 @@ import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.master.locking.LockManager;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 
 /**
  * Service to support Region Server Grouping (HBase-6721)
@@ -273,10 +274,15 @@ public class RSGroupAdminServer extends RSGroupAdmin {
         master.getMasterCoprocessorHost().postMoveTables(tables, targetGroup);
       }
     }
-    for(TableName table: tables) {
-      TableLock lock = master.getTableLockManager().writeLock(table, "Group: table move");
+    for (TableName table: tables) {
+      LockManager.MasterLock lock = master.getLockManager().createMasterLock(table,
+          LockProcedure.LockType.EXCLUSIVE, this.getClass().getName() + ": Group: table move");
       try {
-        lock.acquire();
+        try {
+          lock.acquire();
+        } catch (InterruptedException e) {
+          throw new IOException("Interrupted when waiting for table lock", e);
+        }
         for (HRegionInfo region :
             master.getAssignmentManager().getRegionStates().getRegionsOfTable(table)) {
           master.getAssignmentManager().unassign(region);

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
index 990c76d..c141c3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
@@ -164,6 +164,8 @@ public class EntityLock {
   /**
    * Sends rpc to the master to request lock.
    * The lock request is queued with other lock requests.
+   * Call {@link #await()} to wait for the lock to be granted.
+   * Always call {@link #unlock()} after calling this method, even if an error occurs.
    */
   public void requestLock() throws IOException {
     if (procId == null) {
@@ -200,9 +202,7 @@ public class EntityLock {
   }
 
   public void unlock() throws IOException {
-    locked.set(false);
-    worker.interrupt();
-    Threads.shutdown(worker);
+    Threads.shutdown(worker.shutdown());
     try {
       stub.lockHeartbeat(null,
         LockHeartbeatRequest.newBuilder().setProcId(procId).setKeepAlive(false).build());
@@ -212,8 +212,21 @@ public class EntityLock {
   }
 
   protected class LockHeartbeatWorker extends Thread {
+    private volatile boolean shutdown = false;
+
     public LockHeartbeatWorker(final String desc) {
       super("LockHeartbeatWorker(" + desc + ")");
+      setDaemon(true);
+    }
+
+    /**
+     * Shut down the thread cleanly and quietly.
+     * @return this worker thread, so the call can be chained as in Threads.shutdown(worker.shutdown()).
+     */
+    Thread shutdown() {
+      shutdown = true;
+      interrupt();
+      return this;
     }
 
     public void run() {
@@ -256,8 +269,10 @@ public class EntityLock {
         } catch (InterruptedException e) {
           // Since there won't be any more heartbeats, assume lock will be lost.
           locked.set(false);
-          LOG.error("Interrupted, releasing " + EntityLock.this, e);
-          abort.abort("Worker thread interrupted", e);
+          if (!this.shutdown) {
+            LOG.error("Interrupted, releasing " + this, e);
+            abort.abort("Worker thread interrupted", e);
+          }
           return;
         }
       }
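
Putting the javadoc added above together, client-side use of EntityLock follows roughly the shape below. This is a sketch, not HBase source: how the EntityLock instance is obtained is out of scope, and the checked exceptions are approximate.

    // Sketch only: the requestLock/await/unlock sequence described in the new javadoc.
    void runWithEntityLock(EntityLock lock) throws IOException, InterruptedException {
      lock.requestLock();   // queue the lock request with the master
      try {
        lock.await();       // wait until the master grants the lock
        // ... critical section ...
      } finally {
        lock.unlock();      // always unlock, even on error, as the javadoc above requires
      }
    }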

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java
index 7d91a2c..eb2b70e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServerFactory.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 
 @InterfaceAudience.Private
@@ -48,11 +49,17 @@ public class RpcServerFactory {
       RpcScheduler scheduler) throws IOException {
     String rpcServerClass = conf.get(CUSTOM_RPC_SERVER_IMPL_CONF_KEY,
         SimpleRpcServer.class.getName());
-    LOG.info("Use " + rpcServerClass + " rpc server");
+    StringBuffer servicesList = new StringBuffer();
+    for (BlockingServiceAndInterface s: services) {
+      ServiceDescriptor sd = s.getBlockingService().getDescriptorForType();
+      if (sd == null) continue; // Can be null for certain tests like TestTokenAuthentication
+      if (servicesList.length() > 0) servicesList.append(", ");
+      servicesList.append(sd.getFullName());
+    }
+    LOG.info("Creating " + rpcServerClass + " hosting " + servicesList);
     return ReflectionUtils.instantiateWithCustomCtor(rpcServerClass,
         new Class[] { Server.class, String.class, List.class,
             InetSocketAddress.class, Configuration.class, RpcScheduler.class },
         new Object[] { server, name, services, bindAddress, conf, scheduler });
   }
-
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index 01d45cd..075d8b8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -1273,7 +1273,9 @@ public class SimpleRpcServer extends RpcServer {
       String serviceName = connectionHeader.getServiceName();
       if (serviceName == null) throw new EmptyServiceNameException();
       this.service = getService(services, serviceName);
-      if (this.service == null) throw new UnknownServiceException(serviceName);
+      if (this.service == null) {
+        throw new UnknownServiceException(serviceName);
+      }
       setupCellBlockCodecs(this.connectionHeader);
       RPCProtos.ConnectionHeaderResponse.Builder chrBuilder =
           RPCProtos.ConnectionHeaderResponse.newBuilder();

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
index 3ab4678..3005334 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java
@@ -112,8 +112,6 @@ public class AssignmentManager {
 
   private final MetricsAssignmentManager metricsAssignmentManager;
 
-  private final TableLockManager tableLockManager;
-
   private AtomicInteger numRegionsOpened = new AtomicInteger(0);
 
   final private KeyLocker<String> locker = new KeyLocker<String>();
@@ -212,13 +210,10 @@ public class AssignmentManager {
    * @param balancer implementation of {@link LoadBalancer}
    * @param service Executor service
    * @param metricsMaster metrics manager
-   * @param tableLockManager TableLock manager
    * @throws IOException
    */
   public AssignmentManager(MasterServices server, ServerManager serverManager,
-      final LoadBalancer balancer,
-      final ExecutorService service, MetricsMaster metricsMaster,
-      final TableLockManager tableLockManager,
+      final LoadBalancer balancer, final ExecutorService service, MetricsMaster metricsMaster,
       final TableStateManager tableStateManager)
           throws IOException {
     this.server = server;
@@ -258,7 +253,6 @@ public class AssignmentManager {
       conf.getInt("hbase.bulk.assignment.perregion.open.time", 10000);
 
     this.metricsAssignmentManager = new MetricsAssignmentManager();
-    this.tableLockManager = tableLockManager;
 
     // Configurations for retrying opening a region on receiving a FAILED_OPEN
     this.retryConfig = new RetryCounter.RetryConfig();

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
index 3261bd6..faa4f0e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hbase.master;
 
-import java.io.IOException;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
@@ -29,8 +28,8 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.LockTimeoutException;
-import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.master.locking.LockManager;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.mob.MobUtils;
@@ -44,7 +43,6 @@ public class ExpiredMobFileCleanerChore extends ScheduledChore {
 
   private static final Log LOG = LogFactory.getLog(ExpiredMobFileCleanerChore.class);
   private final HMaster master;
-  private TableLockManager tableLockManager;
   private ExpiredMobFileCleaner cleaner;
 
   public ExpiredMobFileCleanerChore(HMaster master) {
@@ -53,7 +51,6 @@ public class ExpiredMobFileCleanerChore extends ScheduledChore {
       .getConfiguration().getInt(MobConstants.MOB_CLEANER_PERIOD,
         MobConstants.DEFAULT_MOB_CLEANER_PERIOD), TimeUnit.SECONDS);
     this.master = master;
-    this.tableLockManager = master.getTableLockManager();
     cleaner = new ExpiredMobFileCleaner();
     cleaner.setConf(master.getConfiguration());
   }
@@ -70,33 +67,14 @@ public class ExpiredMobFileCleanerChore extends ScheduledChore {
           if (hcd.isMobEnabled() && hcd.getMinVersions() == 0) {
             // clean only for mob-enabled column.
             // obtain a read table lock before cleaning, synchronize with MobFileCompactionChore.
-            boolean tableLocked = false;
-            TableLock lock = null;
+            final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
+                MobUtils.getTableLockName(htd.getTableName()), LockProcedure.LockType.SHARED,
+                this.getClass().getSimpleName() + ": Cleaning expired mob files");
             try {
-              // the tableLockManager might be null in testing. In that case, it is lock-free.
-              if (tableLockManager != null) {
-                lock = tableLockManager.readLock(MobUtils.getTableLockName(htd.getTableName()),
-                  "Run ExpiredMobFileCleanerChore");
-                lock.acquire();
-              }
-              tableLocked = true;
+              lock.acquire();
               cleaner.cleanExpiredMobFiles(htd.getTableName().getNameAsString(), hcd);
-            } catch (LockTimeoutException e) {
-              LOG.info("Fail to acquire the lock because of timeout, maybe a"
-                + " MobCompactor is running", e);
-            } catch (IOException e) {
-              LOG.error(
-                "Fail to clean the expired mob files for the column " + hcd.getNameAsString()
-                  + " in the table " + htd.getNameAsString(), e);
             } finally {
-              if (lock != null && tableLocked) {
-                try {
-                  lock.release();
-                } catch (IOException e) {
-                  LOG.error(
-                    "Fail to release the read lock for the table " + htd.getNameAsString(), e);
-                }
-              }
+              lock.release();
             }
           }
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ab7a25e..154958b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -645,8 +645,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.splitOrMergeTracker.start();
 
     this.assignmentManager = new AssignmentManager(this, serverManager,
-      this.balancer, this.service, this.metricsMaster,
-      this.tableLockManager, tableStateManager);
+      this.balancer, this.service, this.metricsMaster, tableStateManager);
 
     this.replicationManager = new ReplicationManager(conf, zooKeeper, this);
 
@@ -732,8 +731,6 @@ public class HMaster extends HRegionServer implements MasterServices {
 
     this.serverManager = createServerManager(this);
 
-    // Invalidate all write locks held previously
-    this.tableLockManager.reapWriteLocks();
     this.tableStateManager = new TableStateManager(this);
 
     status.setStatus("Initializing ZK system trackers");
@@ -3030,8 +3027,7 @@ public class HMaster extends HRegionServer implements MasterServices {
    */
   public void requestMobCompaction(TableName tableName,
     List<HColumnDescriptor> columns, boolean allFiles) throws IOException {
-    mobCompactThread.requestMobCompaction(conf, fs, tableName, columns,
-      tableLockManager, allFiles);
+    mobCompactThread.requestMobCompaction(conf, fs, tableName, columns, allFiles);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
index c0a915b..fc0ecfb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.locking.LockManager;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
@@ -74,15 +76,13 @@ public class MasterMobCompactionThread {
    * @param fs The file system
    * @param tableName The table the compact
    * @param columns The column descriptors
-   * @param tableLockManager The tableLock manager
    * @param allFiles Whether add all mob files into the compaction.
    */
   public void requestMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
-    List<HColumnDescriptor> columns, TableLockManager tableLockManager, boolean allFiles)
-    throws IOException {
+      List<HColumnDescriptor> columns, boolean allFiles) throws IOException {
     master.reportMobCompactionStart(tableName);
     try {
-      masterMobPool.execute(new CompactionRunner(fs, tableName, columns, tableLockManager,
+      masterMobPool.execute(new CompactionRunner(fs, tableName, columns,
         allFiles, mobCompactorPool));
     } catch (RejectedExecutionException e) {
       // in case the request is rejected by the pool
@@ -103,27 +103,28 @@ public class MasterMobCompactionThread {
     private FileSystem fs;
     private TableName tableName;
     private List<HColumnDescriptor> hcds;
-    private TableLockManager tableLockManager;
     private boolean allFiles;
     private ExecutorService pool;
 
     public CompactionRunner(FileSystem fs, TableName tableName, List<HColumnDescriptor> hcds,
-      TableLockManager tableLockManager, boolean allFiles, ExecutorService pool) {
+      boolean allFiles, ExecutorService pool) {
       super();
       this.fs = fs;
       this.tableName = tableName;
       this.hcds = hcds;
-      this.tableLockManager = tableLockManager;
       this.allFiles = allFiles;
       this.pool = pool;
     }
 
     @Override
     public void run() {
+      // These locks are on dummy table names, and only used for compaction/mob file cleaning.
+      final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
+          MobUtils.getTableLockName(tableName), LockProcedure.LockType.EXCLUSIVE,
+          this.getClass().getName() + ": mob compaction");
       try {
         for (HColumnDescriptor hcd : hcds) {
-          MobUtils.doMobCompaction(conf, fs, tableName, hcd, pool, tableLockManager,
-            allFiles);
+          MobUtils.doMobCompaction(conf, fs, tableName, hcd, pool, allFiles, lock);
         }
       } catch (IOException e) {
         LOG.error("Failed to perform the mob compaction", e);

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 1151c92..60b8b65 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -52,16 +52,28 @@ import org.apache.hadoop.hbase.ipc.QosPriority;
 import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.regionserver.RSRpcServices;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.access.AccessController;
+import org.apache.hadoop.hbase.security.visibility.VisibilityController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
@@ -71,9 +83,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.*;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.*;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
@@ -103,16 +119,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.Remov
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
-import org.apache.hadoop.hbase.regionserver.RSRpcServices;
-import org.apache.hadoop.hbase.replication.ReplicationException;
-import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.security.access.AccessController;
-import org.apache.hadoop.hbase.security.visibility.VisibilityController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
 import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -228,13 +234,15 @@ public class MasterRpcServices extends RSRpcServices
    * @return list of blocking services and their security info classes that this server supports
    */
   protected List<BlockingServiceAndInterface> getServices() {
-    List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(4);
+    List<BlockingServiceAndInterface> bssi = new ArrayList<BlockingServiceAndInterface>(5);
     bssi.add(new BlockingServiceAndInterface(
       MasterService.newReflectiveBlockingService(this),
       MasterService.BlockingInterface.class));
     bssi.add(new BlockingServiceAndInterface(
       RegionServerStatusService.newReflectiveBlockingService(this),
       RegionServerStatusService.BlockingInterface.class));
+    bssi.add(new BlockingServiceAndInterface(LockService.newReflectiveBlockingService(this),
+        LockService.BlockingInterface.class));
     bssi.addAll(super.getServices());
     return bssi;
   }
@@ -1754,34 +1762,62 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
-  public LockResponse requestLock(RpcController controller, LockRequest request)
+  public LockResponse requestLock(RpcController controller, final LockRequest request)
       throws ServiceException {
     try {
       if (request.getDescription().isEmpty()) {
         throw new IllegalArgumentException("Empty description");
       }
-      final long procId;
+      NonceProcedureRunnable npr;
       LockProcedure.LockType type = LockProcedure.LockType.valueOf(request.getLockType().name());
       if (request.getRegionInfoCount() > 0) {
         final HRegionInfo[] regionInfos = new HRegionInfo[request.getRegionInfoCount()];
         for (int i = 0; i < request.getRegionInfoCount(); ++i) {
           regionInfos[i] = HRegionInfo.convert(request.getRegionInfo(i));
         }
-        procId = master.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
-            request.getDescription(), request.getNonceGroup(), request.getNonce());
-        return LockResponse.newBuilder().setProcId(procId).build();
+        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
+          @Override
+          protected void run() throws IOException {
+            setProcId(master.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
+                request.getDescription(), getNonceKey()));
+          }
+
+          @Override
+          protected String getDescription() {
+            return "RequestLock";
+          }
+        };
       } else if (request.hasTableName()) {
         final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
-        procId = master.getLockManager().remoteLocks().requestTableLock(tableName, type,
-            request.getDescription(), request.getNonceGroup(), request.getNonce());
-        return LockResponse.newBuilder().setProcId(procId).build();
+        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
+          @Override
+          protected void run() throws IOException {
+            setProcId(master.getLockManager().remoteLocks().requestTableLock(tableName, type,
+                request.getDescription(), getNonceKey()));
+          }
+
+          @Override
+          protected String getDescription() {
+            return "RequestLock";
+          }
+        };
       } else if (request.hasNamespace()) {
-        procId = master.getLockManager().remoteLocks().requestNamespaceLock(
-            request.getNamespace(), type, request.getDescription(),
-            request.getNonceGroup(), request.getNonce());
+        npr = new NonceProcedureRunnable(master, request.getNonceGroup(), request.getNonce()) {
+          @Override
+          protected void run() throws IOException {
+            setProcId(master.getLockManager().remoteLocks().requestNamespaceLock(
+                request.getNamespace(), type, request.getDescription(), getNonceKey()));
+          }
+
+          @Override
+          protected String getDescription() {
+            return "RequestLock";
+          }
+        };
       } else {
         throw new IllegalArgumentException("one of table/namespace/region should be specified");
       }
+      long procId = MasterProcedureUtil.submitProcedure(npr);
       return LockResponse.newBuilder().setProcId(procId).build();
     } catch (IllegalArgumentException e) {
       LOG.warn("Exception when queuing lock", e);
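
The three branches above share the same submission idiom; condensed into one hedged sketch (using the table-lock branch, with tableName and type taken from the request as in the code above):

    // Sketch of the nonce-aware submission pattern used by requestLock(): wrapping the lock
    // request in a NonceProcedureRunnable makes retried client calls resolve to the same procedure.
    long procId = MasterProcedureUtil.submitProcedure(
        new MasterProcedureUtil.NonceProcedureRunnable(master, request.getNonceGroup(),
            request.getNonce()) {
          @Override
          protected void run() throws IOException {
            setProcId(master.getLockManager().remoteLocks().requestTableLock(tableName, type,
                request.getDescription(), getNonceKey()));
          }

          @Override
          protected String getDescription() {
            return "RequestLock";
          }
        });
    return LockResponse.newBuilder().setProcId(procId).build();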

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 79ebca5..66758f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -93,11 +93,6 @@ public interface MasterServices extends Server {
   ExecutorService getExecutorService();
 
   /**
-   * @return Master's instance of {@link TableLockManager}
-   */
-  TableLockManager getTableLockManager();
-
-  /**
    * @return Master's instance of {@link TableStateManager}
    */
   TableStateManager getTableStateManager();

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
index 4b956e6..42a5445 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.master.locking.LockManager;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.mob.MobUtils;
 
 /**
@@ -40,14 +42,12 @@ public class MobCompactionChore extends ScheduledChore {
 
   private static final Log LOG = LogFactory.getLog(MobCompactionChore.class);
   private HMaster master;
-  private TableLockManager tableLockManager;
   private ExecutorService pool;
 
   public MobCompactionChore(HMaster master, int period) {
     // use the period as initial delay.
     super(master.getServerName() + "-MobCompactionChore", master, period, period, TimeUnit.SECONDS);
     this.master = master;
-    this.tableLockManager = master.getTableLockManager();
     this.pool = MobUtils.createMobCompactorThreadPool(master.getConfiguration());
   }
 
@@ -63,6 +63,9 @@ public class MobCompactionChore extends ScheduledChore {
         }
         boolean reported = false;
         try {
+          final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
+              MobUtils.getTableLockName(htd.getTableName()), LockProcedure.LockType.EXCLUSIVE,
+              this.getClass().getName() + ": mob compaction");
           for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
             if (!hcd.isMobEnabled()) {
               continue;
@@ -72,7 +75,7 @@ public class MobCompactionChore extends ScheduledChore {
               reported = true;
             }
             MobUtils.doMobCompaction(master.getConfiguration(), master.getFileSystem(),
-              htd.getTableName(), hcd, pool, tableLockManager, false);
+                htd.getTableName(), hcd, pool, false, lock);
           }
         } finally {
           if (reported) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
deleted file mode 100644
index c8eefa3..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableLockManager.java
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.InterProcessLock;
-import org.apache.hadoop.hbase.InterProcessLock.MetadataHandler;
-import org.apache.hadoop.hbase.InterProcessReadWriteLock;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.LockTimeoutException;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.hbase.zookeeper.lock.ZKInterProcessReadWriteLock;
-import org.apache.zookeeper.KeeperException;
-
-/**
- * A manager for distributed table level locks.
- */
-@InterfaceAudience.Private
-public abstract class TableLockManager {
-
-  private static final Log LOG = LogFactory.getLog(TableLockManager.class);
-
-  /** Configuration key for enabling table-level locks for schema changes */
-  public static final String TABLE_LOCK_ENABLE =
-    "hbase.table.lock.enable";
-
-  /** by default we should enable table-level locks for schema changes */
-  private static final boolean DEFAULT_TABLE_LOCK_ENABLE = true;
-
-  /** Configuration key for time out for trying to acquire table locks */
-  protected static final String TABLE_WRITE_LOCK_TIMEOUT_MS =
-    "hbase.table.write.lock.timeout.ms";
-
-  /** Configuration key for time out for trying to acquire table locks */
-  protected static final String TABLE_READ_LOCK_TIMEOUT_MS =
-    "hbase.table.read.lock.timeout.ms";
-
-  protected static final long DEFAULT_TABLE_WRITE_LOCK_TIMEOUT_MS =
-    600 * 1000; //10 min default
-
-  protected static final long DEFAULT_TABLE_READ_LOCK_TIMEOUT_MS =
-    600 * 1000; //10 min default
-
-  public static final String TABLE_LOCK_EXPIRE_TIMEOUT = "hbase.table.lock.expire.ms";
-
-  public static final long DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS =
-      600 * 1000; //10 min default
-
-  /**
-   * A distributed lock for a table.
-   */
-  @InterfaceAudience.Private
-  public interface TableLock {
-    /**
-     * Acquire the lock, with the configured lock timeout.
-     * @throws LockTimeoutException If unable to acquire a lock within a specified
-     * time period (if any)
-     * @throws IOException If unrecoverable error occurs
-     */
-    void acquire() throws IOException;
-
-    /**
-     * Release the lock already held.
-     * @throws IOException If there is an unrecoverable error releasing the lock
-     */
-    void release() throws IOException;
-  }
-
-  /**
-   * Returns a TableLock for locking the table for exclusive access
-   * @param tableName Table to lock
-   * @param purpose Human readable reason for locking the table
-   * @return A new TableLock object for acquiring a write lock
-   */
-  public abstract TableLock writeLock(TableName tableName, String purpose);
-
-  /**
-   * Returns a TableLock for locking the table for shared access among read-lock holders
-   * @param tableName Table to lock
-   * @param purpose Human readable reason for locking the table
-   * @return A new TableLock object for acquiring a read lock
-   */
-  public abstract TableLock readLock(TableName tableName, String purpose);
-
-  /**
-   * Visits all table locks(read and write), and lock attempts with the given callback
-   * MetadataHandler.
-   * @param handler the metadata handler to call
-   * @throws IOException If there is an unrecoverable error
-   */
-  public abstract void visitAllLocks(MetadataHandler handler) throws IOException;
-
-  /**
-   * Force releases all table locks(read and write) that have been held longer than
-   * "hbase.table.lock.expire.ms". Assumption is that the clock skew between zookeeper
-   * and this servers is negligible.
-   * The behavior of the lock holders still thinking that they have the lock is undefined.
-   * @throws IOException If there is an unrecoverable error
-   */
-  public abstract void reapAllExpiredLocks() throws IOException;
-
-  /**
-   * Force releases table write locks and lock attempts even if this thread does
-   * not own the lock. The behavior of the lock holders still thinking that they
-   * have the lock is undefined. This should be used carefully and only when
-   * we can ensure that all write-lock holders have died. For example if only
-   * the master can hold write locks, then we can reap it's locks when the backup
-   * master starts.
-   * @throws IOException If there is an unrecoverable error
-   */
-  public abstract void reapWriteLocks() throws IOException;
-
-  /**
-   * Called after a table has been deleted, and after the table lock is  released.
-   * TableLockManager should do cleanup for the table state.
-   * @param tableName name of the table
-   * @throws IOException If there is an unrecoverable error releasing the lock
-   */
-  public abstract void tableDeleted(TableName tableName)
-      throws IOException;
-
-  /**
-   * Creates and returns a TableLockManager according to the configuration
-   */
-  public static TableLockManager createTableLockManager(Configuration conf,
-      ZooKeeperWatcher zkWatcher, ServerName serverName) {
-    // Initialize table level lock manager for schema changes, if enabled.
-    if (conf.getBoolean(TABLE_LOCK_ENABLE,
-        DEFAULT_TABLE_LOCK_ENABLE)) {
-      long writeLockTimeoutMs = conf.getLong(TABLE_WRITE_LOCK_TIMEOUT_MS,
-          DEFAULT_TABLE_WRITE_LOCK_TIMEOUT_MS);
-      long readLockTimeoutMs = conf.getLong(TABLE_READ_LOCK_TIMEOUT_MS,
-          DEFAULT_TABLE_READ_LOCK_TIMEOUT_MS);
-      long lockExpireTimeoutMs = conf.getLong(TABLE_LOCK_EXPIRE_TIMEOUT,
-          DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS);
-
-      return new ZKTableLockManager(zkWatcher, serverName, writeLockTimeoutMs, readLockTimeoutMs, lockExpireTimeoutMs);
-    }
-
-    return new NullTableLockManager();
-  }
-
-  /**
-   * A null implementation
-   */
-  @InterfaceAudience.Private
-  public static class NullTableLockManager extends TableLockManager {
-    static class NullTableLock implements TableLock {
-      @Override
-      public void acquire() throws IOException {
-      }
-      @Override
-      public void release() throws IOException {
-      }
-    }
-    @Override
-    public TableLock writeLock(TableName tableName, String purpose) {
-      return new NullTableLock();
-    }
-    @Override
-    public TableLock readLock(TableName tableName, String purpose) {
-      return new NullTableLock();
-    }
-    @Override
-    public void reapAllExpiredLocks() throws IOException {
-    }
-    @Override
-    public void reapWriteLocks() throws IOException {
-    }
-    @Override
-    public void tableDeleted(TableName tableName) throws IOException {
-    }
-    @Override
-    public void visitAllLocks(MetadataHandler handler) throws IOException {
-    }
-  }
-
-  /** Public for hbck */
-  public static ZooKeeperProtos.TableLock fromBytes(byte[] bytes) {
-    int pblen = ProtobufUtil.lengthOfPBMagic();
-    if (bytes == null || bytes.length < pblen) {
-      return null;
-    }
-    try {
-      ZooKeeperProtos.TableLock.Builder builder = ZooKeeperProtos.TableLock.newBuilder();
-      ProtobufUtil.mergeFrom(builder, bytes, pblen, bytes.length - pblen);
-      return builder.build();
-    } catch (IOException ex) {
-      LOG.warn("Exception in deserialization", ex);
-    }
-    return null;
-  }
-
-  /**
-   * ZooKeeper based TableLockManager
-   */
-  @InterfaceAudience.Private
-  private static class ZKTableLockManager extends TableLockManager {
-
-    private static final MetadataHandler METADATA_HANDLER = new MetadataHandler() {
-      @Override
-      public void handleMetadata(byte[] ownerMetadata) {
-        if (!LOG.isDebugEnabled()) {
-          return;
-        }
-        ZooKeeperProtos.TableLock data = fromBytes(ownerMetadata);
-        if (data == null) {
-          return;
-        }
-        LOG.debug("Table is locked by " +
-            String.format("[tableName=%s:%s, lockOwner=%s, threadId=%s, " +
-                "purpose=%s, isShared=%s, createTime=%s]",
-                data.getTableName().getNamespace().toStringUtf8(),
-                data.getTableName().getQualifier().toStringUtf8(),
-                ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(),
-                data.getPurpose(), data.getIsShared(), data.getCreateTime()));
-      }
-    };
-
-    private static class TableLockImpl implements TableLock {
-      long lockTimeoutMs;
-      TableName tableName;
-      InterProcessLock lock;
-      boolean isShared;
-      ZooKeeperWatcher zkWatcher;
-      ServerName serverName;
-      String purpose;
-
-      public TableLockImpl(TableName tableName, ZooKeeperWatcher zkWatcher,
-          ServerName serverName, long lockTimeoutMs, boolean isShared, String purpose) {
-        this.tableName = tableName;
-        this.zkWatcher = zkWatcher;
-        this.serverName = serverName;
-        this.lockTimeoutMs = lockTimeoutMs;
-        this.isShared = isShared;
-        this.purpose = purpose;
-      }
-
-      @Override
-      public void acquire() throws IOException {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Attempt to acquire table " + (isShared ? "read" : "write") +
-            " lock on: " + tableName + " for:" + purpose);
-        }
-
-        lock = createTableLock();
-        try {
-          if (lockTimeoutMs == -1) {
-            // Wait indefinitely
-            lock.acquire();
-          } else {
-            if (!lock.tryAcquire(lockTimeoutMs)) {
-              throw new LockTimeoutException("Timed out acquiring " +
-                (isShared ? "read" : "write") + "lock for table:" + tableName +
-                "for:" + purpose + " after " + lockTimeoutMs + " ms.");
-            }
-          }
-        } catch (InterruptedException e) {
-          LOG.warn("Interrupted acquiring a lock for " + tableName, e);
-          Thread.currentThread().interrupt();
-          throw new InterruptedIOException("Interrupted acquiring a lock");
-        }
-        if (LOG.isTraceEnabled()) LOG.trace("Acquired table " + (isShared ? "read" : "write")
-            + " lock on " + tableName + " for " + purpose);
-      }
-
-      @Override
-      public void release() throws IOException {
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Attempt to release table " + (isShared ? "read" : "write")
-              + " lock on " + tableName);
-        }
-        if (lock == null) {
-          throw new IllegalStateException("Table " + tableName +
-            " is not locked!");
-        }
-
-        try {
-          lock.release();
-        } catch (InterruptedException e) {
-          LOG.warn("Interrupted while releasing a lock for " + tableName);
-          throw new InterruptedIOException();
-        }
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Released table lock on " + tableName);
-        }
-      }
-
-      private InterProcessLock createTableLock() {
-        String tableLockZNode = ZKUtil.joinZNode(zkWatcher.znodePaths.tableLockZNode,
-            tableName.getNameAsString());
-
-        ZooKeeperProtos.TableLock data = ZooKeeperProtos.TableLock.newBuilder()
-          .setTableName(ProtobufUtil.toProtoTableName(tableName))
-          .setLockOwner(ProtobufUtil.toServerName(serverName))
-          .setThreadId(Thread.currentThread().getId())
-          .setPurpose(purpose)
-          .setIsShared(isShared)
-          .setCreateTime(EnvironmentEdgeManager.currentTime()).build();
-        byte[] lockMetadata = toBytes(data);
-
-        InterProcessReadWriteLock lock = new ZKInterProcessReadWriteLock(zkWatcher, tableLockZNode,
-          METADATA_HANDLER);
-        return isShared ? lock.readLock(lockMetadata) : lock.writeLock(lockMetadata);
-      }
-    }
-
-    private static byte[] toBytes(ZooKeeperProtos.TableLock data) {
-      return ProtobufUtil.prependPBMagic(data.toByteArray());
-    }
-
-    private final ServerName serverName;
-    private final ZooKeeperWatcher zkWatcher;
-    private final long writeLockTimeoutMs;
-    private final long readLockTimeoutMs;
-    private final long lockExpireTimeoutMs;
-
-    /**
-     * Initialize a new manager for table-level locks.
-     * @param zkWatcher
-     * @param serverName Address of the server responsible for acquiring and
-     * releasing the table-level locks
-     * @param writeLockTimeoutMs Timeout (in milliseconds) for acquiring a write lock for a
-     * given table, or -1 for no timeout
-     * @param readLockTimeoutMs Timeout (in milliseconds) for acquiring a read lock for a
-     * given table, or -1 for no timeout
-     */
-    public ZKTableLockManager(ZooKeeperWatcher zkWatcher,
-      ServerName serverName, long writeLockTimeoutMs, long readLockTimeoutMs, long lockExpireTimeoutMs) {
-      this.zkWatcher = zkWatcher;
-      this.serverName = serverName;
-      this.writeLockTimeoutMs = writeLockTimeoutMs;
-      this.readLockTimeoutMs = readLockTimeoutMs;
-      this.lockExpireTimeoutMs = lockExpireTimeoutMs;
-    }
-
-    @Override
-    public TableLock writeLock(TableName tableName, String purpose) {
-      return new TableLockImpl(tableName, zkWatcher,
-          serverName, writeLockTimeoutMs, false, purpose);
-    }
-
-    public TableLock readLock(TableName tableName, String purpose) {
-      return new TableLockImpl(tableName, zkWatcher,
-          serverName, readLockTimeoutMs, true, purpose);
-    }
-
-    public void visitAllLocks(MetadataHandler handler) throws IOException {
-      for (String tableName : getTableNames()) {
-        String tableLockZNode = ZKUtil.joinZNode(zkWatcher.znodePaths.tableLockZNode, tableName);
-        ZKInterProcessReadWriteLock lock = new ZKInterProcessReadWriteLock(
-            zkWatcher, tableLockZNode, null);
-        lock.readLock(null).visitLocks(handler);
-        lock.writeLock(null).visitLocks(handler);
-      }
-    }
-
-    private List<String> getTableNames() throws IOException {
-
-      List<String> tableNames;
-      try {
-        tableNames = ZKUtil.listChildrenNoWatch(zkWatcher, zkWatcher.znodePaths.tableLockZNode);
-      } catch (KeeperException e) {
-        LOG.error("Unexpected ZooKeeper error when listing children", e);
-        throw new IOException("Unexpected ZooKeeper exception", e);
-      }
-      return tableNames;
-    }
-
-    @Override
-    public void reapWriteLocks() throws IOException {
-      //get the table names
-      try {
-        for (String tableName : getTableNames()) {
-          String tableLockZNode = ZKUtil.joinZNode(zkWatcher.znodePaths.tableLockZNode, tableName);
-          ZKInterProcessReadWriteLock lock = new ZKInterProcessReadWriteLock(
-              zkWatcher, tableLockZNode, null);
-          lock.writeLock(null).reapAllLocks();
-        }
-      } catch (IOException ex) {
-        throw ex;
-      } catch (Exception ex) {
-        LOG.warn("Caught exception while reaping table write locks", ex);
-      }
-    }
-
-    @Override
-    public void reapAllExpiredLocks() throws IOException {
-      //get the table names
-      try {
-        for (String tableName : getTableNames()) {
-          String tableLockZNode = ZKUtil.joinZNode(zkWatcher.znodePaths.tableLockZNode, tableName);
-          ZKInterProcessReadWriteLock lock = new ZKInterProcessReadWriteLock(
-              zkWatcher, tableLockZNode, null);
-          lock.readLock(null).reapExpiredLocks(lockExpireTimeoutMs);
-          lock.writeLock(null).reapExpiredLocks(lockExpireTimeoutMs);
-        }
-      } catch (IOException ex) {
-        throw ex;
-      } catch (Exception ex) {
-        throw new IOException(ex);
-      }
-    }
-
-    @Override
-    public void tableDeleted(TableName tableName) throws IOException {
-      //table write lock from DeleteHandler is already released, just delete the parent znode
-      String tableNameStr = tableName.getNameAsString();
-      String tableLockZNode = ZKUtil.joinZNode(zkWatcher.znodePaths.tableLockZNode, tableNameStr);
-      try {
-        ZKUtil.deleteNode(zkWatcher, tableLockZNode);
-      } catch (KeeperException ex) {
-        if (ex.code() == KeeperException.Code.NOTEMPTY) {
-          //we might get this in rare occasions where a CREATE table or some other table operation
-          //is waiting to acquire the lock. In this case, parent znode won't be deleted.
-          LOG.warn("Could not delete the znode for table locks because NOTEMPTY: "
-              + tableLockZNode);
-          return;
-        }
-        throw new IOException(ex);
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
index 8f99f5e..b72e219 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
@@ -63,9 +63,8 @@ public final class LockManager {
     return new MasterLock(regionInfos, description);
   }
 
-  private void submitProcedure(final LockProcedure proc, final long nonceGroup, final long nonce) {
+  private void submitProcedure(final LockProcedure proc, final NonceKey nonceKey) {
     proc.setOwner(master.getMasterProcedureExecutor().getEnvironment().getRequestUser());
-    final NonceKey nonceKey = master.getMasterProcedureExecutor().createNonceKey(nonceGroup, nonce);
     master.getMasterProcedureExecutor().submitProcedure(proc, nonceKey);
   }
 
@@ -205,27 +204,23 @@ public final class LockManager {
    */
   public class RemoteLocks {
     public long requestNamespaceLock(final String namespace, final LockProcedure.LockType type,
-        final String description, final long nonceGroup, final long nonce)
+        final String description, final NonceKey nonceKey)
         throws IllegalArgumentException, IOException {
       master.getMasterCoprocessorHost().preRequestLock(namespace, null, null, type, description);
-
       final LockProcedure proc = new LockProcedure(master.getConfiguration(), namespace,
           type, description, null);
-      submitProcedure(proc, nonceGroup, nonce);
-
+      submitProcedure(proc, nonceKey);
       master.getMasterCoprocessorHost().postRequestLock(namespace, null, null, type, description);
       return proc.getProcId();
     }
 
     public long requestTableLock(final TableName tableName, final LockProcedure.LockType type,
-        final String description, final long nonceGroup, final long nonce)
+        final String description, final NonceKey nonceKey)
         throws IllegalArgumentException, IOException {
       master.getMasterCoprocessorHost().preRequestLock(null, tableName, null, type, description);
-
       final LockProcedure proc = new LockProcedure(master.getConfiguration(), tableName,
           type, description, null);
-      submitProcedure(proc, nonceGroup, nonce);
-
+      submitProcedure(proc, nonceKey);
       master.getMasterCoprocessorHost().postRequestLock(null, tableName, null, type, description);
       return proc.getProcId();
     }
@@ -234,14 +229,13 @@ public final class LockManager {
      * @throws IllegalArgumentException if all regions are not from same table.
      */
     public long requestRegionsLock(final HRegionInfo[] regionInfos, final String description,
-        final long nonceGroup, final long nonce) throws IllegalArgumentException, IOException {
+        final NonceKey nonceKey)
+    throws IllegalArgumentException, IOException {
       master.getMasterCoprocessorHost().preRequestLock(null, null, regionInfos,
             LockProcedure.LockType.EXCLUSIVE, description);
-
       final LockProcedure proc = new LockProcedure(master.getConfiguration(), regionInfos,
           LockProcedure.LockType.EXCLUSIVE, description, null);
-      submitProcedure(proc, nonceGroup, nonce);
-
+      submitProcedure(proc, nonceKey);
       master.getMasterCoprocessorHost().postRequestLock(null, null, regionInfos,
             LockProcedure.LockType.EXCLUSIVE, description);
       return proc.getProcId();
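
A hedged sketch of a call into the reworked RemoteLocks API, which now takes a prebuilt
NonceKey instead of the raw (nonceGroup, nonce) pair; createNonceKey() is the call that
was dropped from submitProcedure() above, and tableName/description are placeholders:

  NonceKey nonceKey = master.getMasterProcedureExecutor().createNonceKey(nonceGroup, nonce);
  long procId = master.getLockManager().remoteLocks().requestTableLock(
      tableName, LockProcedure.LockType.EXCLUSIVE, "example: offline schema change", nonceKey);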

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
index f793a65..1a1c8c3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
@@ -1,4 +1,5 @@
 /**
+
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
index 9362f24..353342a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
@@ -98,8 +98,7 @@ public class MasterProcedureEnv implements ConfigurationObserver {
 
   public MasterProcedureEnv(final MasterServices master) {
     this.master = master;
-    this.procSched = new MasterProcedureScheduler(master.getConfiguration(),
-      master.getTableLockManager());
+    this.procSched = new MasterProcedureScheduler(master.getConfiguration());
   }
 
   public User getRequestUser() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 3f588ff..b9b7b59 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import com.google.common.annotations.VisibleForTesting;
 
-import java.io.IOException;
 import java.util.ArrayDeque;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -35,8 +34,6 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
 import org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler;
 import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -67,8 +64,6 @@ import org.apache.hadoop.hbase.util.AvlUtil.AvlTreeIterator;
 public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   private static final Log LOG = LogFactory.getLog(MasterProcedureScheduler.class);
 
-  private final TableLockManager lockManager;
-
   private final static NamespaceQueueKeyComparator NAMESPACE_QUEUE_KEY_COMPARATOR =
       new NamespaceQueueKeyComparator();
   private final static ServerQueueKeyComparator SERVER_QUEUE_KEY_COMPARATOR =
@@ -87,9 +82,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   private final int userTablePriority;
   private final int sysTablePriority;
 
-  public MasterProcedureScheduler(final Configuration conf, final TableLockManager lockManager) {
-    this.lockManager = lockManager;
-
+  public MasterProcedureScheduler(final Configuration conf) {
     // TODO: should this be part of the HTD?
     metaTablePriority = conf.getInt("hbase.master.procedure.queue.meta.table.priority", 3);
     sysTablePriority = conf.getInt("hbase.master.procedure.queue.system.table.priority", 2);
@@ -456,7 +449,6 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     private final NamespaceQueue namespaceQueue;
 
     private HashMap<String, RegionEvent> regionEventMap;
-    private TableLock tableLock = null;
 
     public TableQueue(TableName tableName, NamespaceQueue namespaceQueue, int priority) {
       super(tableName, priority);
@@ -544,65 +536,6 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       }
       throw new UnsupportedOperationException("unexpected type " + tpi.getTableOperationType());
     }
-
-    private synchronized boolean tryZkSharedLock(final TableLockManager lockManager,
-        final String purpose) {
-      // Since we only have one lock resource.  We should only acquire zk lock if the znode
-      // does not exist.
-      //
-      if (isSingleSharedLock()) {
-        // Take zk-read-lock
-        TableName tableName = getKey();
-        tableLock = lockManager.readLock(tableName, purpose);
-        try {
-          tableLock.acquire();
-        } catch (IOException e) {
-          LOG.error("failed acquire read lock on " + tableName, e);
-          tableLock = null;
-          return false;
-        }
-      }
-      return true;
-    }
-
-    private synchronized void releaseZkSharedLock(final TableLockManager lockManager) {
-      if (isSingleSharedLock()) {
-        releaseTableLock(lockManager, true);
-      }
-    }
-
-    private synchronized boolean tryZkExclusiveLock(final TableLockManager lockManager,
-        final String purpose) {
-      // Take zk-write-lock
-      TableName tableName = getKey();
-      tableLock = lockManager.writeLock(tableName, purpose);
-      try {
-        tableLock.acquire();
-      } catch (IOException e) {
-        LOG.error("failed acquire write lock on " + tableName, e);
-        tableLock = null;
-        return false;
-      }
-      return true;
-    }
-
-    private synchronized void releaseZkExclusiveLock(final TableLockManager lockManager) {
-      releaseTableLock(lockManager, true);
-    }
-
-    private void releaseTableLock(final TableLockManager lockManager, boolean reset) {
-      for (int i = 0; i < 3; ++i) {
-        try {
-          tableLock.release();
-          if (reset) {
-            tableLock = null;
-          }
-          break;
-        } catch (IOException e) {
-          LOG.warn("Could not release the table write-lock", e);
-        }
-      }
-    }
   }
 
   private static class NamespaceQueueKeyComparator implements AvlKeyComparator<NamespaceQueue> {
@@ -665,35 +598,22 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
    */
   public boolean tryAcquireTableExclusiveLock(final Procedure procedure, final TableName table) {
     schedLock();
-    TableQueue queue = getTableQueue(table);
-    if (!queue.getNamespaceQueue().trySharedLock()) {
-      schedUnlock();
-      return false;
-    }
-
-    if (!queue.tryExclusiveLock(procedure)) {
-      queue.getNamespaceQueue().releaseSharedLock();
-      schedUnlock();
-      return false;
-    }
-
-    removeFromRunQueue(tableRunQueue, queue);
-    boolean hasParentLock = queue.hasParentLock(procedure);
-    schedUnlock();
+    try {
+      final TableQueue queue = getTableQueue(table);
+      if (!queue.getNamespaceQueue().trySharedLock()) {
+        return false;
+      }
 
-    boolean hasXLock = true;
-    if (!hasParentLock) {
-      // Zk lock is expensive...
-      hasXLock = queue.tryZkExclusiveLock(lockManager, procedure.toString());
-      if (!hasXLock) {
-        schedLock();
-        if (!hasParentLock) queue.releaseExclusiveLock(procedure);
+      if (!queue.tryExclusiveLock(procedure)) {
         queue.getNamespaceQueue().releaseSharedLock();
-        addToRunQueue(tableRunQueue, queue);
-        schedUnlock();
+        return false;
       }
+
+      removeFromRunQueue(tableRunQueue, queue);
+      return true;
+    } finally {
+      schedUnlock();
     }
-    return hasXLock;
   }
 
   /**
@@ -702,19 +622,17 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
    * @param table the name of the table that has the exclusive lock
    */
   public void releaseTableExclusiveLock(final Procedure procedure, final TableName table) {
-    final TableQueue queue = getTableQueueWithLock(table);
-    final boolean hasParentLock = queue.hasParentLock(procedure);
-
-    if (!hasParentLock) {
-      // Zk lock is expensive...
-      queue.releaseZkExclusiveLock(lockManager);
-    }
-
     schedLock();
-    if (!hasParentLock) queue.releaseExclusiveLock(procedure);
-    queue.getNamespaceQueue().releaseSharedLock();
-    addToRunQueue(tableRunQueue, queue);
-    schedUnlock();
+    try {
+      final TableQueue queue = getTableQueue(table);
+      if (!queue.hasParentLock(procedure)) {
+        queue.releaseExclusiveLock(procedure);
+      }
+      queue.getNamespaceQueue().releaseSharedLock();
+      addToRunQueue(tableRunQueue, queue);
+    } finally {
+      schedUnlock();
+    }
   }
 
   /**
@@ -731,29 +649,21 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   private TableQueue tryAcquireTableQueueSharedLock(final Procedure procedure,
       final TableName table) {
     schedLock();
-    TableQueue queue = getTableQueue(table);
-    if (!queue.getNamespaceQueue().trySharedLock()) {
-      return null;
-    }
+    try {
+      final TableQueue queue = getTableQueue(table);
+      if (!queue.getNamespaceQueue().trySharedLock()) {
+        return null;
+      }
 
-    if (!queue.trySharedLock()) {
-      queue.getNamespaceQueue().releaseSharedLock();
-      schedUnlock();
-      return null;
-    }
+      if (!queue.trySharedLock()) {
+        queue.getNamespaceQueue().releaseSharedLock();
+        return null;
+      }
 
-    // TODO: Zk lock is expensive and it would be perf bottleneck.  Long term solution is
-    // to remove it.
-    if (!queue.tryZkSharedLock(lockManager, procedure.toString())) {
-      queue.releaseSharedLock();
-      queue.getNamespaceQueue().releaseSharedLock();
+      return queue;
+    } finally {
       schedUnlock();
-      return null;
     }
-
-    schedUnlock();
-
-    return queue;
   }
 
   /**
@@ -762,17 +672,16 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
    * @param table the name of the table that has the shared lock
    */
   public void releaseTableSharedLock(final Procedure procedure, final TableName table) {
-    final TableQueue queue = getTableQueueWithLock(table);
-
     schedLock();
-    // Zk lock is expensive...
-    queue.releaseZkSharedLock(lockManager);
-
-    queue.getNamespaceQueue().releaseSharedLock();
-    if (queue.releaseSharedLock()) {
-      addToRunQueue(tableRunQueue, queue);
+    try {
+      final TableQueue queue = getTableQueue(table);
+      if (queue.releaseSharedLock()) {
+        addToRunQueue(tableRunQueue, queue);
+      }
+      queue.getNamespaceQueue().releaseSharedLock();
+    } finally {
+      schedUnlock();
     }
-    schedUnlock();
   }
 
   /**
@@ -796,14 +705,6 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
         if (AvlIterableList.isLinked(queue)) {
           tableRunQueue.remove(queue);
         }
-
-        // Remove the table lock
-        try {
-          lockManager.tableDeleted(table);
-        } catch (IOException e) {
-          LOG.warn("Received exception from TableLockManager.tableDeleted:", e); //not critical
-        }
-
         removeTableQueue(table);
       } else {
         // TODO: If there are no create, we can drop all the other ops
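
With the ZK table lock removed, the lock methods above converge on the same
lock/try/finally shape; a sketch of the resulting exclusive-lock path, using only the
identifiers visible in this hunk:

  public boolean tryAcquireTableExclusiveLock(final Procedure procedure, final TableName table) {
    schedLock();
    try {
      final TableQueue queue = getTableQueue(table);
      if (!queue.getNamespaceQueue().trySharedLock()) {
        return false;                                   // namespace is held exclusively elsewhere
      }
      if (!queue.tryExclusiveLock(procedure)) {
        queue.getNamespaceQueue().releaseSharedLock();  // undo the namespace share
        return false;
      }
      removeFromRunQueue(tableRunQueue, queue);         // park the queue while the xlock is held
      return true;                                      // in-memory only; no ZooKeeper round trip
    } finally {
      schedUnlock();
    }
  }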

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
index 9706107..62cb0a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureUtil.java
@@ -141,4 +141,4 @@ public final class MasterProcedureUtil {
     }
     return runnable.getProcId();
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
index a0b6d25..992f28e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/TakeSnapshotHandler.java
@@ -44,8 +44,8 @@ import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.MetricsSnapshot;
 import org.apache.hadoop.hbase.master.SnapshotSentinel;
-import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.master.locking.LockManager;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -83,8 +83,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
   protected final Path workingDir;
   private final MasterSnapshotVerifier verifier;
   protected final ForeignExceptionDispatcher monitor;
-  protected final TableLockManager tableLockManager;
-  protected final TableLock tableLock;
+  protected final LockManager.MasterLock tableLock;
   protected final MonitoredTask status;
   protected final TableName snapshotTable;
   protected final SnapshotManifest snapshotManifest;
@@ -114,10 +113,9 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
     this.monitor = new ForeignExceptionDispatcher(snapshot.getName());
     this.snapshotManifest = SnapshotManifest.create(conf, fs, workingDir, snapshot, monitor);
 
-    this.tableLockManager = master.getTableLockManager();
-    this.tableLock = this.tableLockManager.writeLock(
-        snapshotTable,
-        EventType.C_M_SNAPSHOT_TABLE.toString());
+    this.tableLock = master.getLockManager().createMasterLock(
+        snapshotTable, LockProcedure.LockType.EXCLUSIVE,
+        this.getClass().getName() + ": take snapshot " + snapshot.getName());
 
     // prepare the verify
     this.verifier = new MasterSnapshotVerifier(masterServices, snapshot, rootDir);
@@ -138,18 +136,14 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
 
   public TakeSnapshotHandler prepare() throws Exception {
     super.prepare();
-    this.tableLock.acquire(); // after this, you should ensure to release this lock in
-                              // case of exceptions
-    boolean success = false;
+    // after this, you should ensure to release this lock in case of exceptions
+    this.tableLock.acquire();
     try {
       this.htd = loadTableDescriptor(); // check that .tableinfo is present
-      success = true;
-    } finally {
-      if (!success) {
-        releaseTableLock();
-      }
+    } catch (Exception e) {
+      this.tableLock.release();
+      throw e;
     }
-
     return this;
   }
 
@@ -234,17 +228,7 @@ public abstract class TakeSnapshotHandler extends EventHandler implements Snapsh
         LOG.error("Couldn't delete snapshot working directory:" + workingDir);
       }
       lock.unlock();
-      releaseTableLock();
-    }
-  }
-
-  protected void releaseTableLock() {
-    if (this.tableLock != null) {
-      try {
-        this.tableLock.release();
-      } catch (IOException ex) {
-        LOG.warn("Could not release the table lock", ex);
-      }
+      tableLock.release();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index 770c069..2592812 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -61,8 +61,8 @@ import org.apache.hadoop.hbase.io.crypto.Encryption;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.mob.compactions.MobCompactor;
 import org.apache.hadoop.hbase.mob.compactions.PartitionedMobCompactor;
 import org.apache.hadoop.hbase.regionserver.BloomType;
@@ -699,12 +699,11 @@ public final class MobUtils {
    * @param tableName the table the compact
    * @param hcd the column descriptor
    * @param pool the thread pool
-   * @param tableLockManager the tableLock manager
    * @param allFiles Whether add all mob files into the compaction.
    */
   public static void doMobCompaction(Configuration conf, FileSystem fs, TableName tableName,
-    HColumnDescriptor hcd, ExecutorService pool, TableLockManager tableLockManager,
-    boolean allFiles) throws IOException {
+    HColumnDescriptor hcd, ExecutorService pool, boolean allFiles, LockManager.MasterLock lock)
+      throws IOException {
     String className = conf.get(MobConstants.MOB_COMPACTOR_CLASS_KEY,
       PartitionedMobCompactor.class.getName());
     // instantiate the mob compactor.
@@ -719,29 +718,14 @@ public final class MobUtils {
     // compact only for mob-enabled column.
     // obtain a write table lock before performing compaction to avoid race condition
     // with major compaction in mob-enabled column.
-    boolean tableLocked = false;
-    TableLock lock = null;
     try {
-      // the tableLockManager might be null in testing. In that case, it is lock-free.
-      if (tableLockManager != null) {
-        lock = tableLockManager.writeLock(MobUtils.getTableLockName(tableName),
-          "Run MobCompactor");
-        lock.acquire();
-      }
-      tableLocked = true;
+      lock.acquire();
       compactor.compact(allFiles);
     } catch (Exception e) {
       LOG.error("Failed to compact the mob files for the column " + hcd.getNameAsString()
-        + " in the table " + tableName.getNameAsString(), e);
+          + " in the table " + tableName.getNameAsString(), e);
     } finally {
-      if (lock != null && tableLocked) {
-        try {
-          lock.release();
-        } catch (IOException e) {
-          LOG.error(
-            "Failed to release the write lock for the table " + tableName.getNameAsString(), e);
-        }
-      }
+      lock.release();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index 63929a8..6870445 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -80,7 +80,7 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi
   public static final String REGION_SERVER_REGION_SPLIT_LIMIT =
       "hbase.regionserver.regionSplitLimit";
   public static final int DEFAULT_REGION_SERVER_REGION_SPLIT_LIMIT= 1000;
-  
+
   private final HRegionServer server;
   private final Configuration conf;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index 0bf6c9a..6ffa459 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -20,12 +20,14 @@ package org.apache.hadoop.hbase.regionserver;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Date;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -37,6 +39,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.TableName;
@@ -45,6 +48,7 @@ import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.io.compress.Compression;
@@ -52,8 +56,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CorruptHFileException;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
 import org.apache.hadoop.hbase.mob.MobCacheConfig;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.mob.MobFile;
@@ -100,8 +102,6 @@ public class HMobStore extends HStore {
   private volatile long mobScanCellsCount = 0;
   private volatile long mobScanCellsSize = 0;
   private HColumnDescriptor family;
-  private TableLockManager tableLockManager;
-  private TableName tableLockName;
   private Map<String, List<Path>> map = new ConcurrentHashMap<String, List<Path>>();
   private final IdLock keyLock = new IdLock();
   // When we add a MOB reference cell to the HFile, we will add 2 tags along with it
@@ -126,10 +126,6 @@ public class HMobStore extends HStore {
     locations.add(HFileArchiveUtil.getStoreArchivePath(conf, tn, MobUtils.getMobRegionInfo(tn)
         .getEncodedName(), family.getNameAsString()));
     map.put(Bytes.toString(tn.getName()), locations);
-    if (region.getRegionServerServices() != null) {
-      tableLockManager = region.getRegionServerServices().getTableLockManager();
-      tableLockName = MobUtils.getTableLockName(getTableName());
-    }
     List<Tag> tags = new ArrayList<>(2);
     tags.add(MobConstants.MOB_REF_TAG);
     Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE,
@@ -482,39 +478,39 @@ public class HMobStore extends HStore {
       // Acquire a table lock to coordinate.
       // 1. If no, mark the major compaction as retainDeleteMarkers and continue the compaction.
       // 2. If the lock is obtained, run the compaction directly.
-      TableLock lock = null;
-      if (tableLockManager != null) {
-        lock = tableLockManager.readLock(tableLockName, "Major compaction in HMobStore");
-      }
-      boolean tableLocked = false;
-      String tableName = getTableName().getNameAsString();
-      if (lock != null) {
-        try {
-          LOG.info("Start to acquire a read lock for the table[" + tableName
-              + "], ready to perform the major compaction");
-          lock.acquire();
-          tableLocked = true;
-        } catch (Exception e) {
-          LOG.error("Fail to lock the table " + tableName, e);
-        }
-      } else {
-        // If the tableLockManager is null, mark the tableLocked as true.
-        tableLocked = true;
-      }
+      EntityLock lock = null;
       try {
-        if (!tableLocked) {
-          LOG.warn("Cannot obtain the table lock, maybe a sweep tool is running on this table["
-              + tableName + "], forcing the delete markers to be retained");
-          compaction.getRequest().forceRetainDeleteMarkers();
+        if (region.getRegionServerServices() != null) {
+          List<HRegionInfo> regionInfos = Collections.singletonList(region.getRegionInfo());
+          // regionLock takes shared lock on table too.
+          lock = region.getRegionServerServices().regionLock(regionInfos, "MOB compaction", null);
+          int awaitTime = conf.getInt(HRegionServer.REGION_LOCK_AWAIT_TIME_SEC,
+              HRegionServer.DEFAULT_REGION_LOCK_AWAIT_TIME_SEC);
+          try {
+            LOG.info("Acquiring MOB major compaction lock " + lock);
+            lock.requestLock();
+            lock.await(awaitTime, TimeUnit.SECONDS);
+          } catch (InterruptedException e) {
+            LOG.error("Interrupted exception when waiting for lock: " + lock, e);
+          }
+          if (!lock.isLocked()) {
+            // Remove lock from queue on the master so that if it's granted in future, we don't
+            // keep holding it until compaction finishes
+            lock.unlock();
+            lock = null;
+            LOG.warn("Cannot obtain table lock, maybe a sweep tool is running on this " + "table["
+                + getTableName() + "], forcing the delete markers to be retained");
+          }
+        } else {
+          LOG.warn("Cannot obtain lock because RegionServices not available. Are we running as "
+              + "compaction tool?");
         }
+        // If no lock, retain delete markers to be safe.
+        if (lock == null) compaction.getRequest().forceRetainDeleteMarkers();
         return super.compact(compaction, throughputController, user);
       } finally {
-        if (tableLocked && lock != null) {
-          try {
-            lock.release();
-          } catch (IOException e) {
-            LOG.error("Fail to release the table lock " + tableName, e);
-          }
+        if (lock != null && lock.isLocked()) {
+          lock.unlock();
         }
       }
     } else {
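
On the region server side the coordination point is now an EntityLock granted by the
master; a condensed sketch of the pattern added to HMobStore.compact() above, with the
enclosing method and the fixed 300s timeout illustrative only:

  void majorCompactMobWithLock(RegionServerServices services, HRegion region)
      throws IOException, InterruptedException {
    // regionLock() also takes a shared lock on the table that owns the regions.
    EntityLock lock = services.regionLock(
        Collections.singletonList(region.getRegionInfo()), "MOB compaction", null);
    lock.requestLock();                        // queue the request on the master
    lock.await(300, TimeUnit.SECONDS);         // bounded wait for the grant
    if (!lock.isLocked()) {
      lock.unlock();                           // drop the queued request so it is not granted later
      return;                                  // caller should retain delete markers in this case
    }
    try {
      // ... run the major compaction with delete markers eligible for removal ...
    } finally {
      lock.unlock();
    }
  }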


[10/50] [abbrv] hbase git commit: HBASE-17470 Remove merge region code from region server (Stephen Yuan Jiang)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
index 654d152..b4bd84d 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/AdminProtos.java
@@ -16538,1406 +16538,6 @@ public final class AdminProtos {
 
   }
 
-  public interface MergeRegionsRequestOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:hbase.pb.MergeRegionsRequest)
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
-
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-     */
-    boolean hasRegionA();
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA();
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder();
-
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-     */
-    boolean hasRegionB();
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB();
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder();
-
-    /**
-     * <code>optional bool forcible = 3 [default = false];</code>
-     */
-    boolean hasForcible();
-    /**
-     * <code>optional bool forcible = 3 [default = false];</code>
-     */
-    boolean getForcible();
-
-    /**
-     * <pre>
-     * wall clock time from master
-     * </pre>
-     *
-     * <code>optional uint64 master_system_time = 4;</code>
-     */
-    boolean hasMasterSystemTime();
-    /**
-     * <pre>
-     * wall clock time from master
-     * </pre>
-     *
-     * <code>optional uint64 master_system_time = 4;</code>
-     */
-    long getMasterSystemTime();
-  }
-  /**
-   * <pre>
-   **
-   * Merges the specified regions.
-   * &lt;p&gt;
-   * This method currently closes the regions and then merges them
-   * </pre>
-   *
-   * Protobuf type {@code hbase.pb.MergeRegionsRequest}
-   */
-  public  static final class MergeRegionsRequest extends
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
-      // @@protoc_insertion_point(message_implements:hbase.pb.MergeRegionsRequest)
-      MergeRegionsRequestOrBuilder {
-    // Use MergeRegionsRequest.newBuilder() to construct.
-    private MergeRegionsRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
-      super(builder);
-    }
-    private MergeRegionsRequest() {
-      forcible_ = false;
-      masterSystemTime_ = 0L;
-    }
-
-    @java.lang.Override
-    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
-    getUnknownFields() {
-      return this.unknownFields;
-    }
-    private MergeRegionsRequest(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      this();
-      int mutable_bitField0_ = 0;
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000001) == 0x00000001)) {
-                subBuilder = regionA_.toBuilder();
-              }
-              regionA_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(regionA_);
-                regionA_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000001;
-              break;
-            }
-            case 18: {
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000002) == 0x00000002)) {
-                subBuilder = regionB_.toBuilder();
-              }
-              regionB_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(regionB_);
-                regionB_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000002;
-              break;
-            }
-            case 24: {
-              bitField0_ |= 0x00000004;
-              forcible_ = input.readBool();
-              break;
-            }
-            case 32: {
-              bitField0_ |= 0x00000008;
-              masterSystemTime_ = input.readUInt64();
-              break;
-            }
-          }
-        }
-      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
-            e).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsRequest_descriptor;
-    }
-
-    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsRequest_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder.class);
-    }
-
-    private int bitField0_;
-    public static final int REGION_A_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionA_;
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-     */
-    public boolean hasRegionA() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA() {
-      return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_;
-    }
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder() {
-      return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_;
-    }
-
-    public static final int REGION_B_FIELD_NUMBER = 2;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionB_;
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-     */
-    public boolean hasRegionB() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
-    }
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB() {
-      return regionB_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_;
-    }
-    /**
-     * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder() {
-      return regionB_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_;
-    }
-
-    public static final int FORCIBLE_FIELD_NUMBER = 3;
-    private boolean forcible_;
-    /**
-     * <code>optional bool forcible = 3 [default = false];</code>
-     */
-    public boolean hasForcible() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
-    }
-    /**
-     * <code>optional bool forcible = 3 [default = false];</code>
-     */
-    public boolean getForcible() {
-      return forcible_;
-    }
-
-    public static final int MASTER_SYSTEM_TIME_FIELD_NUMBER = 4;
-    private long masterSystemTime_;
-    /**
-     * <pre>
-     * wall clock time from master
-     * </pre>
-     *
-     * <code>optional uint64 master_system_time = 4;</code>
-     */
-    public boolean hasMasterSystemTime() {
-      return ((bitField0_ & 0x00000008) == 0x00000008);
-    }
-    /**
-     * <pre>
-     * wall clock time from master
-     * </pre>
-     *
-     * <code>optional uint64 master_system_time = 4;</code>
-     */
-    public long getMasterSystemTime() {
-      return masterSystemTime_;
-    }
-
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized == 1) return true;
-      if (isInitialized == 0) return false;
-
-      if (!hasRegionA()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!hasRegionB()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!getRegionA().isInitialized()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      if (!getRegionB().isInitialized()) {
-        memoizedIsInitialized = 0;
-        return false;
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, getRegionA());
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeMessage(2, getRegionB());
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeBool(3, forcible_);
-      }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        output.writeUInt64(4, masterSystemTime_);
-      }
-      unknownFields.writeTo(output);
-    }
-
-    public int getSerializedSize() {
-      int size = memoizedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, getRegionA());
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(2, getRegionB());
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeBoolSize(3, forcible_);
-      }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(4, masterSystemTime_);
-      }
-      size += unknownFields.getSerializedSize();
-      memoizedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest) obj;
-
-      boolean result = true;
-      result = result && (hasRegionA() == other.hasRegionA());
-      if (hasRegionA()) {
-        result = result && getRegionA()
-            .equals(other.getRegionA());
-      }
-      result = result && (hasRegionB() == other.hasRegionB());
-      if (hasRegionB()) {
-        result = result && getRegionB()
-            .equals(other.getRegionB());
-      }
-      result = result && (hasForcible() == other.hasForcible());
-      if (hasForcible()) {
-        result = result && (getForcible()
-            == other.getForcible());
-      }
-      result = result && (hasMasterSystemTime() == other.hasMasterSystemTime());
-      if (hasMasterSystemTime()) {
-        result = result && (getMasterSystemTime()
-            == other.getMasterSystemTime());
-      }
-      result = result && unknownFields.equals(other.unknownFields);
-      return result;
-    }
-
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasRegionA()) {
-        hash = (37 * hash) + REGION_A_FIELD_NUMBER;
-        hash = (53 * hash) + getRegionA().hashCode();
-      }
-      if (hasRegionB()) {
-        hash = (37 * hash) + REGION_B_FIELD_NUMBER;
-        hash = (53 * hash) + getRegionB().hashCode();
-      }
-      if (hasForcible()) {
-        hash = (37 * hash) + FORCIBLE_FIELD_NUMBER;
-        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
-            getForcible());
-      }
-      if (hasMasterSystemTime()) {
-        hash = (37 * hash) + MASTER_SYSTEM_TIME_FIELD_NUMBER;
-        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
-            getMasterSystemTime());
-      }
-      hash = (29 * hash) + unknownFields.hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom(byte[] data)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom(
-        byte[] data,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom(
-        java.io.InputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseDelimitedWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseDelimitedFrom(
-        java.io.InputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input, extensionRegistry);
-    }
-
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder() {
-      return DEFAULT_INSTANCE.toBuilder();
-    }
-    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest prototype) {
-      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() {
-      return this == DEFAULT_INSTANCE
-          ? new Builder() : new Builder().mergeFrom(this);
-    }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * <pre>
-     **
-     * Merges the specified regions.
-     * &lt;p&gt;
-     * This method currently closes the regions and then merges them
-     * </pre>
-     *
-     * Protobuf type {@code hbase.pb.MergeRegionsRequest}
-     */
-    public static final class Builder extends
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
-        // @@protoc_insertion_point(builder_implements:hbase.pb.MergeRegionsRequest)
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequestOrBuilder {
-      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsRequest_descriptor;
-      }
-
-      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsRequest_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-                .alwaysUseFieldBuilders) {
-          getRegionAFieldBuilder();
-          getRegionBFieldBuilder();
-        }
-      }
-      public Builder clear() {
-        super.clear();
-        if (regionABuilder_ == null) {
-          regionA_ = null;
-        } else {
-          regionABuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        if (regionBBuilder_ == null) {
-          regionB_ = null;
-        } else {
-          regionBBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000002);
-        forcible_ = false;
-        bitField0_ = (bitField0_ & ~0x00000004);
-        masterSystemTime_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000008);
-        return this;
-      }
-
-      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsRequest_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest build() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest buildPartial() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        if (regionABuilder_ == null) {
-          result.regionA_ = regionA_;
-        } else {
-          result.regionA_ = regionABuilder_.build();
-        }
-        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
-          to_bitField0_ |= 0x00000002;
-        }
-        if (regionBBuilder_ == null) {
-          result.regionB_ = regionB_;
-        } else {
-          result.regionB_ = regionBBuilder_.build();
-        }
-        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
-          to_bitField0_ |= 0x00000004;
-        }
-        result.forcible_ = forcible_;
-        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
-          to_bitField0_ |= 0x00000008;
-        }
-        result.masterSystemTime_ = masterSystemTime_;
-        result.bitField0_ = to_bitField0_;
-        onBuilt();
-        return result;
-      }
-
-      public Builder clone() {
-        return (Builder) super.clone();
-      }
-      public Builder setField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          Object value) {
-        return (Builder) super.setField(field, value);
-      }
-      public Builder clearField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
-        return (Builder) super.clearField(field);
-      }
-      public Builder clearOneof(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
-        return (Builder) super.clearOneof(oneof);
-      }
-      public Builder setRepeatedField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          int index, Object value) {
-        return (Builder) super.setRepeatedField(field, index, value);
-      }
-      public Builder addRepeatedField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          Object value) {
-        return (Builder) super.addRepeatedField(field, value);
-      }
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest) {
-          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest other) {
-        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance()) return this;
-        if (other.hasRegionA()) {
-          mergeRegionA(other.getRegionA());
-        }
-        if (other.hasRegionB()) {
-          mergeRegionB(other.getRegionB());
-        }
-        if (other.hasForcible()) {
-          setForcible(other.getForcible());
-        }
-        if (other.hasMasterSystemTime()) {
-          setMasterSystemTime(other.getMasterSystemTime());
-        }
-        this.mergeUnknownFields(other.unknownFields);
-        onChanged();
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        if (!hasRegionA()) {
-          return false;
-        }
-        if (!hasRegionB()) {
-          return false;
-        }
-        if (!getRegionA().isInitialized()) {
-          return false;
-        }
-        if (!getRegionB().isInitialized()) {
-          return false;
-        }
-        return true;
-      }
-
-      public Builder mergeFrom(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest) e.getUnfinishedMessage();
-          throw e.unwrapIOException();
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionA_ = null;
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionABuilder_;
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-       */
-      public boolean hasRegionA() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionA() {
-        if (regionABuilder_ == null) {
-          return regionA_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_;
-        } else {
-          return regionABuilder_.getMessage();
-        }
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-       */
-      public Builder setRegionA(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
-        if (regionABuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          regionA_ = value;
-          onChanged();
-        } else {
-          regionABuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-       */
-      public Builder setRegionA(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
-        if (regionABuilder_ == null) {
-          regionA_ = builderForValue.build();
-          onChanged();
-        } else {
-          regionABuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-       */
-      public Builder mergeRegionA(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
-        if (regionABuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              regionA_ != null &&
-              regionA_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
-            regionA_ =
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionA_).mergeFrom(value).buildPartial();
-          } else {
-            regionA_ = value;
-          }
-          onChanged();
-        } else {
-          regionABuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-       */
-      public Builder clearRegionA() {
-        if (regionABuilder_ == null) {
-          regionA_ = null;
-          onChanged();
-        } else {
-          regionABuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionABuilder() {
-        bitField0_ |= 0x00000001;
-        onChanged();
-        return getRegionAFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionAOrBuilder() {
-        if (regionABuilder_ != null) {
-          return regionABuilder_.getMessageOrBuilder();
-        } else {
-          return regionA_ == null ?
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionA_;
-        }
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_a = 1;</code>
-       */
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
-          getRegionAFieldBuilder() {
-        if (regionABuilder_ == null) {
-          regionABuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
-                  getRegionA(),
-                  getParentForChildren(),
-                  isClean());
-          regionA_ = null;
-        }
-        return regionABuilder_;
-      }
-
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier regionB_ = null;
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBBuilder_;
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-       */
-      public boolean hasRegionB() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier getRegionB() {
-        if (regionBBuilder_ == null) {
-          return regionB_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_;
-        } else {
-          return regionBBuilder_.getMessage();
-        }
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-       */
-      public Builder setRegionB(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
-        if (regionBBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          regionB_ = value;
-          onChanged();
-        } else {
-          regionBBuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-       */
-      public Builder setRegionB(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
-        if (regionBBuilder_ == null) {
-          regionB_ = builderForValue.build();
-          onChanged();
-        } else {
-          regionBBuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-       */
-      public Builder mergeRegionB(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier value) {
-        if (regionBBuilder_ == null) {
-          if (((bitField0_ & 0x00000002) == 0x00000002) &&
-              regionB_ != null &&
-              regionB_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
-            regionB_ =
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionB_).mergeFrom(value).buildPartial();
-          } else {
-            regionB_ = value;
-          }
-          onChanged();
-        } else {
-          regionBBuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-       */
-      public Builder clearRegionB() {
-        if (regionBBuilder_ == null) {
-          regionB_ = null;
-          onChanged();
-        } else {
-          regionBBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000002);
-        return this;
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBBuilder() {
-        bitField0_ |= 0x00000002;
-        onChanged();
-        return getRegionBFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionBOrBuilder() {
-        if (regionBBuilder_ != null) {
-          return regionBBuilder_.getMessageOrBuilder();
-        } else {
-          return regionB_ == null ?
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance() : regionB_;
-        }
-      }
-      /**
-       * <code>required .hbase.pb.RegionSpecifier region_b = 2;</code>
-       */
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> 
-          getRegionBFieldBuilder() {
-        if (regionBBuilder_ == null) {
-          regionBBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
-                  getRegionB(),
-                  getParentForChildren(),
-                  isClean());
-          regionB_ = null;
-        }
-        return regionBBuilder_;
-      }
-
-      private boolean forcible_ ;
-      /**
-       * <code>optional bool forcible = 3 [default = false];</code>
-       */
-      public boolean hasForcible() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
-      }
-      /**
-       * <code>optional bool forcible = 3 [default = false];</code>
-       */
-      public boolean getForcible() {
-        return forcible_;
-      }
-      /**
-       * <code>optional bool forcible = 3 [default = false];</code>
-       */
-      public Builder setForcible(boolean value) {
-        bitField0_ |= 0x00000004;
-        forcible_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional bool forcible = 3 [default = false];</code>
-       */
-      public Builder clearForcible() {
-        bitField0_ = (bitField0_ & ~0x00000004);
-        forcible_ = false;
-        onChanged();
-        return this;
-      }
-
-      private long masterSystemTime_ ;
-      /**
-       * <pre>
-       * wall clock time from master
-       * </pre>
-       *
-       * <code>optional uint64 master_system_time = 4;</code>
-       */
-      public boolean hasMasterSystemTime() {
-        return ((bitField0_ & 0x00000008) == 0x00000008);
-      }
-      /**
-       * <pre>
-       * wall clock time from master
-       * </pre>
-       *
-       * <code>optional uint64 master_system_time = 4;</code>
-       */
-      public long getMasterSystemTime() {
-        return masterSystemTime_;
-      }
-      /**
-       * <pre>
-       * wall clock time from master
-       * </pre>
-       *
-       * <code>optional uint64 master_system_time = 4;</code>
-       */
-      public Builder setMasterSystemTime(long value) {
-        bitField0_ |= 0x00000008;
-        masterSystemTime_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <pre>
-       * wall clock time from master
-       * </pre>
-       *
-       * <code>optional uint64 master_system_time = 4;</code>
-       */
-      public Builder clearMasterSystemTime() {
-        bitField0_ = (bitField0_ & ~0x00000008);
-        masterSystemTime_ = 0L;
-        onChanged();
-        return this;
-      }
-      public final Builder setUnknownFields(
-          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
-        return super.setUnknownFields(unknownFields);
-      }
-
-      public final Builder mergeUnknownFields(
-          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
-        return super.mergeUnknownFields(unknownFields);
-      }
-
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.MergeRegionsRequest)
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.MergeRegionsRequest)
-    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest DEFAULT_INSTANCE;
-    static {
-      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest();
-    }
-
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest getDefaultInstance() {
-      return DEFAULT_INSTANCE;
-    }
-
-    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeRegionsRequest>
-        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<MergeRegionsRequest>() {
-      public MergeRegionsRequest parsePartialFrom(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-          return new MergeRegionsRequest(input, extensionRegistry);
-      }
-    };
-
-    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeRegionsRequest> parser() {
-      return PARSER;
-    }
-
-    @java.lang.Override
-    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeRegionsRequest> getParserForType() {
-      return PARSER;
-    }
-
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest getDefaultInstanceForType() {
-      return DEFAULT_INSTANCE;
-    }
-
-  }
-
-  public interface MergeRegionsResponseOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:hbase.pb.MergeRegionsResponse)
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
-  }
-  /**
-   * Protobuf type {@code hbase.pb.MergeRegionsResponse}
-   */
-  public  static final class MergeRegionsResponse extends
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
-      // @@protoc_insertion_point(message_implements:hbase.pb.MergeRegionsResponse)
-      MergeRegionsResponseOrBuilder {
-    // Use MergeRegionsResponse.newBuilder() to construct.
-    private MergeRegionsResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
-      super(builder);
-    }
-    private MergeRegionsResponse() {
-    }
-
-    @java.lang.Override
-    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
-    getUnknownFields() {
-      return this.unknownFields;
-    }
-    private MergeRegionsResponse(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      this();
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-          }
-        }
-      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
-            e).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsResponse_descriptor;
-    }
-
-    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsResponse_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.Builder.class);
-    }
-
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized == 1) return true;
-      if (isInitialized == 0) return false;
-
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      unknownFields.writeTo(output);
-    }
-
-    public int getSerializedSize() {
-      int size = memoizedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      size += unknownFields.getSerializedSize();
-      memoizedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse) obj;
-
-      boolean result = true;
-      result = result && unknownFields.equals(other.unknownFields);
-      return result;
-    }
-
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      hash = (29 * hash) + unknownFields.hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom(byte[] data)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom(
-        byte[] data,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom(
-        java.io.InputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseDelimitedWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseDelimitedFrom(
-        java.io.InputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input, extensionRegistry);
-    }
-
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder() {
-      return DEFAULT_INSTANCE.toBuilder();
-    }
-    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse prototype) {
-      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() {
-      return this == DEFAULT_INSTANCE
-          ? new Builder() : new Builder().mergeFrom(this);
-    }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.MergeRegionsResponse}
-     */
-    public static final class Builder extends
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
-        // @@protoc_insertion_point(builder_implements:hbase.pb.MergeRegionsResponse)
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponseOrBuilder {
-      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsResponse_descriptor;
-      }
-
-      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsResponse_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-                .alwaysUseFieldBuilders) {
-        }
-      }
-      public Builder clear() {
-        super.clear();
-        return this;
-      }
-
-      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.internal_static_hbase_pb_MergeRegionsResponse_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse build() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse buildPartial() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse(this);
-        onBuilt();
-        return result;
-      }
-
-      public Builder clone() {
-        return (Builder) super.clone();
-      }
-      public Builder setField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          Object value) {
-        return (Builder) super.setField(field, value);
-      }
-      public Builder clearField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
-        return (Builder) super.clearField(field);
-      }
-      public Builder clearOneof(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
-        return (Builder) super.clearOneof(oneof);
-      }
-      public Builder setRepeatedField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          int index, Object value) {
-        return (Builder) super.setRepeatedField(field, index, value);
-      }
-      public Builder addRepeatedField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          Object value) {
-        return (Builder) super.addRepeatedField(field, value);
-      }
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse) {
-          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse other) {
-        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance()) return this;
-        this.mergeUnknownFields(other.unknownFields);
-        onChanged();
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        return true;
-      }
-
-      public Builder mergeFrom(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse) e.getUnfinishedMessage();
-          throw e.unwrapIOException();
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      public final Builder setUnknownFields(
-          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
-        return super.setUnknownFields(unknownFields);
-      }
-
-      public final Builder mergeUnknownFields(
-          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
-        return super.mergeUnknownFields(unknownFields);
-      }
-
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.MergeRegionsResponse)
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.MergeRegionsResponse)
-    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse DEFAULT_INSTANCE;
-    static {
-      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse();
-    }
-
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse getDefaultInstance() {
-      return DEFAULT_INSTANCE;
-    }
-
-    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeRegionsResponse>
-        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<MergeRegionsResponse>() {
-      public MergeRegionsResponse parsePartialFrom(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-          return new MergeRegionsResponse(input, extensionRegistry);
-      }
-    };
-
-    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeRegionsResponse> parser() {
-      return PARSER;
-    }
-
-    @java.lang.Override
-    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<MergeRegionsResponse> getParserForType() {
-      return PARSER;
-    }
-
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse getDefaultInstanceForType() {
-      return DEFAULT_INSTANCE;
-    }
-
-  }
-
   public interface WALEntryOrBuilder extends
       // @@protoc_insertion_point(interface_extends:hbase.pb.WALEntry)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
@@ -26239,14 +24839,6 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse> done);
 
       /**
-       * <code>rpc MergeRegions(.hbase.pb.MergeRegionsRequest) returns (.hbase.pb.MergeRegionsResponse);</code>
-       */
-      public abstract void mergeRegions(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse> done);
-
-      /**
        * <code>rpc ReplicateWALEntry(.hbase.pb.ReplicateWALEntryRequest) returns (.hbase.pb.ReplicateWALEntryResponse);</code>
        */
       public abstract void replicateWALEntry(
@@ -26396,14 +24988,6 @@ public final class AdminProtos {
         }
 
         @java.lang.Override
-        public  void mergeRegions(
-            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-            org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request,
-            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse> done) {
-          impl.mergeRegions(controller, request, done);
-        }
-
-        @java.lang.Override
         public  void replicateWALEntry(
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
             org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request,
@@ -26510,22 +25094,20 @@ public final class AdminProtos {
             case 9:
               return impl.compactRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest)request);
             case 10:
-              return impl.mergeRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)request);
-            case 11:
               return impl.replicateWALEntry(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request);
-            case 12:
+            case 11:
               return impl.replay(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request);
-            case 13:
+            case 12:
               return impl.rollWALWriter(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest)request);
-            case 14:
+            case 13:
               return impl.getServerInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest)request);
-            case 15:
+            case 14:
               return impl.stopServer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest)request);
-            case 16:
+            case 15:
               return impl.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request);
-            case 17:
+            case 16:
               return impl.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request);
-            case 18:
+            case 17:
               return impl.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -26562,22 +25144,20 @@ public final class AdminProtos {
             case 9:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance();
             case 10:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
             case 11:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
             case 12:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
-            case 13:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance();
-            case 14:
+            case 13:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance();
-            case 15:
+            case 14:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance();
-            case 16:
+            case 15:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance();
-            case 17:
+            case 16:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance();
-            case 18:
+            case 17:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -26614,22 +25194,20 @@ public final class AdminProtos {
             case 9:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance();
             case 10:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
             case 11:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
             case 12:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
-            case 13:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance();
-            case 14:
+            case 13:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance();
-            case 15:
+            case 14:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance();
-            case 16:
+            case 15:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance();
-            case 17:
+            case 16:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance();
-            case 18:
+            case 17:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -26720,14 +25298,6 @@ public final class AdminProtos {
         org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse> done);
 
     /**
-     * <code>rpc MergeRegions(.hbase.pb.MergeRegionsRequest) returns (.hbase.pb.MergeRegionsResponse);</code>
-     */
-    public abstract void mergeRegions(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-        org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse> done);
-
-    /**
      * <code>rpc ReplicateWALEntry(.hbase.pb.ReplicateWALEntryRequest) returns (.hbase.pb.ReplicateWALEntryResponse);</code>
      */
     public abstract void replicateWALEntry(
@@ -26864,46 +25434,41 @@ public final class AdminProtos {
               done));
           return;
         case 10:
-          this.mergeRegions(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest)request,
-            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse>specializeCallback(
-              done));
-          return;
-        case 11:
           this.replicateWALEntry(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse>specializeCallback(
               done));
           return;
-        case 12:
+        case 11:
           this.replay(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse>specializeCallback(
               done));
           return;
-        case 13:
+        case 12:
           this.rollWALWriter(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse>specializeCallback(
               done));
           return;
-        case 14:
+        case 13:
           this.getServerInfo(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse>specializeCallback(
               done));
           return;
-        case 15:
+        case 14:
           this.stopServer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse>specializeCallback(
               done));
           return;
-        case 16:
+        case 15:
           this.updateFavoredNodes(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse>specializeCallback(
               done));
           return;
-        case 17:
+        case 16:
           this.updateConfiguration(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse>specializeCallback(
               done));
           return;
-        case 18:
+        case 17:
           this.getRegionLoad(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse>specializeCallback(
               done));
@@ -26943,22 +25508,20 @@ public final class AdminProtos {
         case 9:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest.getDefaultInstance();
         case 10:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
         case 11:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
         case 12:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest.getDefaultInstance();
-        case 13:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest.getDefaultInstance();
-        case 14:
+        case 13:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest.getDefaultInstance();
-        case 15:
+        case 14:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest.getDefaultInstance();
-        case 16:
+        case 15:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest.getDefaultInstance();
-        case 17:
+        case 16:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest.getDefaultInstance();
-        case 18:
+        case 17:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -26995,22 +25558,20 @@ public final class AdminProtos {
         case 9:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance();
         case 10:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
         case 11:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
         case 12:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance();
-        case 13:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance();
-        case 14:
+        case 13:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance();
-        case 15:
+        case 14:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance();
-        case 16:
+        case 15:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance();
-        case 17:
+        case 16:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance();
-        case 18:
+        case 17:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -27183,27 +25744,12 @@ public final class AdminProtos {
             org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse.getDefaultInstance()));
       }
 
-      public  void mergeRegions(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse> done) {
-        channel.callMethod(
-          getDescriptor().getMethods().get(10),
-          controller,
-          request,
-          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance(),
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
-            done,
-            org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.class,
-            org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance()));
-      }
-
       public  void replicateWALEntry(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(11),
+          getDescriptor().getMethods().get(10),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(),
@@ -27218,7 +25764,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(12),
+          getDescriptor().getMethods().get(11),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance(),
@@ -27233,7 +25779,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(13),
+          getDescriptor().getMethods().get(12),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance(),
@@ -27248,7 +25794,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(14),
+          getDescriptor().getMethods().get(13),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance(),
@@ -27263,7 +25809,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(15),
+          getDescriptor().getMethods().get(14),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance(),
@@ -27278,7 +25824,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(16),
+          getDescriptor().getMethods().get(15),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse.getDefaultInstance(),
@@ -27293,7 +25839,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(17),
+          getDescriptor().getMethods().get(16),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse.getDefaultInstance(),
@@ -27308,7 +25854,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(18),
+          getDescriptor().getMethods().get(17),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse.getDefaultInstance(),
@@ -27375,11 +25921,6 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse mergeRegions(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request)
-          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
-
       public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse replicateWALEntry(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request)
@@ -27548,24 +26089,12 @@ public final class AdminProtos {
       }
 
 
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse mergeRegions(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest request)
-          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
-        return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(10),
-          controller,
-          request,
-          org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse.getDefaultInstance());
-      }
-
-
       public org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse replicateWALEntry(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(11),
+          getDescriptor().getMethods().get(10),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance());
@@ -27577,7 +26106,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(12),
+          getDescriptor().getMethods().get(11),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse.getDefaultInstance());
@@ -27589,7 +26118,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(13),
+          getDescriptor().getMethods().get(12),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse.getDefaultInstance());
@@ -27601,7 +26130,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(14),
+          getDescriptor().getMethods().get(13),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse.getDefaultInstance());
@@ -27613,7 +26142,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(15),
+          getDescriptor().getMethods().get(14),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse.getDefaultInstance());
@@ -27625,7 +26154,7 @@ public final class AdminProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest request)


<TRUNCATED>
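
The renumbering above is mechanical: protobuf addresses service methods by their
position in the service descriptor, so dropping MergeRegions (formerly slot 10)
shifts every later Admin method down by one slot, which is why the generated
switch cases and getDescriptor().getMethods().get(n) calls all decrease by one.
A minimal sketch of how a caller would observe the new ordering (illustrative
only, not part of this commit; it assumes the generated AdminService descriptor):

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;

    public class AdminMethodIndexCheck {
      public static void main(String[] args) {
        // Service methods are looked up positionally, so removing one shifts the rest.
        Descriptors.ServiceDescriptor admin = AdminProtos.AdminService.getDescriptor();
        // After this change ReplicateWALEntry occupies slot 10 (it was slot 11 before),
        // matching the updated getMethods().get(10) calls in the generated stubs above.
        System.out.println(admin.getMethods().get(10).getName());
      }
    }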

[38/50] [abbrv] hbase git commit: HBASE-17491 Remove all setters from HTable interface and introduce a TableBuilder to build Table instance

Posted by el...@apache.org.
HBASE-17491 Remove all setters from HTable interface and introduce a TableBuilder to build Table instance


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/07e0a30e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/07e0a30e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/07e0a30e

Branch: refs/heads/HBASE-16961
Commit: 07e0a30efa332ab451e5f5729dd8257eced82c4d
Parents: 7754a96
Author: Yu Li <li...@apache.org>
Authored: Mon Jan 23 13:51:03 2017 +0800
Committer: Yu Li <li...@apache.org>
Committed: Mon Jan 23 13:57:01 2017 +0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/client/Connection.java  | 15 +++-
 .../hbase/client/ConnectionConfiguration.java   | 11 ++-
 .../hbase/client/ConnectionImplementation.java  | 12 ++-
 .../org/apache/hadoop/hbase/client/HTable.java  | 55 ++++++-------
 .../org/apache/hadoop/hbase/client/Table.java   |  6 ++
 .../hadoop/hbase/client/TableBuilder.java       | 71 +++++++++++++++++
 .../hadoop/hbase/client/TableBuilderBase.java   | 83 ++++++++++++++++++++
 .../hbase/spark/HBaseConnectionCacheSuite.scala |  3 +-
 8 files changed, 222 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/07e0a30e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
index b979c6a..a8cd296 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
@@ -83,7 +83,9 @@ public interface Connection extends Abortable, Closeable {
    * @param tableName the name of the table
    * @return a Table to use for interactions with this table
    */
-  Table getTable(TableName tableName) throws IOException;
+  default Table getTable(TableName tableName) throws IOException {
+    return getTable(tableName, null);
+  }
 
   /**
    * Retrieve a Table implementation for accessing a table.
@@ -102,7 +104,9 @@ public interface Connection extends Abortable, Closeable {
    * @param pool The thread pool to use for batch operations, null to use a default pool.
    * @return a Table to use for interactions with this table
    */
-  Table getTable(TableName tableName, ExecutorService pool)  throws IOException;
+  default Table getTable(TableName tableName, ExecutorService pool) throws IOException {
+    return getTableBuilder(tableName, pool).build();
+  }
 
   /**
    * <p>
@@ -173,4 +177,11 @@ public interface Connection extends Abortable, Closeable {
    * @return true if this connection is closed
    */
   boolean isClosed();
+
+  /**
+   * Returns a {@link TableBuilder} for creating {@link Table} instances.
+   * @param tableName the name of the table
+   * @param pool the thread pool to use for requests like batch and scan
+   */
+  TableBuilder getTableBuilder(TableName tableName, ExecutorService pool);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/07e0a30e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
index 41f5baf..bea91da 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java
@@ -42,9 +42,10 @@ public class ConnectionConfiguration {
   private final int replicaCallTimeoutMicroSecondScan;
   private final int retries;
   private final int maxKeyValueSize;
+  private final int rpcTimeout;
   private final int readRpcTimeout;
   private final int writeRpcTimeout;
-    // toggle for async/sync prefetch
+  // toggle for async/sync prefetch
   private final boolean clientScannerAsyncPrefetch;
 
     /**
@@ -82,6 +83,9 @@ public class ConnectionConfiguration {
 
     this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, MAX_KEYVALUE_SIZE_DEFAULT);
 
+    this.rpcTimeout =
+        conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
+
     this.readRpcTimeout = conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY,
         conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT));
 
@@ -108,6 +112,7 @@ public class ConnectionConfiguration {
     this.maxKeyValueSize = MAX_KEYVALUE_SIZE_DEFAULT;
     this.readRpcTimeout = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
     this.writeRpcTimeout = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
+    this.rpcTimeout = HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
   }
 
   public int getReadRpcTimeout() {
@@ -158,4 +163,8 @@ public class ConnectionConfiguration {
     return clientScannerAsyncPrefetch;
   }
 
+  public int getRpcTimeout() {
+    return rpcTimeout;
+  }
+
 }
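
A side note on the new rpcTimeout field added above: it is seeded from the generic
hbase rpc timeout key, and the read/write rpc timeouts keep falling back to that
same key when their dedicated keys are unset. A small standalone sketch of the
fallback chain (illustrative only, not part of the patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class RpcTimeoutDefaults {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 30000);
        // rpcTimeout reads the generic key directly ...
        int rpcTimeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
            HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
        // ... while the read timeout only falls back to it when no read-specific key is set.
        int readRpcTimeout = conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, rpcTimeout);
        System.out.println(rpcTimeout + " / " + readRpcTimeout); // 30000 / 30000
      }
    }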

http://git-wip-us.apache.org/repos/asf/hbase/blob/07e0a30e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index adbc7f9..ca21365 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -327,9 +327,15 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
   }
 
   @Override
-  public Table getTable(TableName tableName, ExecutorService pool) throws IOException {
-    return new HTable(tableName, this, connectionConfig,
-      rpcCallerFactory, rpcControllerFactory, pool);
+  public TableBuilder getTableBuilder(TableName tableName, ExecutorService pool) {
+    return new TableBuilderBase(tableName, connectionConfig) {
+
+      @Override
+      public Table build() {
+        return new HTable(ConnectionImplementation.this, this, rpcCallerFactory,
+            rpcControllerFactory, pool);
+      }
+    };
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/07e0a30e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index fd5eda3..3bb0a77 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -82,10 +82,9 @@ import org.apache.hadoop.hbase.util.Threads;
  * Obtain an instance via {@link Connection}. See {@link ConnectionFactory}
  * class comment for an example of how.
  *
- * <p>This class is NOT thread safe for reads nor writes.
- * In the case of writes (Put, Delete), the underlying write buffer can
- * be corrupted if multiple threads contend over a single HTable instance.
- * In the case of reads, some fields used by a Scan are shared among all threads.
+ * <p>This class is thread safe since 2.0.0 as long as none of the setter methods are invoked.
+ * All setters have been moved into {@link TableBuilder} and are kept here only for
+ * backward compatibility; TODO they will be removed soon.
  *
  * <p>HTable is no longer a client API. Use {@link Table} instead. It is marked
  * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in
@@ -115,10 +114,12 @@ public class HTable implements Table {
   private final long scannerMaxResultSize;
   private final ExecutorService pool;  // For Multi & Scan
   private int operationTimeout; // global timeout for each blocking method with retrying rpc
+  private final int rpcTimeout; // FIXME we should use this for rpc like batch and checkAndXXX
   private int readRpcTimeout; // timeout for each read rpc request
   private int writeRpcTimeout; // timeout for each write rpc request
   private final boolean cleanupPoolOnClose; // shutdown the pool in close()
   private final HRegionLocator locator;
+  private final long writeBufferSize;
 
   /** The Async process for batch */
   @VisibleForTesting
@@ -150,31 +151,24 @@ public class HTable implements Table {
    * Creates an object to access a HBase table.
    * Used by HBase internally.  DO NOT USE. See {@link ConnectionFactory} class comment for how to
    * get a {@link Table} instance (use {@link Table} instead of {@link HTable}).
-   * @param tableName Name of the table.
    * @param connection Connection to be used.
+   * @param builder The table builder
+   * @param rpcCallerFactory The RPC caller factory
+   * @param rpcControllerFactory The RPC controller factory
    * @param pool ExecutorService to be used.
-   * @throws IOException if a remote or network exception occurs
    */
   @InterfaceAudience.Private
-  protected HTable(TableName tableName, final ClusterConnection connection,
-      final ConnectionConfiguration tableConfig,
+  protected HTable(final ClusterConnection connection,
+      final TableBuilderBase builder,
       final RpcRetryingCallerFactory rpcCallerFactory,
       final RpcControllerFactory rpcControllerFactory,
-      final ExecutorService pool) throws IOException {
+      final ExecutorService pool) {
     if (connection == null || connection.isClosed()) {
       throw new IllegalArgumentException("Connection is null or closed.");
     }
-    if (tableName == null) {
-      throw new IllegalArgumentException("Given table name is null");
-    }
-    this.tableName = tableName;
     this.connection = connection;
     this.configuration = connection.getConfiguration();
-    if (tableConfig == null) {
-      connConfiguration = new ConnectionConfiguration(configuration);
-    } else {
-      connConfiguration = tableConfig;
-    }
+    this.connConfiguration = connection.getConnectionConfiguration();
     if (pool == null) {
       this.pool = getDefaultExecutor(this.configuration);
       this.cleanupPoolOnClose = true;
@@ -194,10 +188,12 @@ public class HTable implements Table {
       this.rpcControllerFactory = rpcControllerFactory;
     }
 
-    this.operationTimeout = tableName.isSystemTable() ?
-        connConfiguration.getMetaOperationTimeout() : connConfiguration.getOperationTimeout();
-    this.readRpcTimeout = connConfiguration.getReadRpcTimeout();
-    this.writeRpcTimeout = connConfiguration.getWriteRpcTimeout();
+    this.tableName = builder.tableName;
+    this.operationTimeout = builder.operationTimeout;
+    this.rpcTimeout = builder.rpcTimeout;
+    this.readRpcTimeout = builder.readRpcTimeout;
+    this.writeRpcTimeout = builder.writeRpcTimeout;
+    this.writeBufferSize = builder.writeBufferSize;
     this.scannerCaching = connConfiguration.getScannerCaching();
     this.scannerMaxResultSize = connConfiguration.getScannerMaxResultSize();
 
@@ -215,15 +211,16 @@ public class HTable implements Table {
     connection = conn;
     this.tableName = mutator.getName();
     this.configuration = connection.getConfiguration();
-    connConfiguration = new ConnectionConfiguration(configuration);
+    connConfiguration = connection.getConnectionConfiguration();
     cleanupPoolOnClose = false;
     this.mutator = mutator;
-    this.operationTimeout = tableName.isSystemTable() ?
-        connConfiguration.getMetaOperationTimeout() : connConfiguration.getOperationTimeout();
+    this.operationTimeout = connConfiguration.getOperationTimeout();
+    this.rpcTimeout = connConfiguration.getRpcTimeout();
     this.readRpcTimeout = connConfiguration.getReadRpcTimeout();
     this.writeRpcTimeout = connConfiguration.getWriteRpcTimeout();
     this.scannerCaching = connConfiguration.getScannerCaching();
     this.scannerMaxResultSize = connConfiguration.getScannerMaxResultSize();
+    this.writeBufferSize = connConfiguration.getWriteBufferSize();
     this.rpcControllerFactory = null;
     this.rpcCallerFactory = null;
     this.pool = mutator.getPool();
@@ -1058,6 +1055,7 @@ public class HTable implements Table {
    * @throws IOException if a remote or network exception occurs.
    */
   @Override
+  @Deprecated
   public void setWriteBufferSize(long writeBufferSize) throws IOException {
     getBufferedMutator();
     mutator.setWriteBufferSize(writeBufferSize);
@@ -1162,6 +1160,7 @@ public class HTable implements Table {
   }
 
   @Override
+  @Deprecated
   public void setOperationTimeout(int operationTimeout) {
     this.operationTimeout = operationTimeout;
     if (mutator != null) {
@@ -1177,7 +1176,7 @@ public class HTable implements Table {
   @Override
   @Deprecated
   public int getRpcTimeout() {
-    return readRpcTimeout;
+    return rpcTimeout;
   }
 
   @Override
@@ -1193,6 +1192,7 @@ public class HTable implements Table {
   }
 
   @Override
+  @Deprecated
   public void setWriteRpcTimeout(int writeRpcTimeout) {
     this.writeRpcTimeout = writeRpcTimeout;
     if (mutator != null) {
@@ -1204,6 +1204,7 @@ public class HTable implements Table {
   public int getReadRpcTimeout() { return readRpcTimeout; }
 
   @Override
+  @Deprecated
   public void setReadRpcTimeout(int readRpcTimeout) {
     this.readRpcTimeout = readRpcTimeout;
   }
@@ -1335,7 +1336,7 @@ public class HTable implements Table {
       this.mutator = (BufferedMutatorImpl) connection.getBufferedMutator(
           new BufferedMutatorParams(tableName)
               .pool(pool)
-              .writeBufferSize(connConfiguration.getWriteBufferSize())
+              .writeBufferSize(writeBufferSize)
               .maxKeyValueSize(connConfiguration.getMaxKeyValueSize())
               .opertationTimeout(operationTimeout)
               .rpcTimeout(writeRpcTimeout)

http://git-wip-us.apache.org/repos/asf/hbase/blob/07e0a30e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 0f30cb4..90fee8d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -593,7 +593,9 @@ public interface Table extends Closeable {
    * total time being blocking reach the operation timeout before retries exhausted, it will break
    * early and throw SocketTimeoutException.
    * @param operationTimeout the total timeout of each operation in millisecond.
+   * @deprecated since 2.0.0, use {@link TableBuilder#setOperationTimeout} instead
    */
+  @Deprecated
   void setOperationTimeout(int operationTimeout);
 
   /**
@@ -637,7 +639,9 @@ public interface Table extends Closeable {
    * until retries exhausted or operation timeout reached.
    *
    * @param readRpcTimeout
+   * @deprecated since 2.0.0, use {@link TableBuilder#setReadRpcTimeout} instead
    */
+  @Deprecated
   void setReadRpcTimeout(int readRpcTimeout);
 
   /**
@@ -652,6 +656,8 @@ public interface Table extends Closeable {
    * until retries exhausted or operation timeout reached.
    *
    * @param writeRpcTimeout
+   * @deprecated since 2.0.0, use {@link TableBuilder#setWriteRpcTimeout} instead
    */
+  @Deprecated
   void setWriteRpcTimeout(int writeRpcTimeout);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/07e0a30e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java
new file mode 100644
index 0000000..27e1596
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilder.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * For creating {@link Table} instance.
+ * <p>
+ * The implementation should have default configurations set before returning the builder to the user.
+ * So users are free to only set the configurations they care about to create a new
+ * Table instance.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public interface TableBuilder {
+
+  /**
+   * Set the timeout for a whole operation such as get, put or delete. Notice that scan will not be
+   * affected by this value; see scanTimeoutNs.
+   * <p>
+   * Operation timeout and the maximum number of attempts (or max retry times) are both limits on
+   * retrying; we will stop retrying when we reach either of them.
+   */
+  TableBuilder setOperationTimeout(int timeout);
+
+  /**
+   * Set timeout for each rpc request.
+   * <p>
+   * Notice that this will <strong>NOT</strong> change the rpc timeout for read (get, scan) requests
+   * or write (put, delete) requests.
+   */
+  TableBuilder setRpcTimeout(int timeout);
+
+  /**
+   * Set timeout for each read(get, scan) rpc request.
+   */
+  TableBuilder setReadRpcTimeout(int timeout);
+
+  /**
+   * Set timeout for each write(put, delete) rpc request.
+   */
+  TableBuilder setWriteRpcTimeout(int timeout);
+
+  /**
+   * Set the write buffer size which by default is specified by the
+   * {@code hbase.client.write.buffer} setting.
+   */
+  TableBuilder setWriteBufferSize(long writeBufferSize);
+
+  /**
+   * Create the {@link Table} instance.
+   */
+  Table build();
+}
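
Taken together, the builder is meant to be used fluently from a Connection in place
of the deprecated setters on Table. A minimal usage sketch (assuming a cluster
reachable through the default configuration; the table name "test" and row key
"row1" are made up for illustration):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableBuilderExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // Per-table timeouts are now fixed at build time instead of mutated on the Table.
          Table table = conn.getTableBuilder(TableName.valueOf("test"), null) // null = default pool
              .setOperationTimeout(60000) // total budget per blocking call, in ms
              .setReadRpcTimeout(10000)   // per read rpc, in ms
              .setWriteRpcTimeout(10000)  // per write rpc, in ms
              .build();
          try {
            Result r = table.get(new Get(Bytes.toBytes("row1")));
            System.out.println("row1 present: " + !r.isEmpty());
          } finally {
            table.close();
          }
        }
      }
    }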

http://git-wip-us.apache.org/repos/asf/hbase/blob/07e0a30e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java
new file mode 100644
index 0000000..adf1abb
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableBuilderBase.java
@@ -0,0 +1,83 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+/**
+ * Base class for all table builders.
+ */
+@InterfaceAudience.Private
+abstract class TableBuilderBase implements TableBuilder {
+
+  protected TableName tableName;
+
+  protected int operationTimeout;
+
+  protected int rpcTimeout;
+
+  protected int readRpcTimeout;
+
+  protected int writeRpcTimeout;
+
+  protected long writeBufferSize;
+
+  TableBuilderBase(TableName tableName, ConnectionConfiguration connConf) {
+    if (tableName == null) {
+      throw new IllegalArgumentException("Given table name is null");
+    }
+    this.tableName = tableName;
+    this.operationTimeout = tableName.isSystemTable() ? connConf.getMetaOperationTimeout()
+        : connConf.getOperationTimeout();
+    this.rpcTimeout = connConf.getRpcTimeout();
+    this.readRpcTimeout = connConf.getReadRpcTimeout();
+    this.writeRpcTimeout = connConf.getWriteRpcTimeout();
+    this.writeBufferSize = connConf.getWriteBufferSize();
+  }
+
+  @Override
+  public TableBuilderBase setOperationTimeout(int timeout) {
+    this.operationTimeout = timeout;
+    return this;
+  }
+
+  @Override
+  public TableBuilderBase setRpcTimeout(int timeout) {
+    this.rpcTimeout = timeout;
+    return this;
+  }
+
+  @Override
+  public TableBuilderBase setReadRpcTimeout(int timeout) {
+    this.readRpcTimeout = timeout;
+    return this;
+  }
+
+  @Override
+  public TableBuilderBase setWriteRpcTimeout(int timeout) {
+    this.writeRpcTimeout = timeout;
+    return this;
+  }
+
+  @Override
+  public TableBuilder setWriteBufferSize(long writeBufferSize) {
+    this.writeBufferSize = writeBufferSize;
+    return this;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/07e0a30e/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCacheSuite.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCacheSuite.scala b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCacheSuite.scala
index 6ebf044..b3fdd4e 100644
--- a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCacheSuite.scala
+++ b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/HBaseConnectionCacheSuite.scala
@@ -21,7 +21,7 @@ import java.util.concurrent.ExecutorService
 import scala.util.Random
 
 import org.apache.hadoop.hbase.client.{BufferedMutator, Table, RegionLocator,
-  Connection, BufferedMutatorParams, Admin}
+  Connection, BufferedMutatorParams, Admin, TableBuilder}
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.hbase.TableName
 import org.apache.spark.Logging
@@ -50,6 +50,7 @@ class ConnectionMocker extends Connection {
   def getBufferedMutator (params: BufferedMutatorParams): BufferedMutator = null
   def getBufferedMutator (tableName: TableName): BufferedMutator = null
   def getAdmin: Admin = null
+  def getTableBuilder(tableName: TableName, pool: ExecutorService): TableBuilder = null
 
   def close(): Unit = {
     if (isClosed)


[30/50] [abbrv] hbase git commit: HBASE-10699 Set capacity on ArrayList where possible and use isEmpty instead of size() == 0

Posted by el...@apache.org.
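
The pattern applied throughout the diffs below is small but repeated many times:
give an ArrayList its final capacity up front when it is known, so it never has to
grow its backing array, and test emptiness with isEmpty() rather than size() == 0.
A standalone illustration (not taken from the patch):

    import java.util.ArrayList;
    import java.util.List;

    public class PresizeExample {
      public static void main(String[] args) {
        int rowCount = 1000;
        // Presizing avoids repeated internal array copies while the list fills up.
        List<Integer> rows = new ArrayList<>(rowCount);
        for (int i = 0; i < rowCount; i++) {
          rows.add(i);
        }
        // isEmpty() states the intent directly instead of comparing size() to zero.
        System.out.println(rows.isEmpty() ? "no rows" : rows.size() + " rows");
      }
    }
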
http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java
index 06e1698..c9579d6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationDisableInactivePeer.java
@@ -80,7 +80,7 @@ public class TestReplicationDisableInactivePeer extends TestReplicationBase {
     Thread.sleep(SLEEP_TIME * NB_RETRIES);
     for (int i = 0; i < NB_RETRIES; i++) {
       Result res = htable2.get(get);
-      if (res.size() == 0) {
+      if (res.isEmpty()) {
         LOG.info("Row not available");
         Thread.sleep(SLEEP_TIME * NB_RETRIES);
       } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
index 6519b46..58249f1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSmallTests.java
@@ -224,7 +224,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
         fail("Waited too much time for put replication");
       }
       Result res = htable2.get(get);
-      if (res.size() == 0) {
+      if (res.isEmpty()) {
         LOG.info("Row not available");
         Thread.sleep(SLEEP_TIME);
       } else {
@@ -291,7 +291,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
   }
 
   private void loadData(String prefix, byte[] row) throws IOException {
-    List<Put> puts = new ArrayList<>();
+    List<Put> puts = new ArrayList<>(NB_ROWS_IN_BATCH);
     for (int i = 0; i < NB_ROWS_IN_BATCH; i++) {
       Put put = new Put(Bytes.toBytes(prefix + Integer.toString(i)));
       put.addColumn(famName, row, row);
@@ -333,7 +333,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
 
     for (int i = 0; i < NB_RETRIES; i++) {
       Result res = htable2.get(get);
-      if (res.size() == 0) {
+      if (res.isEmpty()) {
         LOG.info("Row not available");
         Thread.sleep(SLEEP_TIME);
       } else {
@@ -389,7 +389,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
         fail("Waited too much time for put replication");
       }
       Result res = htable2.get(get);
-      if (res.size() == 0) {
+      if (res.isEmpty()) {
         LOG.info("Row not available");
         Thread.sleep(SLEEP_TIME*i);
       } else {
@@ -408,7 +408,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
   @Test(timeout=300000)
   public void testLoading() throws Exception {
     LOG.info("Writing out rows to table1 in testLoading");
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(NB_ROWS_IN_BIG_BATCH);
     for (int i = 0; i < NB_ROWS_IN_BIG_BATCH; i++) {
       Put put = new Put(Bytes.toBytes(i));
       put.addColumn(famName, row, row);
@@ -544,7 +544,7 @@ public class TestReplicationSmallTests extends TestReplicationBase {
           fail("Waited too much time for put replication");
         }
         Result res = lHtable2.get(get);
-        if (res.size() == 0) {
+        if (res.isEmpty()) {
           LOG.info("Row not available");
           Thread.sleep(SLEEP_TIME);
         } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
index f8be9a7..c7c1b89 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
@@ -215,7 +215,7 @@ public abstract class TestReplicationStateBasic {
     rq1.addHFileRefs(ID_ONE, files1);
     assertEquals(1, rqc.getAllPeersFromHFileRefsQueue().size());
     assertEquals(3, rqc.getReplicableHFiles(ID_ONE).size());
-    List<String> hfiles2 = new ArrayList<>();
+    List<String> hfiles2 = new ArrayList<>(files1.size());
     for (Pair<Path, Path> p : files1) {
       hfiles2.add(p.getSecond().getName());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
index a680f70..f2c1c23 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWithTags.java
@@ -181,7 +181,7 @@ public class TestReplicationWithTags {
           fail("Waited too much time for put replication");
         }
         Result res = htable2.get(get);
-        if (res.size() == 0) {
+        if (res.isEmpty()) {
           LOG.info("Row not available");
           Thread.sleep(SLEEP_TIME);
         } else {
@@ -212,7 +212,7 @@ public class TestReplicationWithTags {
               cf = CellUtil.cloneFamily(kv);
             }
             Tag tag = new ArrayBackedTag(TAG_TYPE, attribute);
-            List<Tag> tagList = new ArrayList<Tag>();
+            List<Tag> tagList = new ArrayList<Tag>(1);
             tagList.add(tag);
 
             KeyValue newKV = new KeyValue(CellUtil.cloneRow(kv), 0, kv.getRowLength(),

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
index c9f4319..752afe8 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestSerialReplication.java
@@ -315,7 +315,7 @@ public class TestSerialReplication {
   }
 
   private List<Integer> getRowNumbers(List<Cell> cells) {
-    List<Integer> listOfRowNumbers = new ArrayList<>();
+    List<Integer> listOfRowNumbers = new ArrayList<>(cells.size());
     for (Cell c : cells) {
       listOfRowNumbers.add(Integer.parseInt(Bytes
           .toString(c.getRowArray(), c.getRowOffset() + ROW.length,

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
index 7e4ae45..4331239 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestGlobalThrottler.java
@@ -173,7 +173,7 @@ public class TestGlobalThrottler {
   }
 
   private List<Integer> getRowNumbers(List<Cell> cells) {
-    List<Integer> listOfRowNumbers = new ArrayList<>();
+    List<Integer> listOfRowNumbers = new ArrayList<>(cells.size());
     for (Cell c : cells) {
       listOfRowNumbers.add(Integer.parseInt(Bytes
           .toString(c.getRowArray(), c.getRowOffset() + ROW.length,

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 9d1d165..af3bf83 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -225,7 +225,7 @@ public abstract class TestReplicationSourceManager {
     WALEdit edit = new WALEdit();
     edit.add(kv);
 
-    List<WALActionsListener> listeners = new ArrayList<WALActionsListener>();
+    List<WALActionsListener> listeners = new ArrayList<WALActionsListener>(1);
     listeners.add(replication);
     final WALFactory wals = new WALFactory(utility.getConfiguration(), listeners,
         URLEncoder.encode("regionserver:60020", "UTF8"));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationWALReaderManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationWALReaderManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationWALReaderManager.java
index 3ef658f..a76cec9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationWALReaderManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationWALReaderManager.java
@@ -141,7 +141,7 @@ public class TestReplicationWALReaderManager {
   @Before
   public void setUp() throws Exception {
     logManager = new ReplicationWALReaderManager(fs, conf);
-    List<WALActionsListener> listeners = new ArrayList<WALActionsListener>();
+    List<WALActionsListener> listeners = new ArrayList<WALActionsListener>(1);
     pathWatcher = new PathWatcher();
     listeners.add(pathWatcher);
     final WALFactory wals = new WALFactory(conf, listeners, tn.getMethodName());

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 8685b44..cc6425f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -1759,10 +1759,12 @@ public class TestAccessController extends SecureTestUtil {
     } finally {
       acl.close();
     }
-    List<UserPermission> adminPerms = new ArrayList<UserPermission>();
+
+    List<String> superUsers = Superusers.getSuperUsers();
+    List<UserPermission> adminPerms = new ArrayList<UserPermission>(superUsers.size() + 1);
     adminPerms.add(new UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()),
       AccessControlLists.ACL_TABLE_NAME, null, null, Bytes.toBytes("ACRW")));
-    List<String> superUsers = Superusers.getSuperUsers();
+
     for(String user: superUsers) {
       adminPerms.add(new UserPermission(Bytes.toBytes(user), AccessControlLists.ACL_TABLE_NAME,
           null, null, Action.values()));

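Editor's note: the TestAccessController hunk above also reorders the code so that Superusers.getSuperUsers() runs before the list is built, which lets the capacity be derived from the inputs (superUsers.size() + 1, the extra slot holding the USER_ADMIN entry). A hypothetical sketch of that compute-the-count-then-allocate ordering, with made-up names, not taken from the patch:

    import java.util.ArrayList;
    import java.util.List;

    class SizeBeforeAllocate {
      // Gather the variable-length input first so its size can seed the
      // list capacity; the +1 reserves room for one fixed extra entry.
      static List<String> permissionsFor(List<String> users, String adminEntry) {
        List<String> perms = new ArrayList<>(users.size() + 1);
        perms.add(adminEntry);
        for (String user : users) {
          perms.add("perm:" + user);
        }
        return perms;
      }
    }
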
http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java
index b8e7b53..6582751 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestZKPermissionsWatcher.java
@@ -111,7 +111,7 @@ public class TestZKPermissionsWatcher {
       TablePermission.Action.WRITE));
 
     // update ACL: george RW
-    List<TablePermission> acl = new ArrayList<TablePermission>();
+    List<TablePermission> acl = new ArrayList<TablePermission>(1);
     acl.add(new TablePermission(TEST_TABLE, null, TablePermission.Action.READ,
       TablePermission.Action.WRITE));
     final long mtimeB = AUTH_B.getMTime();
@@ -144,7 +144,7 @@ public class TestZKPermissionsWatcher {
       TablePermission.Action.WRITE));
 
     // update ACL: hubert R
-    acl = new ArrayList<TablePermission>();
+    acl = new ArrayList<TablePermission>(1);
     acl.add(new TablePermission(TEST_TABLE, null, TablePermission.Action.READ));
     final long mtimeA = AUTH_A.getMTime();
     AUTH_B.setTableUserPermissions("hubert", TEST_TABLE, acl);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
index ab2bacc..0cbe554 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabels.java
@@ -456,7 +456,7 @@ public abstract class TestVisibilityLabels {
         } catch (Throwable e) {
           fail("Should not have failed");
         }
-        List<String> authsList = new ArrayList<String>();
+        List<String> authsList = new ArrayList<String>(authsResponse.getAuthList().size());
         for (ByteString authBS : authsResponse.getAuthList()) {
           authsList.add(Bytes.toString(authBS.toByteArray()));
         }
@@ -482,7 +482,7 @@ public abstract class TestVisibilityLabels {
           }
         } catch (Throwable e) {
         }
-        List<String> authsList = new ArrayList<String>();
+        List<String> authsList = new ArrayList<String>(authsResponse.getAuthList().size());
         for (ByteString authBS : authsResponse.getAuthList()) {
           authsList.add(Bytes.toString(authBS.toByteArray()));
         }
@@ -557,7 +557,7 @@ public abstract class TestVisibilityLabels {
         } catch (Throwable e) {
           fail("Should not have failed");
         }
-        List<String> authsList = new ArrayList<String>();
+        List<String> authsList = new ArrayList<String>(authsResponse.getAuthList().size());
         for (ByteString authBS : authsResponse.getAuthList()) {
           authsList.add(Bytes.toString(authBS.toByteArray()));
         }
@@ -853,7 +853,7 @@ public abstract class TestVisibilityLabels {
 
   static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
       throws Exception {
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(labelExps.length);
     for (int i = 0; i < labelExps.length; i++) {
       Put put = new Put(Bytes.toBytes("row" + (i+1)));
       put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java
index 06fc7be..307bd00 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsOpWithDifferentUsersNoACL.java
@@ -128,7 +128,7 @@ public class TestVisibilityLabelsOpWithDifferentUsersNoACL {
     authsResponse = NORMAL_USER1.runAs(action1);
     assertTrue(authsResponse.getAuthList().isEmpty());
     authsResponse = SUPERUSER.runAs(action1);
-    List<String> authsList = new ArrayList<String>();
+    List<String> authsList = new ArrayList<String>(authsResponse.getAuthList().size());
     for (ByteString authBS : authsResponse.getAuthList()) {
       authsList.add(Bytes.toString(authBS.toByteArray()));
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
index 31b74fb..48703b0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
@@ -379,7 +379,7 @@ public class TestVisibilityLabelsReplication {
   static Table writeData(TableName tableName, String... labelExps) throws Exception {
     Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME);
     int i = 1;
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(labelExps.length);
     for (String labelExp : labelExps) {
       Put put = new Put(Bytes.toBytes("row" + i));
       put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
@@ -409,7 +409,7 @@ public class TestVisibilityLabelsReplication {
               cf = CellUtil.cloneFamily(kv);
             }
             Tag tag = new ArrayBackedTag((byte) NON_VIS_TAG_TYPE, attribute);
-            List<Tag> tagList = new ArrayList<Tag>();
+            List<Tag> tagList = new ArrayList<Tag>(kv.getTags().size() + 1);
             tagList.add(tag);
             tagList.addAll(kv.getTags());
             Cell newcell = CellUtil.createCell(kv, tagList);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java
index f67296d..e236be2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithACL.java
@@ -300,7 +300,7 @@ public class TestVisibilityLabelsWithACL {
     GetAuthsResponse authsResponse = NORMAL_USER1.runAs(action1);
     assertNull(authsResponse);
     authsResponse = SUPERUSER.runAs(action1);
-    List<String> authsList = new ArrayList<String>();
+    List<String> authsList = new ArrayList<String>(authsResponse.getAuthList().size());
     for (ByteString authBS : authsResponse.getAuthList()) {
       authsList.add(Bytes.toString(authBS.toByteArray()));
     }
@@ -315,7 +315,7 @@ public class TestVisibilityLabelsWithACL {
     try {
       table = TEST_UTIL.createTable(tableName, fam);
       int i = 1;
-      List<Put> puts = new ArrayList<Put>();
+      List<Put> puts = new ArrayList<Put>(labelExps.length);
       for (String labelExp : labelExps) {
         Put put = new Put(Bytes.toBytes("row" + i));
         put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java
index 56078d8..f6510f3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDeletes.java
@@ -1146,7 +1146,7 @@ public class TestVisibilityLabelsWithDeletes {
     desc.addFamily(colDesc);
     hBaseAdmin.createTable(desc);
 
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(5);
     Put put = new Put(Bytes.toBytes("row1"));
     put.addColumn(fam, qual, 123l, value);
     put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
@@ -1196,7 +1196,7 @@ public class TestVisibilityLabelsWithDeletes {
     desc.addFamily(colDesc);
     hBaseAdmin.createTable(desc);
 
-    List<Put> puts = new ArrayList<>();
+    List<Put> puts = new ArrayList<>(5);
     Put put = new Put(Bytes.toBytes("row1"));
     put.addColumn(fam, qual, 123l, value);
     put.setCellVisibility(new CellVisibility(CONFIDENTIAL));
@@ -1237,7 +1237,7 @@ public class TestVisibilityLabelsWithDeletes {
     HTableDescriptor desc = new HTableDescriptor(tableName);
     desc.addFamily(colDesc);
     hBaseAdmin.createTable(desc);
-    List<Put> puts = new ArrayList<>();
+    List<Put> puts = new ArrayList<>(5);
     Put put = new Put(Bytes.toBytes("row1"));
     put.addColumn(fam, qual, 123l, value);
     puts.add(put);
@@ -3225,7 +3225,7 @@ public class TestVisibilityLabelsWithDeletes {
     Table table = null;
     table = TEST_UTIL.createTable(tableName, fam);
     int i = 1;
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(labelExps.length);
     for (String labelExp : labelExps) {
       Put put = new Put(Bytes.toBytes("row" + i));
       put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
@@ -3243,7 +3243,7 @@ public class TestVisibilityLabelsWithDeletes {
     Table table = null;
     table = TEST_UTIL.createTable(tableName, fam);
     int i = 1;
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(labelExps.length);
     for (String labelExp : labelExps) {
       Put put = new Put(Bytes.toBytes("row" + i));
       put.addColumn(fam, qual, timestamp[i - 1], value);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java
index 1410c78..940d6dc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLablesWithGroups.java
@@ -185,7 +185,7 @@ public class TestVisibilityLablesWithGroups {
         } catch (Throwable e) {
           fail("Should not have failed");
         }
-        List<String> authsList = new ArrayList<String>();
+        List<String> authsList = new ArrayList<String>(authsResponse.getAuthList().size());
         for (ByteString authBS : authsResponse.getAuthList()) {
           authsList.add(Bytes.toString(authBS.toByteArray()));
         }
@@ -302,7 +302,7 @@ public class TestVisibilityLablesWithGroups {
         } catch (Throwable e) {
           fail("Should not have failed");
         }
-        List<String> authsList = new ArrayList<String>();
+        List<String> authsList = new ArrayList<String>(authsResponse.getAuthList().size());
         for (ByteString authBS : authsResponse.getAuthList()) {
           authsList.add(Bytes.toString(authBS.toByteArray()));
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java
index 1d027d0..ff348db 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestWithDisabledAuthorization.java
@@ -142,7 +142,7 @@ public class TestWithDisabledAuthorization {
           } catch (Throwable t) {
             fail("Should not have failed");
           }
-          List<String> authsList = new ArrayList<String>();
+          List<String> authsList = new ArrayList<String>(authsResponse.getAuthList().size());
           for (ByteString authBS : authsResponse.getAuthList()) {
             authsList.add(Bytes.toString(authBS.toByteArray()));
           }
@@ -227,7 +227,7 @@ public class TestWithDisabledAuthorization {
 
   static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
       throws Exception {
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(labelExps.length + 1);
     for (int i = 0; i < labelExps.length; i++) {
       Put put = new Put(Bytes.toBytes("row" + (i+1)));
       put.addColumn(TEST_FAMILY, TEST_QUALIFIER, HConstants.LATEST_TIMESTAMP, ZERO);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java
index 55fc2fb..77cfbcc 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java
@@ -47,7 +47,7 @@ public class TestExportSnapshotHelpers {
   @Test
   public void testBalanceSplit() throws Exception {
     // Create a list of files
-    List<Pair<SnapshotFileInfo, Long>> files = new ArrayList<Pair<SnapshotFileInfo, Long>>();
+    List<Pair<SnapshotFileInfo, Long>> files = new ArrayList<Pair<SnapshotFileInfo, Long>>(21);
     for (long i = 0; i <= 20; i++) {
       SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder()
         .setType(SnapshotFileInfo.Type.HFILE)

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
index 780cf12..574d546 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/BaseTestHBaseFsck.java
@@ -259,7 +259,7 @@ public class BaseTestHBaseFsck {
     createTable(TEST_UTIL, desc, SPLITS);
 
     tbl = connection.getTable(tablename, tableExecutorService);
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(ROWKEYS.length);
     for (byte[] row : ROWKEYS) {
       Put p = new Put(row);
       p.addColumn(FAM, Bytes.toBytes("val"), row);
@@ -283,7 +283,7 @@ public class BaseTestHBaseFsck {
     createTable(TEST_UTIL, desc, SPLITS);
 
     tbl = connection.getTable(tablename, tableExecutorService);
-    List<Put> puts = new ArrayList<Put>();
+    List<Put> puts = new ArrayList<Put>(ROWKEYS.length);
     for (byte[] row : ROWKEYS) {
       Put p = new Put(row);
       p.addColumn(FAM, Bytes.toBytes("val"), row);
@@ -335,7 +335,7 @@ public class BaseTestHBaseFsck {
 
       // list all online regions from this region server
       List<HRegionInfo> regions = ProtobufUtil.getOnlineRegions(server);
-      List<String> regionNames = new ArrayList<>();
+      List<String> regionNames = new ArrayList<>(regions.size());
       for (HRegionInfo hri : regions) {
         regionNames.add(hri.getRegionNameAsString());
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
index 7b5aed5..d68c578 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/HFileArchiveTestingUtil.java
@@ -143,7 +143,7 @@ public class HFileArchiveTestingUtil {
    * @return <expected, gotten, backup>, where each is sorted
    */
   private static List<List<String>> getFileLists(FileStatus[] previous, FileStatus[] archived) {
-    List<List<String>> files = new ArrayList<List<String>>();
+    List<List<String>> files = new ArrayList<List<String>>(3);
 
     // copy over the original files
     List<String> originalFileNames = convertToString(previous);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index 257dfc5..88c2fd9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -978,7 +978,7 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
         undeployRegion(connection, firstSN, daughters.getFirst());
         undeployRegion(connection, secondSN, daughters.getSecond());
 
-        List<Delete> deletes = new ArrayList<>();
+        List<Delete> deletes = new ArrayList<>(2);
         deletes.add(new Delete(daughters.getFirst().getRegionName()));
         deletes.add(new Delete(daughters.getSecond().getRegionName()));
         meta.delete(deletes);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
index 95d8b4a..e7a6500 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSizeCalculator.java
@@ -118,7 +118,7 @@ public class TestRegionSizeCalculator {
   private RegionLocator mockRegionLocator(String... regionNames) throws IOException {
     RegionLocator mockedTable = Mockito.mock(RegionLocator.class);
     when(mockedTable.getName()).thenReturn(TableName.valueOf("sizeTestTable"));
-    List<HRegionLocation> regionLocations = new ArrayList<>();
+    List<HRegionLocation> regionLocations = new ArrayList<>(regionNames.length);
     when(mockedTable.getAllRegionLocations()).thenReturn(regionLocations);
 
     for (String regionName : regionNames) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java
index c35491d..ea2bc7a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitCalculator.java
@@ -364,7 +364,7 @@ public class TestRegionSplitCalculator {
     SimpleRange ae = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("E"));
     SimpleRange ac = new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C"));
 
-    Collection<SimpleRange> bigOverlap = new ArrayList<SimpleRange>();
+    Collection<SimpleRange> bigOverlap = new ArrayList<SimpleRange>(8);
     bigOverlap.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("E")));
     bigOverlap.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("C")));
     bigOverlap.add(new SimpleRange(Bytes.toBytes("A"), Bytes.toBytes("B")));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java
index f54771c..00c38f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestRegionSplitter.java
@@ -73,7 +73,7 @@ public class TestRegionSplitter {
      */
     @Test
     public void testCreatePresplitTableHex() throws Exception {
-      final List<byte[]> expectedBounds = new ArrayList<byte[]>();
+      final List<byte[]> expectedBounds = new ArrayList<byte[]>(17);
       expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);
       expectedBounds.add("10000000".getBytes());
       expectedBounds.add("20000000".getBytes());
@@ -103,7 +103,7 @@ public class TestRegionSplitter {
      */
     @Test
     public void testCreatePresplitTableUniform() throws Exception {
-      List<byte[]> expectedBounds = new ArrayList<byte[]>();
+      List<byte[]> expectedBounds = new ArrayList<byte[]>(17);
       expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);
       expectedBounds.add(new byte[] {      0x10, 0, 0, 0, 0, 0, 0, 0});
       expectedBounds.add(new byte[] {      0x20, 0, 0, 0, 0, 0, 0, 0});
@@ -288,7 +288,7 @@ public class TestRegionSplitter {
 
   @Test
   public void noopRollingSplit() throws Exception {
-    final List<byte[]> expectedBounds = new ArrayList<byte[]>();
+    final List<byte[]> expectedBounds = new ArrayList<byte[]>(1);
     expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);
     rollingSplitAndVerify(TableName.valueOf(TestRegionSplitter.class.getSimpleName()),
         "UniformSplit", expectedBounds);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
index ec5f037..43f35c0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/IOTestProvider.java
@@ -108,7 +108,7 @@ public class IOTestProvider implements WALProvider {
 
   @Override
   public List<WAL> getWALs() throws IOException {
-    List<WAL> wals = new ArrayList<WAL>();
+    List<WAL> wals = new ArrayList<WAL>(1);
     wals.add(log);
     return wals;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
index 485c1f5..26329f6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperACL.java
@@ -325,7 +325,7 @@ public class TestZooKeeperACL {
     if (!secureZKAvailable) {
       return;
     }
-    List<ServerName> drainingServers = new ArrayList<ServerName>();
+    List<ServerName> drainingServers = new ArrayList<ServerName>(1);
     drainingServers.add(ServerName.parseServerName("ZZZ,123,123"));
 
     // If unable to connect to secure ZK cluster then this operation would fail.

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
index 074b9f7..1403805 100644
--- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
@@ -54,7 +54,7 @@ public abstract class AbstractTestShell {
     TEST_UTIL.startMiniCluster();
 
     // Configure jruby runtime
-    List<String> loadPaths = new ArrayList();
+    List<String> loadPaths = new ArrayList<>(2);
     loadPaths.add("src/main/ruby");
     loadPaths.add("src/test/ruby");
     jruby.getProvider().setLoadPaths(loadPaths);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java
index 3f11853..339f5a1 100644
--- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestShellNoCluster.java
@@ -36,7 +36,7 @@ public class TestShellNoCluster extends AbstractTestShell {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     // no cluster
-    List<String> loadPaths = new ArrayList();
+    List<String> loadPaths = new ArrayList<>(2);
     loadPaths.add("src/main/ruby");
     loadPaths.add("src/test/ruby");
     jruby.getProvider().setLoadPaths(loadPaths);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java
index 2320ad3..26da816 100644
--- a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java
+++ b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java
@@ -74,7 +74,7 @@ public class TestShellRSGroups {
     TEST_UTIL.startMiniCluster(1,4);
 
     // Configure jruby runtime
-    List<String> loadPaths = new ArrayList();
+    List<String> loadPaths = new ArrayList<>(2);
     loadPaths.add(basePath+"/src/main/ruby");
     loadPaths.add(basePath+"/src/test/ruby");
     jruby.getProvider().setLoadPaths(loadPaths);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java
index 68b2edd..8d55cf9 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkDeleteExample.java
@@ -50,7 +50,7 @@ final public class JavaHBaseBulkDeleteExample {
     JavaSparkContext jsc = new JavaSparkContext(sparkConf);
 
     try {
-      List<byte[]> list = new ArrayList<>();
+      List<byte[]> list = new ArrayList<>(5);
       list.add(Bytes.toBytes("1"));
       list.add(Bytes.toBytes("2"));
       list.add(Bytes.toBytes("3"));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
index c7dcbb6..b28b813 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkGetExample.java
@@ -53,7 +53,7 @@ final public class JavaHBaseBulkGetExample {
     JavaSparkContext jsc = new JavaSparkContext(sparkConf);
 
     try {
-      List<byte[]> list = new ArrayList<>();
+      List<byte[]> list = new ArrayList<>(5);
       list.add(Bytes.toBytes("1"));
       list.add(Bytes.toBytes("2"));
       list.add(Bytes.toBytes("3"));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
index ded5081..661a313 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkPutExample.java
@@ -52,7 +52,7 @@ final public class JavaHBaseBulkPutExample {
     JavaSparkContext jsc = new JavaSparkContext(sparkConf);
 
     try {
-      List<String> list = new ArrayList<>();
+      List<String> list = new ArrayList<>(5);
       list.add("1," + columnFamily + ",a,1");
       list.add("2," + columnFamily + ",a,2");
       list.add("3," + columnFamily + ",a,3");

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
index 0d41a70..316f8a1 100644
--- a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseMapGetPutExample.java
@@ -59,7 +59,7 @@ final public class JavaHBaseMapGetPutExample {
     JavaSparkContext jsc = new JavaSparkContext(sparkConf);
 
     try {
-      List<byte[]> list = new ArrayList<>();
+      List<byte[]> list = new ArrayList<>(5);
       list.add(Bytes.toBytes("1"));
       list.add(Bytes.toBytes("2"));
       list.add(Bytes.toBytes("3"));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
index 7761acb..853ec9b 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/HBaseTableScanRDD.scala
@@ -107,7 +107,7 @@ class HBaseTableScanRDD(relation: HBaseRelation,
       columns: Seq[Field],
       hbaseContext: HBaseContext): Iterator[Result] = {
     g.grouped(relation.bulkGetSize).flatMap{ x =>
-      val gets = new ArrayList[Get]()
+      val gets = new ArrayList[Get](x.size)
       x.foreach{ y =>
         val g = new Get(y)
         handleTimeSemantics(g)

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java
----------------------------------------------------------------------
diff --git a/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java b/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java
index 724ac36..da6b724 100644
--- a/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java
+++ b/hbase-spark/src/test/java/org/apache/hadoop/hbase/spark/TestJavaHBaseContext.java
@@ -117,7 +117,7 @@ public class TestJavaHBaseContext implements Serializable {
   @Test
   public void testBulkPut() throws IOException {
 
-    List<String> list = new ArrayList<>();
+    List<String> list = new ArrayList<>(5);
     list.add("1," + columnFamilyStr + ",a,1");
     list.add("2," + columnFamilyStr + ",a,2");
     list.add("3," + columnFamilyStr + ",a,3");
@@ -134,7 +134,7 @@ public class TestJavaHBaseContext implements Serializable {
     Table table = conn.getTable(TableName.valueOf(tableName));
 
     try {
-      List<Delete> deletes = new ArrayList<>();
+      List<Delete> deletes = new ArrayList<>(5);
       for (int i = 1; i < 6; i++) {
         deletes.add(new Delete(Bytes.toBytes(Integer.toString(i))));
       }
@@ -186,7 +186,7 @@ public class TestJavaHBaseContext implements Serializable {
 
   @Test
   public void testBulkDelete() throws IOException {
-    List<byte[]> list = new ArrayList<>();
+    List<byte[]> list = new ArrayList<>(3);
     list.add(Bytes.toBytes("1"));
     list.add(Bytes.toBytes("2"));
     list.add(Bytes.toBytes("3"));
@@ -255,7 +255,7 @@ public class TestJavaHBaseContext implements Serializable {
 
   @Test
   public void testBulkGet() throws IOException {
-    List<byte[]> list = new ArrayList<>();
+    List<byte[]> list = new ArrayList<>(5);
     list.add(Bytes.toBytes("1"));
     list.add(Bytes.toBytes("2"));
     list.add(Bytes.toBytes("3"));
@@ -324,7 +324,7 @@ public class TestJavaHBaseContext implements Serializable {
       Connection conn = ConnectionFactory.createConnection(conf);
       Table table = conn.getTable(tableName)) {
 
-      List<Put> puts = new ArrayList<>();
+      List<Put> puts = new ArrayList<>(5);
 
       for (int i = 1; i < 6; i++) {
         Put put = new Put(Bytes.toBytes(Integer.toString(i)));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
index 22bca55..46ea7f8 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
@@ -849,7 +849,7 @@ public class ThriftServerRunner implements Runnable {
     throws IOError {
       try (RegionLocator locator = connectionCache.getRegionLocator(getBytes(tableName))) {
         List<HRegionLocation> regionLocations = locator.getAllRegionLocations();
-        List<TRegionInfo> results = new ArrayList<TRegionInfo>();
+        List<TRegionInfo> results = new ArrayList<TRegionInfo>(regionLocations.size());
         for (HRegionLocation regionLocation : regionLocations) {
           HRegionInfo info = regionLocation.getRegionInfo();
           ServerName serverName = regionLocation.getServerName();
@@ -1861,7 +1861,7 @@ public class ThriftServerRunner implements Runnable {
    */
   private static void addAttributes(OperationWithAttributes op,
     Map<ByteBuffer, ByteBuffer> attributes) {
-    if (attributes == null || attributes.size() == 0) {
+    if (attributes == null || attributes.isEmpty()) {
       return;
     }
     for (Map.Entry<ByteBuffer, ByteBuffer> entry : attributes.entrySet()) {

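Editor's note: the addAttributes() change above is the same emptiness pattern applied to a Map: return early when there is nothing to copy, and express the check with isEmpty() rather than size() == 0. A small sketch of that guard, using a made-up helper name rather than the actual ThriftServerRunner method:

    import java.nio.ByteBuffer;
    import java.util.Map;

    class AttributeGuard {
      // True only when the map exists and has at least one entry; callers
      // can skip their copy loop otherwise.
      static boolean hasAttributes(Map<ByteBuffer, ByteBuffer> attributes) {
        return attributes != null && !attributes.isEmpty();
      }
    }
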
http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
index 9a3eb58..d2a95ce 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftUtilities.java
@@ -149,7 +149,7 @@ public class ThriftUtilities {
    * @return Thrift TRowResult array
    */
   static public List<TRowResult> rowResultFromHBase(Result[] in, boolean sortColumns) {
-    List<TRowResult> results = new ArrayList<TRowResult>();
+    List<TRowResult> results = new ArrayList<TRowResult>(in.length);
     for ( Result result_ : in) {
         if(result_ == null || result_.isEmpty()) {
             continue;

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
index 807af76..0001b3f 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
@@ -160,7 +160,7 @@ public class ThriftUtilities {
     if (row != null) {
       out.setRow(in.getRow());
     }
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(raw.length);
     for (Cell kv : raw) {
       TColumnValue col = new TColumnValue();
       col.setFamily(CellUtil.cloneFamily(kv));
@@ -328,7 +328,7 @@ public class ThriftUtilities {
   public static TDelete deleteFromHBase(Delete in) {
     TDelete out = new TDelete(ByteBuffer.wrap(in.getRow()));
 
-    List<TColumn> columns = new ArrayList<TColumn>();
+    List<TColumn> columns = new ArrayList<TColumn>(in.getFamilyCellMap().entrySet().size());
     long rowTimestamp = in.getTimeStamp();
     if (rowTimestamp != HConstants.LATEST_TIMESTAMP) {
       out.setTimestamp(rowTimestamp);
@@ -517,7 +517,7 @@ public class ThriftUtilities {
    */
   private static void addAttributes(OperationWithAttributes op,
                                     Map<ByteBuffer, ByteBuffer> attributes) {
-    if (attributes == null || attributes.size() == 0) {
+    if (attributes == null || attributes.isEmpty()) {
       return;
     }
     for (Map.Entry<ByteBuffer, ByteBuffer> entry : attributes.entrySet()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
index 0f49f56..26019be 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftHttpServer.java
@@ -127,7 +127,7 @@ public class TestThriftHttpServer {
   }
 
   private void runThriftServer(int customHeaderSize) throws Exception {
-    List<String> args = new ArrayList<String>();
+    List<String> args = new ArrayList<String>(3);
     port = HBaseTestingUtility.randomFreePort();
     args.add("-" + ThriftServer.PORT_OPTION);
     args.add(String.valueOf(port));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
index 19245ea..06a7d3d 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
@@ -273,7 +273,7 @@ public class TestThriftServer {
     handler.mutateRow(tableAname, rowAname, mutations, null);
     handler.mutateRow(tableAname, rowBname, mutations, null);
 
-    List<TIncrement> increments = new ArrayList<TIncrement>();
+    List<TIncrement> increments = new ArrayList<TIncrement>(3);
     increments.add(new TIncrement(tableAname, rowBname, columnAAname, 7));
     increments.add(new TIncrement(tableAname, rowBname, columnAAname, 7));
     increments.add(new TIncrement(tableAname, rowBname, columnAAname, 7));
@@ -364,7 +364,7 @@ public class TestThriftServer {
     assertEquals(0, size);
 
     // Try null mutation
-    List<Mutation> mutations = new ArrayList<Mutation>();
+    List<Mutation> mutations = new ArrayList<Mutation>(1);
     mutations.add(new Mutation(false, columnAname, null, true));
     handler.mutateRow(tableAname, rowAname, mutations, null);
     TRowResult rowResult3 = handler.getRow(tableAname, rowAname, null).get(0);
@@ -423,7 +423,7 @@ public class TestThriftServer {
     // ColumnAname has been deleted, and will never be visible even with a getRowTs()
     assertFalse(rowResult2.columns.containsKey(columnAname));
 
-    List<ByteBuffer> columns = new ArrayList<ByteBuffer>();
+    List<ByteBuffer> columns = new ArrayList<ByteBuffer>(1);
     columns.add(columnBname);
 
     rowResult1 = handler.getRowWithColumns(tableAname, rowAname, columns, null).get(0);
@@ -542,7 +542,7 @@ public class TestThriftServer {
     assertEquals(rowResult6.sortedColumns.size(), 1);
     assertEquals(rowResult6.sortedColumns.get(0).getCell().value, valueCname);
 
-    List<Mutation> rowBmutations = new ArrayList<Mutation>();
+    List<Mutation> rowBmutations = new ArrayList<Mutation>(20);
     for (int i = 0; i < 20; i++) {
       rowBmutations.add(new Mutation(false, asByteBuffer("columnA:" + i), valueCname, true));
     }
@@ -659,9 +659,9 @@ public class TestThriftServer {
       mutations.add(new Mutation(false, columnAname, valueAname, true));
       handler.mutateRow(tableAname, rowAname, mutations, null);
 
-      List<ByteBuffer> columnList = new ArrayList<ByteBuffer>();
+      List<ByteBuffer> columnList = new ArrayList<ByteBuffer>(1);
       columnList.add(columnAname);
-      List<ByteBuffer> valueList = new ArrayList<ByteBuffer>();
+      List<ByteBuffer> valueList = new ArrayList<ByteBuffer>(1);
       valueList.add(valueBname);
 
       TAppend append = new TAppend(tableAname, rowAname, columnList, valueList);
@@ -714,7 +714,7 @@ public class TestThriftServer {
    * default ColumnDescriptor and one ColumnDescriptor with fewer versions
    */
   private static List<ColumnDescriptor> getColumnDescriptors() {
-    ArrayList<ColumnDescriptor> cDescriptors = new ArrayList<ColumnDescriptor>();
+    ArrayList<ColumnDescriptor> cDescriptors = new ArrayList<ColumnDescriptor>(2);
 
     // A default ColumnDescriptor
     ColumnDescriptor cDescA = new ColumnDescriptor();
@@ -748,7 +748,7 @@ public class TestThriftServer {
    * and columnB having valueB
    */
   private static List<Mutation> getMutations() {
-    List<Mutation> mutations = new ArrayList<Mutation>();
+    List<Mutation> mutations = new ArrayList<Mutation>(2);
     mutations.add(new Mutation(false, columnAname, valueAname, true));
     mutations.add(new Mutation(false, columnBname, valueBname, true));
     return mutations;
@@ -763,19 +763,19 @@ public class TestThriftServer {
    * (rowB, columnB): place valueD
    */
   private static List<BatchMutation> getBatchMutations() {
-    List<BatchMutation> batchMutations = new ArrayList<BatchMutation>();
+    List<BatchMutation> batchMutations = new ArrayList<BatchMutation>(3);
 
     // Mutations to rowA.  You can't mix delete and put anymore.
-    List<Mutation> rowAmutations = new ArrayList<Mutation>();
+    List<Mutation> rowAmutations = new ArrayList<Mutation>(1);
     rowAmutations.add(new Mutation(true, columnAname, null, true));
     batchMutations.add(new BatchMutation(rowAname, rowAmutations));
 
-    rowAmutations = new ArrayList<Mutation>();
+    rowAmutations = new ArrayList<Mutation>(1);
     rowAmutations.add(new Mutation(false, columnBname, valueCname, true));
     batchMutations.add(new BatchMutation(rowAname, rowAmutations));
 
     // Mutations to rowB
-    List<Mutation> rowBmutations = new ArrayList<Mutation>();
+    List<Mutation> rowBmutations = new ArrayList<Mutation>(2);
     rowBmutations.add(new Mutation(false, columnAname, valueCname, true));
     rowBmutations.add(new Mutation(false, columnBname, valueDname, true));
     batchMutations.add(new BatchMutation(rowBname, rowBmutations));

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
index 38e3780..6df4353 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandler.java
@@ -177,7 +177,7 @@ public class TestThriftHBaseServiceHandler {
     TGet get = new TGet(wrap(rowName));
     assertFalse(handler.exists(table, get));
 
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
     TPut put = new TPut(wrap(rowName), columnValues);
@@ -194,7 +194,7 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testPutGet".getBytes();
     ByteBuffer table = wrap(tableAname);
 
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
     TPut put = new TPut(wrap(rowName), columnValues);
@@ -218,16 +218,16 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName1 = "testPutGetMultiple1".getBytes();
     byte[] rowName2 = "testPutGetMultiple2".getBytes();
 
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
-    List<TPut> puts = new ArrayList<TPut>();
+    List<TPut> puts = new ArrayList<TPut>(2);
     puts.add(new TPut(wrap(rowName1), columnValues));
     puts.add(new TPut(wrap(rowName2), columnValues));
 
     handler.putMultiple(table, puts);
 
-    List<TGet> gets = new ArrayList<TGet>();
+    List<TGet> gets = new ArrayList<TGet>(2);
     gets.add(new TGet(wrap(rowName1)));
     gets.add(new TGet(wrap(rowName2)));
 
@@ -248,16 +248,16 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName1 = "testDeleteMultiple1".getBytes();
     byte[] rowName2 = "testDeleteMultiple2".getBytes();
 
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname), wrap(valueBname)));
-    List<TPut> puts = new ArrayList<TPut>();
+    List<TPut> puts = new ArrayList<TPut>(2);
     puts.add(new TPut(wrap(rowName1), columnValues));
     puts.add(new TPut(wrap(rowName2), columnValues));
 
     handler.putMultiple(table, puts);
 
-    List<TDelete> deletes = new ArrayList<TDelete>();
+    List<TDelete> deletes = new ArrayList<TDelete>(2);
     deletes.add(new TDelete(wrap(rowName1)));
     deletes.add(new TDelete(wrap(rowName2)));
 
@@ -275,7 +275,7 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testDelete".getBytes();
     ByteBuffer table = wrap(tableAname);
 
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(valueAname));
     TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname),
@@ -289,7 +289,7 @@ public class TestThriftHBaseServiceHandler {
     handler.put(table, put);
 
     TDelete delete = new TDelete(wrap(rowName));
-    List<TColumn> deleteColumns = new ArrayList<TColumn>();
+    List<TColumn> deleteColumns = new ArrayList<TColumn>(1);
     TColumn deleteColumn = new TColumn(wrap(familyAname));
     deleteColumn.setQualifier(qualifierAname);
     deleteColumns.add(deleteColumn);
@@ -301,7 +301,7 @@ public class TestThriftHBaseServiceHandler {
     TResult result = handler.get(table, get);
     assertArrayEquals(rowName, result.getRow());
     List<TColumnValue> returnedColumnValues = result.getColumnValues();
-    List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>(1);
     expectedColumnValues.add(columnValueB);
     assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
   }
@@ -312,7 +312,7 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testDeleteAllTimestamps".getBytes();
     ByteBuffer table = wrap(tableAname);
 
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(valueAname));
     columnValueA.setTimestamp(System.currentTimeMillis() - 10);
@@ -331,7 +331,7 @@ public class TestThriftHBaseServiceHandler {
     assertEquals(2, result.getColumnValuesSize());
 
     TDelete delete = new TDelete(wrap(rowName));
-    List<TColumn> deleteColumns = new ArrayList<TColumn>();
+    List<TColumn> deleteColumns = new ArrayList<TColumn>(1);
     TColumn deleteColumn = new TColumn(wrap(familyAname));
     deleteColumn.setQualifier(qualifierAname);
     deleteColumns.add(deleteColumn);
@@ -355,7 +355,7 @@ public class TestThriftHBaseServiceHandler {
     long timestamp1 = System.currentTimeMillis() - 10;
     long timestamp2 = System.currentTimeMillis();
 
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(valueAname));
     columnValueA.setTimestamp(timestamp1);
@@ -374,7 +374,7 @@ public class TestThriftHBaseServiceHandler {
     assertEquals(2, result.getColumnValuesSize());
 
     TDelete delete = new TDelete(wrap(rowName));
-    List<TColumn> deleteColumns = new ArrayList<TColumn>();
+    List<TColumn> deleteColumns = new ArrayList<TColumn>(1);
     TColumn deleteColumn = new TColumn(wrap(familyAname));
     deleteColumn.setQualifier(qualifierAname);
     deleteColumns.add(deleteColumn);
@@ -397,14 +397,14 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testIncrement".getBytes();
     ByteBuffer table = wrap(tableAname);
 
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(Bytes.toBytes(1L))));
     TPut put = new TPut(wrap(rowName), columnValues);
     put.setColumnValues(columnValues);
     handler.put(table, put);
 
-    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>();
+    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(1);
     incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
     TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
     handler.increment(table, increment);
@@ -425,13 +425,13 @@ public class TestThriftHBaseServiceHandler {
     ByteBuffer table = wrap(tableAname);
     byte[] v1 = Bytes.toBytes("42");
     byte[] v2 = Bytes.toBytes("23");
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v1)));
     TPut put = new TPut(wrap(rowName), columnValues);
     put.setColumnValues(columnValues);
     handler.put(table, put);
 
-    List<TColumnValue> appendColumns = new ArrayList<TColumnValue>();
+    List<TColumnValue> appendColumns = new ArrayList<TColumnValue>(1);
     appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(v2)));
     TAppend append = new TAppend(wrap(rowName), appendColumns);
     handler.append(table, append);
@@ -457,14 +457,14 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testCheckAndPut".getBytes();
     ByteBuffer table = wrap(tableAname);
 
-    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(1);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(valueAname));
     columnValuesA.add(columnValueA);
     TPut putA = new TPut(wrap(rowName), columnValuesA);
     putA.setColumnValues(columnValuesA);
 
-    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(1);
     TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname),
       wrap(valueBname));
     columnValuesB.add(columnValueB);
@@ -486,7 +486,7 @@ public class TestThriftHBaseServiceHandler {
     result = handler.get(table, get);
     assertArrayEquals(rowName, result.getRow());
     List<TColumnValue> returnedColumnValues = result.getColumnValues();
-    List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>(2);
     expectedColumnValues.add(columnValueA);
     expectedColumnValues.add(columnValueB);
     assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
@@ -504,14 +504,14 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testCheckAndDelete".getBytes();
     ByteBuffer table = wrap(tableAname);
 
-    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(1);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(valueAname));
     columnValuesA.add(columnValueA);
     TPut putA = new TPut(wrap(rowName), columnValuesA);
     putA.setColumnValues(columnValuesA);
 
-    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(1);
     TColumnValue columnValueB = new TColumnValue(wrap(familyBname), wrap(qualifierBname),
       wrap(valueBname));
     columnValuesB.add(columnValueB);
@@ -549,7 +549,7 @@ public class TestThriftHBaseServiceHandler {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < 10; i++) {
       TPut put = new TPut(wrap(("testScan" + i).getBytes()), columnValues);
@@ -558,7 +558,7 @@ public class TestThriftHBaseServiceHandler {
 
     // create scan instance
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>();
+    List<TColumn> columns = new ArrayList<TColumn>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -610,7 +610,7 @@ public class TestThriftHBaseServiceHandler {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
         wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < numTrials; i++) {
       TPut put = new TPut(wrap(("testScan" + i).getBytes()), columnValues);
@@ -619,7 +619,7 @@ public class TestThriftHBaseServiceHandler {
 
     // create scan instance
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>();
+    List<TColumn> columns = new ArrayList<TColumn>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -648,7 +648,7 @@ public class TestThriftHBaseServiceHandler {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < 10; i++) {
       TPut put = new TPut(wrap(("testReverseScan" + i).getBytes()), columnValues);
@@ -658,7 +658,7 @@ public class TestThriftHBaseServiceHandler {
     // create reverse scan instance
     TScan scan = new TScan();
     scan.setReversed(true);
-    List<TColumn> columns = new ArrayList<TColumn>();
+    List<TColumn> columns = new ArrayList<TColumn>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -697,7 +697,7 @@ public class TestThriftHBaseServiceHandler {
     // insert data
     TColumnValue columnValue = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < 10; i++) {
       TPut put = new TPut(wrap(("testScanWithFilter" + i).getBytes()), columnValues);
@@ -706,7 +706,7 @@ public class TestThriftHBaseServiceHandler {
 
     // create scan instance with filter
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>();
+    List<TColumn> columns = new ArrayList<TColumn>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -746,7 +746,7 @@ public class TestThriftHBaseServiceHandler {
     ThriftHBaseServiceHandler handler = createHandler();
     byte[] rowName = "testPutTTL".getBytes();
     ByteBuffer table = wrap(tableAname);
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
 
     // Add some dummy data
     columnValues.add(
@@ -810,7 +810,7 @@ public class TestThriftHBaseServiceHandler {
     ByteBuffer table = wrap(tableAname);
 
     // insert data
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(100);
     for (int i = 0; i < 100; i++) {
       String colNum = pad(i, (byte) 3);
       TColumnValue columnValue = new TColumnValue(wrap(familyAname),
@@ -822,7 +822,7 @@ public class TestThriftHBaseServiceHandler {
 
     // create scan instance
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>();
+    List<TColumn> columns = new ArrayList<TColumn>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     columns.add(column);
@@ -871,7 +871,7 @@ public class TestThriftHBaseServiceHandler {
     // insert data
     TColumnValue columnValue =
         new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname));
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(columnValue);
     for (int i = 0; i < 20; i++) {
       TPut put =
@@ -881,7 +881,7 @@ public class TestThriftHBaseServiceHandler {
 
     // create scan instance
     TScan scan = new TScan();
-    List<TColumn> columns = new ArrayList<TColumn>();
+    List<TColumn> columns = new ArrayList<TColumn>(1);
     TColumn column = new TColumn();
     column.setFamily(familyAname);
     column.setQualifier(qualifierAname);
@@ -956,7 +956,7 @@ public class TestThriftHBaseServiceHandler {
     TGet get = new TGet(wrap(rowName));
     assertFalse(handler.exists(table, get));
 
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname),  wrap(valueBname)));
     TPut put = new TPut(wrap(rowName), columnValues);
@@ -988,7 +988,7 @@ public class TestThriftHBaseServiceHandler {
     Get get = getFromThrift(tGet);
     assertArrayEquals(get.getAttribute("attribute1"), attributeValue);
 
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
     TPut tPut = new TPut(wrap(rowName) , columnValues);
     tPut.setAttributes(attributes);
@@ -1000,7 +1000,7 @@ public class TestThriftHBaseServiceHandler {
     Scan scan = scanFromThrift(tScan);
     assertArrayEquals(scan.getAttribute("attribute1"), attributeValue);
 
-    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>();
+    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(1);
     incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
     TIncrement tIncrement = new TIncrement(wrap(rowName), incrementColumns);
     tIncrement.setAttributes(attributes);
@@ -1025,7 +1025,7 @@ public class TestThriftHBaseServiceHandler {
     byte[] rowName = "testMutateRow".getBytes();
     ByteBuffer table = wrap(tableAname);
 
-    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(1);
     TColumnValue columnValueA = new TColumnValue(wrap(familyAname), wrap(qualifierAname),
         wrap(valueAname));
     columnValuesA.add(columnValueA);
@@ -1039,11 +1039,11 @@ public class TestThriftHBaseServiceHandler {
     assertArrayEquals(rowName, result.getRow());
     List<TColumnValue> returnedColumnValues = result.getColumnValues();
 
-    List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> expectedColumnValues = new ArrayList<TColumnValue>(1);
     expectedColumnValues.add(columnValueA);
     assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
 
-    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(1);
     TColumnValue columnValueB = new TColumnValue(wrap(familyAname), wrap(qualifierBname),
         wrap(valueBname));
     columnValuesB.add(columnValueB);
@@ -1051,13 +1051,13 @@ public class TestThriftHBaseServiceHandler {
     putB.setColumnValues(columnValuesB);
 
     TDelete delete = new TDelete(wrap(rowName));
-    List<TColumn> deleteColumns = new ArrayList<TColumn>();
+    List<TColumn> deleteColumns = new ArrayList<TColumn>(1);
     TColumn deleteColumn = new TColumn(wrap(familyAname));
     deleteColumn.setQualifier(qualifierAname);
     deleteColumns.add(deleteColumn);
     delete.setColumns(deleteColumns);
 
-    List<TMutation> mutations = new ArrayList<TMutation>();
+    List<TMutation> mutations = new ArrayList<TMutation>(2);
     TMutation mutationA = TMutation.put(putB);
     mutations.add(mutationA);
 
@@ -1071,7 +1071,7 @@ public class TestThriftHBaseServiceHandler {
     assertArrayEquals(rowName, result.getRow());
     returnedColumnValues = result.getColumnValues();
 
-    expectedColumnValues = new ArrayList<TColumnValue>();
+    expectedColumnValues = new ArrayList<TColumnValue>(1);
     expectedColumnValues.add(columnValueB);
     assertTColumnValuesEqual(expectedColumnValues, returnedColumnValues);
   }
@@ -1086,10 +1086,10 @@ public class TestThriftHBaseServiceHandler {
   @Test
   public void testDurability() throws Exception {
     byte[] rowName = "testDurability".getBytes();
-    List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
     columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname), wrap(valueAname)));
 
-    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>();
+    List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(1);
     incrementColumns.add(new TColumnIncrement(wrap(familyAname), wrap(qualifierAname)));
 
     TDelete tDelete = new TDelete(wrap(rowName));
@@ -1155,7 +1155,7 @@ public class TestThriftHBaseServiceHandler {
     ByteBuffer value = wrap(valueAname);
 
     // Create a mutation to write to 'B', our "mutate" of "checkAndMutate"
-    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValuesB = new ArrayList<TColumnValue>(1);
     TColumnValue columnValueB = new TColumnValue(family, wrap(qualifierBname), wrap(valueBname));
     columnValuesB.add(columnValueB);
     TPut putB = new TPut(row, columnValuesB);
@@ -1173,7 +1173,7 @@ public class TestThriftHBaseServiceHandler {
         handler.checkAndMutate(table, row, family, qualifier, TCompareOp.EQUAL, value,
             tRowMutations));
 
-    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>();
+    List<TColumnValue> columnValuesA = new ArrayList<TColumnValue>(1);
     TColumnValue columnValueA = new TColumnValue(family, qualifier, value);
     columnValuesA.add(columnValueA);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a1aa1e/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
index b620fb0..8c643db 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift2/TestThriftHBaseServiceHandlerWithLabels.java
@@ -197,7 +197,7 @@ public void testScanWithVisibilityLabels() throws Exception {
   // insert data
   TColumnValue columnValue = new TColumnValue(wrap(familyAname),
       wrap(qualifierAname), wrap(valueAname));
-  List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+  List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
   columnValues.add(columnValue);
   for (int i = 0; i < 10; i++) {
     TPut put = new TPut(wrap(("testScan" + i).getBytes()), columnValues);
@@ -212,7 +212,7 @@ public void testScanWithVisibilityLabels() throws Exception {
 
   // create scan instance
   TScan scan = new TScan();
-  List<TColumn> columns = new ArrayList<TColumn>();
+  List<TColumn> columns = new ArrayList<TColumn>(1);
   TColumn column = new TColumn();
   column.setFamily(familyAname);
   column.setQualifier(qualifierAname);
@@ -222,7 +222,7 @@ public void testScanWithVisibilityLabels() throws Exception {
   scan.setStopRow("testScan\uffff".getBytes());
 
   TAuthorization tauth = new TAuthorization();
-  List<String> labels = new ArrayList<String>();
+  List<String> labels = new ArrayList<String>(2);
   labels.add(SECRET);
   labels.add(PRIVATE);
   tauth.setLabels(labels);
@@ -265,7 +265,7 @@ public void testGetScannerResultsWithAuthorizations() throws Exception {
   // insert data
   TColumnValue columnValue = new TColumnValue(wrap(familyAname),
       wrap(qualifierAname), wrap(valueAname));
-  List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+  List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
   columnValues.add(columnValue);
   for (int i = 0; i < 20; i++) {
     TPut put = new TPut(
@@ -282,7 +282,7 @@ public void testGetScannerResultsWithAuthorizations() throws Exception {
 
   // create scan instance
   TScan scan = new TScan();
-  List<TColumn> columns = new ArrayList<TColumn>();
+  List<TColumn> columns = new ArrayList<TColumn>(1);
   TColumn column = new TColumn();
   column.setFamily(familyAname);
   column.setQualifier(qualifierAname);
@@ -293,7 +293,7 @@ public void testGetScannerResultsWithAuthorizations() throws Exception {
   // get 5 rows and check the returned results
   scan.setStopRow("testGetScannerResults05".getBytes());
   TAuthorization tauth = new TAuthorization();
-  List<String> labels = new ArrayList<String>();
+  List<String> labels = new ArrayList<String>(2);
   labels.add(SECRET);
   labels.add(PRIVATE);
   tauth.setLabels(labels);
@@ -321,7 +321,7 @@ public void testGetsWithLabels() throws Exception {
   byte[] rowName = "testPutGet".getBytes();
   ByteBuffer table = wrap(tableAname);
 
-  List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+  List<TColumnValue> columnValues = new ArrayList<TColumnValue>(2);
   columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(valueAname)));
   columnValues.add(new TColumnValue(wrap(familyBname), wrap(qualifierBname),
@@ -334,7 +334,7 @@ public void testGetsWithLabels() throws Exception {
   handler.put(table, put);
   TGet get = new TGet(wrap(rowName));
   TAuthorization tauth = new TAuthorization();
-  List<String> labels = new ArrayList<String>();
+  List<String> labels = new ArrayList<String>(2);
   labels.add(SECRET);
   labels.add(PRIVATE);
   tauth.setLabels(labels);
@@ -351,7 +351,7 @@ public void testIncrementWithTags() throws Exception {
   byte[] rowName = "testIncrementWithTags".getBytes();
   ByteBuffer table = wrap(tableAname);
 
-  List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+  List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
   columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(Bytes.toBytes(1L))));
   TPut put = new TPut(wrap(rowName), columnValues);
@@ -359,7 +359,7 @@ public void testIncrementWithTags() throws Exception {
   put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
   handler.put(table, put);
 
-  List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>();
+  List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(1);
   incrementColumns.add(new TColumnIncrement(wrap(familyAname),
       wrap(qualifierAname)));
   TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
@@ -368,7 +368,7 @@ public void testIncrementWithTags() throws Exception {
 
   TGet get = new TGet(wrap(rowName));
   TAuthorization tauth = new TAuthorization();
-  List<String> labels = new ArrayList<String>();
+  List<String> labels = new ArrayList<String>(1);
   labels.add(SECRET);
   tauth.setLabels(labels);
   get.setAuthorizations(tauth);
@@ -386,7 +386,7 @@ public void testIncrementWithTagsWithNotMatchLabels() throws Exception {
   byte[] rowName = "testIncrementWithTagsWithNotMatchLabels".getBytes();
   ByteBuffer table = wrap(tableAname);
 
-  List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+  List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
   columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(Bytes.toBytes(1L))));
   TPut put = new TPut(wrap(rowName), columnValues);
@@ -394,7 +394,7 @@ public void testIncrementWithTagsWithNotMatchLabels() throws Exception {
   put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
   handler.put(table, put);
 
-  List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>();
+  List<TColumnIncrement> incrementColumns = new ArrayList<TColumnIncrement>(1);
   incrementColumns.add(new TColumnIncrement(wrap(familyAname),
       wrap(qualifierAname)));
   TIncrement increment = new TIncrement(wrap(rowName), incrementColumns);
@@ -403,7 +403,7 @@ public void testIncrementWithTagsWithNotMatchLabels() throws Exception {
 
   TGet get = new TGet(wrap(rowName));
   TAuthorization tauth = new TAuthorization();
-  List<String> labels = new ArrayList<String>();
+  List<String> labels = new ArrayList<String>(1);
   labels.add(PUBLIC);
   tauth.setLabels(labels);
   get.setAuthorizations(tauth);
@@ -418,7 +418,7 @@ public void testAppend() throws Exception {
   ByteBuffer table = wrap(tableAname);
   byte[] v1 = Bytes.toBytes(1L);
   byte[] v2 = Bytes.toBytes(5L);
-  List<TColumnValue> columnValues = new ArrayList<TColumnValue>();
+  List<TColumnValue> columnValues = new ArrayList<TColumnValue>(1);
   columnValues.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(Bytes.toBytes(1L))));
   TPut put = new TPut(wrap(rowName), columnValues);
@@ -426,7 +426,7 @@ public void testAppend() throws Exception {
   put.setCellVisibility(new TCellVisibility().setExpression(PRIVATE));
   handler.put(table, put);
 
-  List<TColumnValue> appendColumns = new ArrayList<TColumnValue>();
+  List<TColumnValue> appendColumns = new ArrayList<TColumnValue>(1);
   appendColumns.add(new TColumnValue(wrap(familyAname), wrap(qualifierAname),
       wrap(v2)));
   TAppend append = new TAppend(wrap(rowName), appendColumns);
@@ -435,7 +435,7 @@ public void testAppend() throws Exception {
 
   TGet get = new TGet(wrap(rowName));
   TAuthorization tauth = new TAuthorization();
-  List<String> labels = new ArrayList<String>();
+  List<String> labels = new ArrayList<String>(1);
   labels.add(SECRET);
   tauth.setLabels(labels);
   get.setAuthorizations(tauth);
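
The ArrayList pre-sizing changes in the test diffs above all follow one pattern: pass the known element count to the ArrayList constructor so the backing array is allocated once instead of growing through reallocate-and-copy. A minimal self-contained sketch of that pattern (class and variable names here are invented for illustration, not taken from the patch):

    import java.util.ArrayList;
    import java.util.List;

    public class PresizedListExample {
      public static void main(String[] args) {
        // Default constructor: the backing array starts small and may be
        // reallocated and copied as elements are added.
        List<String> unsized = new ArrayList<String>();
        unsized.add("columnA");

        // Pre-sized constructor: exactly two elements will be added, so
        // capacity 2 is allocated up front and never needs to grow.
        List<String> sized = new ArrayList<String>(2);
        sized.add("columnA");
        sized.add("columnB");

        System.out.println(unsized.size() + " / " + sized.size()); // prints "1 / 2"
      }
    }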


[42/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages - addendum fixes line lengths (Josh Elser)

Posted by el...@apache.org.
HBASE-16995 Build client Java API and client protobuf messages - addendum fixes line lengths (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/41d73bd2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/41d73bd2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/41d73bd2

Branch: refs/heads/HBASE-16961
Commit: 41d73bd2b7919210ec032343bed30428913f83db
Parents: 26a643d
Author: tedyu <yu...@gmail.com>
Authored: Mon Nov 21 13:00:27 2016 -0800
Committer: Josh Elser <el...@apache.org>
Committed: Mon Jan 23 17:48:59 2017 -0500

----------------------------------------------------------------------
 .../hbase/quotas/QuotaSettingsFactory.java      | 20 ++++++++++++--------
 .../hadoop/hbase/quotas/SpaceLimitSettings.java |  8 ++++----
 .../hbase/shaded/protobuf/ProtobufUtil.java     |  9 +++++----
 3 files changed, 21 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/41d73bd2/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index b8e99b8..f436cf2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -130,7 +130,8 @@ public class QuotaSettingsFactory {
 
   static QuotaSettings fromSpace(TableName table, String namespace, SpaceQuota protoQuota) {
     if ((null == table && null == namespace) || (null != table && null != namespace)) {
-      throw new IllegalArgumentException("Can only construct SpaceLimitSettings for a table or namespace.");
+      throw new IllegalArgumentException(
+          "Can only construct SpaceLimitSettings for a table or namespace.");
     }
     if (null != table) {
       return SpaceLimitSettings.fromSpaceQuota(table, protoQuota);
@@ -302,29 +303,32 @@ public class QuotaSettingsFactory {
    */
 
   /**
-   * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given table to the given size in bytes.
-   * When the space usage is exceeded by the table, the provided {@link SpaceViolationPolicy} is enacted on the table.
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given table
+   * to the given size in bytes. When the space usage is exceeded by the table, the provided
+   * {@link SpaceViolationPolicy} is enacted on the table.
    *
    * @param tableName The name of the table on which the quota should be applied.
    * @param sizeLimit The limit of a table's size in bytes.
    * @param violationPolicy The action to take when the quota is exceeded.
    * @return An {@link QuotaSettings} object.
    */
-  public static QuotaSettings limitTableSpace(final TableName tableName, long sizeLimit, final SpaceViolationPolicy violationPolicy) {
+  public static QuotaSettings limitTableSpace(
+      final TableName tableName, long sizeLimit, final SpaceViolationPolicy violationPolicy) {
     return new SpaceLimitSettings(tableName, sizeLimit, violationPolicy);
   }
 
   /**
-   * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given namespace to the given size in bytes.
-   * When the space usage is exceeded by all tables in the namespace, the provided {@link SpaceViolationPolicy} is enacted on
-   * all tables in the namespace.
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given
+   * namespace to the given size in bytes. When the space usage is exceeded by all tables in the
+   * namespace, the provided {@link SpaceViolationPolicy} is enacted on all tables in the namespace.
    *
    * @param namespace The namespace on which the quota should be applied.
    * @param sizeLimit The limit of the namespace's size in bytes.
   * @param violationPolicy The action to take when the quota is exceeded.
    * @return An {@link QuotaSettings} object.
    */
-  public static QuotaSettings limitNamespaceSpace(final String namespace, long sizeLimit, final SpaceViolationPolicy violationPolicy) {
+  public static QuotaSettings limitNamespaceSpace(
+      final String namespace, long sizeLimit, final SpaceViolationPolicy violationPolicy) {
     return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy);
   }
 }
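
The reflowed Javadoc above documents the two new space-quota entry points. A minimal sketch of how they might be called (table and namespace names are invented; handing the returned QuotaSettings to the cluster, e.g. through Admin#setQuota, is only indicated in a comment and assumed rather than shown):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.quotas.QuotaSettings;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;

    public class SpaceQuotaExample {
      public static void main(String[] args) {
        // Limit the table to 10 GB on the FileSystem; disable it once exceeded.
        QuotaSettings tableLimit = QuotaSettingsFactory.limitTableSpace(
            TableName.valueOf("my_table"), 10L * 1024 * 1024 * 1024,
            SpaceViolationPolicy.DISABLE);

        // Limit all tables in the namespace to 50 GB combined; once exceeded,
        // reject further writes and compactions on those tables.
        QuotaSettings namespaceLimit = QuotaSettingsFactory.limitNamespaceSpace(
            "my_namespace", 50L * 1024 * 1024 * 1024,
            SpaceViolationPolicy.NO_WRITES_COMPACTIONS);

        // In a real client these would be submitted over an Admin connection,
        // e.g. admin.setQuota(tableLimit); admin.setQuota(namespaceLimit);
        System.out.println(tableLimit);
        System.out.println(namespaceLimit);
      }
    }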

http://git-wip-us.apache.org/repos/asf/hbase/blob/41d73bd2/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
index dded9b5..e54882e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
@@ -93,8 +93,8 @@ class SpaceLimitSettings extends QuotaSettings {
    * @param proto The protobuf representation.
    * @return A QuotaSettings.
    */
-  static SpaceLimitSettings fromSpaceQuota(final TableName tableName,
-      final QuotaProtos.SpaceQuota proto) {
+  static SpaceLimitSettings fromSpaceQuota(
+      final TableName tableName, final QuotaProtos.SpaceQuota proto) {
     validateProtoArguments(proto);
     return new SpaceLimitSettings(tableName, proto.getSoftLimit(),
         ProtobufUtil.toViolationPolicy(proto.getViolationPolicy()));
@@ -107,8 +107,8 @@ class SpaceLimitSettings extends QuotaSettings {
    * @param proto The protobuf representation.
    * @return A QuotaSettings.
    */
-  static SpaceLimitSettings fromSpaceQuota(final String namespace,
-      final QuotaProtos.SpaceQuota proto) {
+  static SpaceLimitSettings fromSpaceQuota(
+      final String namespace, final QuotaProtos.SpaceQuota proto) {
     validateProtoArguments(proto);
     return new SpaceLimitSettings(namespace, proto.getSoftLimit(),
         ProtobufUtil.toViolationPolicy(proto.getViolationPolicy()));

http://git-wip-us.apache.org/repos/asf/hbase/blob/41d73bd2/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 4f18138..d5dfeee 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -2523,7 +2523,8 @@ public final class ProtobufUtil {
    * @param proto The protocol buffer space violation policy.
    * @return The corresponding client SpaceViolationPolicy.
    */
-  public static SpaceViolationPolicy toViolationPolicy(final QuotaProtos.SpaceViolationPolicy proto) {
+  public static SpaceViolationPolicy toViolationPolicy(
+      final QuotaProtos.SpaceViolationPolicy proto) {
     switch (proto) {
       case DISABLE: return SpaceViolationPolicy.DISABLE;
       case NO_WRITES_COMPACTIONS: return SpaceViolationPolicy.NO_WRITES_COMPACTIONS;
@@ -2574,8 +2575,8 @@ public final class ProtobufUtil {
    * @param violationPolicy The policy to apply when the quota is violated.
    * @return The protocol buffer SpaceQuota.
    */
-  public static QuotaProtos.SpaceQuota toProtoSpaceQuota(final long limit,
-      final SpaceViolationPolicy violationPolicy) {
+  public static QuotaProtos.SpaceQuota toProtoSpaceQuota(
+      final long limit, final SpaceViolationPolicy violationPolicy) {
     return QuotaProtos.SpaceQuota.newBuilder()
         .setSoftLimit(limit)
         .setViolationPolicy(toProtoViolationPolicy(violationPolicy))
@@ -3277,4 +3278,4 @@ public final class ProtobufUtil {
     int port = Addressing.parsePort(str);
     return ServerName.valueOf(hostname, port, -1L);
   }
-}
\ No newline at end of file
+}
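
The two reformatted ProtobufUtil helpers above convert between the client-side limit/SpaceViolationPolicy pair and the protobuf SpaceQuota message. A small round-trip sketch, assuming the shaded QuotaProtos classes are on the classpath as in the diff:

    import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;

    public class SpaceQuotaProtoRoundTrip {
      public static void main(String[] args) {
        // Client-side limit and policy -> protobuf SpaceQuota message.
        QuotaProtos.SpaceQuota proto = ProtobufUtil.toProtoSpaceQuota(
            1024L * 1024 * 1024, SpaceViolationPolicy.DISABLE);

        // Protobuf message -> client-side policy again.
        SpaceViolationPolicy policy =
            ProtobufUtil.toViolationPolicy(proto.getViolationPolicy());

        // Prints "1073741824 DISABLE".
        System.out.println(proto.getSoftLimit() + " " + policy);
      }
    }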


[33/50] [abbrv] hbase git commit: HBASE-17404 Replace explicit type with diamond operator in hbase-annotations (Jan Hentschel)

Posted by el...@apache.org.
HBASE-17404 Replace explicit type with diamond operator in hbase-annotations (Jan Hentschel)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fb8f9247
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fb8f9247
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fb8f9247

Branch: refs/heads/HBASE-16961
Commit: fb8f9247d0262f9875fee954370fd807dc786bfc
Parents: 55a1aa1
Author: Michael Stack <st...@apache.org>
Authored: Fri Jan 20 23:03:56 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Fri Jan 20 23:03:56 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/classification/tools/RootDocProcessor.java     | 5 ++---
 .../hadoop/hbase/classification/tools/StabilityOptions.java     | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fb8f9247/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/RootDocProcessor.java
----------------------------------------------------------------------
diff --git a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/RootDocProcessor.java b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/RootDocProcessor.java
index 97d9343..6cc7551 100644
--- a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/RootDocProcessor.java
+++ b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/RootDocProcessor.java
@@ -79,8 +79,7 @@ final class RootDocProcessor {
     return obj;
   }
 
-  private static Map<Object, Object> proxies =
-    new WeakHashMap<Object, Object>();
+  private static Map<Object, Object> proxies = new WeakHashMap<>();
 
   private static Object getProxy(Object obj) {
     Object proxy = proxies.get(obj);
@@ -207,7 +206,7 @@ final class RootDocProcessor {
       if (array == null || array.length == 0) {
         return array;
       }
-      List<Object> list = new ArrayList<Object>(array.length);
+      List<Object> list = new ArrayList<>(array.length);
       for (Doc entry : array) {
         if (!exclude(entry)) {
           list.add(process(entry, componentType));

http://git-wip-us.apache.org/repos/asf/hbase/blob/fb8f9247/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
----------------------------------------------------------------------
diff --git a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
index 6cb03a5..7454436 100644
--- a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
+++ b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/tools/StabilityOptions.java
@@ -53,7 +53,7 @@ final class StabilityOptions {
   }
 
   public static String[][] filterOptions(String[][] options) {
-    List<String[]> optionsList = new ArrayList<String[]>(options.length);
+    List<String[]> optionsList = new ArrayList<>();
     for (int i = 0; i < options.length; i++) {
       if (!options[i][0].equalsIgnoreCase(UNSTABLE_OPTION)
           && !options[i][0].equalsIgnoreCase(EVOLVING_OPTION)
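
The diamond-operator change above is purely syntactic. A trivial standalone illustration (types and names invented, not taken from the patch):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class DiamondOperatorExample {
      public static void main(String[] args) {
        // Pre-Java-7 style: the type arguments are repeated on the right-hand side.
        Map<String, List<Integer>> verbose = new HashMap<String, List<Integer>>();

        // Diamond operator: the compiler infers the type arguments from the
        // declared type, so only the left-hand side has to be kept in sync.
        Map<String, List<Integer>> concise = new HashMap<>();

        List<Integer> ints = new ArrayList<>();
        ints.add(42);
        concise.put("ints", ints);

        System.out.println(verbose.isEmpty()); // true
        System.out.println(concise);           // {ints=[42]}
      }
    }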


[24/50] [abbrv] hbase git commit: HBASE-16831 Procedure V2 - Remove org.apache.hadoop.hbase.zookeeper.lock (Appy)

Posted by el...@apache.org.
HBASE-16831 Procedure V2 - Remove org.apache.hadoop.hbase.zookeeper.lock (Appy)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4fdd6ff9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4fdd6ff9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4fdd6ff9

Branch: refs/heads/HBASE-16961
Commit: 4fdd6ff9aef0979d7b4f07abe159dbe4c755ccff
Parents: 558a6bb
Author: Michael Stack <st...@apache.org>
Authored: Thu Jan 19 10:24:24 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Thu Jan 19 16:51:44 2017 -0800

----------------------------------------------------------------------
 .../protobuf/generated/ZooKeeperProtos.java     | 1263 +-----------------
 .../src/main/protobuf/ZooKeeper.proto           |   12 -
 .../protobuf/generated/ZooKeeperProtos.java     | 1239 +----------------
 .../src/main/protobuf/ZooKeeper.proto           |   12 -
 .../apache/hadoop/hbase/InterProcessLock.java   |  105 --
 .../hadoop/hbase/InterProcessReadWriteLock.java |   47 -
 .../zookeeper/lock/ZKInterProcessLockBase.java  |  459 -------
 .../zookeeper/lock/ZKInterProcessReadLock.java  |   73 -
 .../lock/ZKInterProcessReadWriteLock.java       |   66 -
 .../zookeeper/lock/ZKInterProcessWriteLock.java |   61 -
 .../lock/TestZKInterProcessReadWriteLock.java   |  360 -----
 pom.xml                                         |    4 +-
 12 files changed, 11 insertions(+), 3690 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ZooKeeperProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ZooKeeperProtos.java
index de5827e..caafc01 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ZooKeeperProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/ZooKeeperProtos.java
@@ -4120,1244 +4120,6 @@ public final class ZooKeeperProtos {
 
   }
 
-  public interface TableLockOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:hbase.pb.TableLock)
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
-
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    boolean hasTableName();
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName();
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
-
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    boolean hasLockOwner();
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getLockOwner();
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder();
-
-    /**
-     * <code>optional int64 thread_id = 3;</code>
-     */
-    boolean hasThreadId();
-    /**
-     * <code>optional int64 thread_id = 3;</code>
-     */
-    long getThreadId();
-
-    /**
-     * <code>optional bool is_shared = 4;</code>
-     */
-    boolean hasIsShared();
-    /**
-     * <code>optional bool is_shared = 4;</code>
-     */
-    boolean getIsShared();
-
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    boolean hasPurpose();
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    java.lang.String getPurpose();
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
-        getPurposeBytes();
-
-    /**
-     * <code>optional int64 create_time = 6;</code>
-     */
-    boolean hasCreateTime();
-    /**
-     * <code>optional int64 create_time = 6;</code>
-     */
-    long getCreateTime();
-  }
-  /**
-   * <pre>
-   **
-   * Metadata associated with a table lock in zookeeper
-   * </pre>
-   *
-   * Protobuf type {@code hbase.pb.TableLock}
-   */
-  public  static final class TableLock extends
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
-      // @@protoc_insertion_point(message_implements:hbase.pb.TableLock)
-      TableLockOrBuilder {
-    // Use TableLock.newBuilder() to construct.
-    private TableLock(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
-      super(builder);
-    }
-    private TableLock() {
-      threadId_ = 0L;
-      isShared_ = false;
-      purpose_ = "";
-      createTime_ = 0L;
-    }
-
-    @java.lang.Override
-    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
-    getUnknownFields() {
-      return this.unknownFields;
-    }
-    private TableLock(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      this();
-      int mutable_bitField0_ = 0;
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000001) == 0x00000001)) {
-                subBuilder = tableName_.toBuilder();
-              }
-              tableName_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(tableName_);
-                tableName_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000001;
-              break;
-            }
-            case 18: {
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000002) == 0x00000002)) {
-                subBuilder = lockOwner_.toBuilder();
-              }
-              lockOwner_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(lockOwner_);
-                lockOwner_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000002;
-              break;
-            }
-            case 24: {
-              bitField0_ |= 0x00000004;
-              threadId_ = input.readInt64();
-              break;
-            }
-            case 32: {
-              bitField0_ |= 0x00000008;
-              isShared_ = input.readBool();
-              break;
-            }
-            case 42: {
-              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
-              bitField0_ |= 0x00000010;
-              purpose_ = bs;
-              break;
-            }
-            case 48: {
-              bitField0_ |= 0x00000020;
-              createTime_ = input.readInt64();
-              break;
-            }
-          }
-        }
-      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
-            e).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_descriptor;
-    }
-
-    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock.Builder.class);
-    }
-
-    private int bitField0_;
-    public static final int TABLE_NAME_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_;
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    public boolean hasTableName() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() {
-      return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
-    }
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
-      return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
-    }
-
-    public static final int LOCK_OWNER_FIELD_NUMBER = 2;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName lockOwner_;
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    public boolean hasLockOwner() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
-    }
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getLockOwner() {
-      return lockOwner_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : lockOwner_;
-    }
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder() {
-      return lockOwner_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : lockOwner_;
-    }
-
-    public static final int THREAD_ID_FIELD_NUMBER = 3;
-    private long threadId_;
-    /**
-     * <code>optional int64 thread_id = 3;</code>
-     */
-    public boolean hasThreadId() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
-    }
-    /**
-     * <code>optional int64 thread_id = 3;</code>
-     */
-    public long getThreadId() {
-      return threadId_;
-    }
-
-    public static final int IS_SHARED_FIELD_NUMBER = 4;
-    private boolean isShared_;
-    /**
-     * <code>optional bool is_shared = 4;</code>
-     */
-    public boolean hasIsShared() {
-      return ((bitField0_ & 0x00000008) == 0x00000008);
-    }
-    /**
-     * <code>optional bool is_shared = 4;</code>
-     */
-    public boolean getIsShared() {
-      return isShared_;
-    }
-
-    public static final int PURPOSE_FIELD_NUMBER = 5;
-    private volatile java.lang.Object purpose_;
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    public boolean hasPurpose() {
-      return ((bitField0_ & 0x00000010) == 0x00000010);
-    }
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    public java.lang.String getPurpose() {
-      java.lang.Object ref = purpose_;
-      if (ref instanceof java.lang.String) {
-        return (java.lang.String) ref;
-      } else {
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
-            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
-        java.lang.String s = bs.toStringUtf8();
-        if (bs.isValidUtf8()) {
-          purpose_ = s;
-        }
-        return s;
-      }
-    }
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
-        getPurposeBytes() {
-      java.lang.Object ref = purpose_;
-      if (ref instanceof java.lang.String) {
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
-            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
-                (java.lang.String) ref);
-        purpose_ = b;
-        return b;
-      } else {
-        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
-      }
-    }
-
-    public static final int CREATE_TIME_FIELD_NUMBER = 6;
-    private long createTime_;
-    /**
-     * <code>optional int64 create_time = 6;</code>
-     */
-    public boolean hasCreateTime() {
-      return ((bitField0_ & 0x00000020) == 0x00000020);
-    }
-    /**
-     * <code>optional int64 create_time = 6;</code>
-     */
-    public long getCreateTime() {
-      return createTime_;
-    }
-
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized == 1) return true;
-      if (isInitialized == 0) return false;
-
-      if (hasTableName()) {
-        if (!getTableName().isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      if (hasLockOwner()) {
-        if (!getLockOwner().isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, getTableName());
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeMessage(2, getLockOwner());
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeInt64(3, threadId_);
-      }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        output.writeBool(4, isShared_);
-      }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 5, purpose_);
-      }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
-        output.writeInt64(6, createTime_);
-      }
-      unknownFields.writeTo(output);
-    }
-
-    public int getSerializedSize() {
-      int size = memoizedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, getTableName());
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(2, getLockOwner());
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeInt64Size(3, threadId_);
-      }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeBoolSize(4, isShared_);
-      }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(5, purpose_);
-      }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
-        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeInt64Size(6, createTime_);
-      }
-      size += unknownFields.getSerializedSize();
-      memoizedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock other = (org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock) obj;
-
-      boolean result = true;
-      result = result && (hasTableName() == other.hasTableName());
-      if (hasTableName()) {
-        result = result && getTableName()
-            .equals(other.getTableName());
-      }
-      result = result && (hasLockOwner() == other.hasLockOwner());
-      if (hasLockOwner()) {
-        result = result && getLockOwner()
-            .equals(other.getLockOwner());
-      }
-      result = result && (hasThreadId() == other.hasThreadId());
-      if (hasThreadId()) {
-        result = result && (getThreadId()
-            == other.getThreadId());
-      }
-      result = result && (hasIsShared() == other.hasIsShared());
-      if (hasIsShared()) {
-        result = result && (getIsShared()
-            == other.getIsShared());
-      }
-      result = result && (hasPurpose() == other.hasPurpose());
-      if (hasPurpose()) {
-        result = result && getPurpose()
-            .equals(other.getPurpose());
-      }
-      result = result && (hasCreateTime() == other.hasCreateTime());
-      if (hasCreateTime()) {
-        result = result && (getCreateTime()
-            == other.getCreateTime());
-      }
-      result = result && unknownFields.equals(other.unknownFields);
-      return result;
-    }
-
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasTableName()) {
-        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getTableName().hashCode();
-      }
-      if (hasLockOwner()) {
-        hash = (37 * hash) + LOCK_OWNER_FIELD_NUMBER;
-        hash = (53 * hash) + getLockOwner().hashCode();
-      }
-      if (hasThreadId()) {
-        hash = (37 * hash) + THREAD_ID_FIELD_NUMBER;
-        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
-            getThreadId());
-      }
-      if (hasIsShared()) {
-        hash = (37 * hash) + IS_SHARED_FIELD_NUMBER;
-        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashBoolean(
-            getIsShared());
-      }
-      if (hasPurpose()) {
-        hash = (37 * hash) + PURPOSE_FIELD_NUMBER;
-        hash = (53 * hash) + getPurpose().hashCode();
-      }
-      if (hasCreateTime()) {
-        hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER;
-        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
-            getCreateTime());
-      }
-      hash = (29 * hash) + unknownFields.hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(byte[] data)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        byte[] data,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        java.io.InputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseDelimitedWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parseDelimitedFrom(
-        java.io.InputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input);
-    }
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-          .parseWithIOException(PARSER, input, extensionRegistry);
-    }
-
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder() {
-      return DEFAULT_INSTANCE.toBuilder();
-    }
-    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock prototype) {
-      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() {
-      return this == DEFAULT_INSTANCE
-          ? new Builder() : new Builder().mergeFrom(this);
-    }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * <pre>
-     **
-     * Metadata associated with a table lock in zookeeper
-     * </pre>
-     *
-     * Protobuf type {@code hbase.pb.TableLock}
-     */
-    public static final class Builder extends
-        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
-        // @@protoc_insertion_point(builder_implements:hbase.pb.TableLock)
-        org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLockOrBuilder {
-      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_descriptor;
-      }
-
-      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock.class, org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
-                .alwaysUseFieldBuilders) {
-          getTableNameFieldBuilder();
-          getLockOwnerFieldBuilder();
-        }
-      }
-      public Builder clear() {
-        super.clear();
-        if (tableNameBuilder_ == null) {
-          tableName_ = null;
-        } else {
-          tableNameBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        if (lockOwnerBuilder_ == null) {
-          lockOwner_ = null;
-        } else {
-          lockOwnerBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000002);
-        threadId_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000004);
-        isShared_ = false;
-        bitField0_ = (bitField0_ & ~0x00000008);
-        purpose_ = "";
-        bitField0_ = (bitField0_ & ~0x00000010);
-        createTime_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000020);
-        return this;
-      }
-
-      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock build() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock buildPartial() {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock result = new org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        if (tableNameBuilder_ == null) {
-          result.tableName_ = tableName_;
-        } else {
-          result.tableName_ = tableNameBuilder_.build();
-        }
-        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
-          to_bitField0_ |= 0x00000002;
-        }
-        if (lockOwnerBuilder_ == null) {
-          result.lockOwner_ = lockOwner_;
-        } else {
-          result.lockOwner_ = lockOwnerBuilder_.build();
-        }
-        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
-          to_bitField0_ |= 0x00000004;
-        }
-        result.threadId_ = threadId_;
-        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
-          to_bitField0_ |= 0x00000008;
-        }
-        result.isShared_ = isShared_;
-        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
-          to_bitField0_ |= 0x00000010;
-        }
-        result.purpose_ = purpose_;
-        if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
-          to_bitField0_ |= 0x00000020;
-        }
-        result.createTime_ = createTime_;
-        result.bitField0_ = to_bitField0_;
-        onBuilt();
-        return result;
-      }
-
-      public Builder clone() {
-        return (Builder) super.clone();
-      }
-      public Builder setField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          Object value) {
-        return (Builder) super.setField(field, value);
-      }
-      public Builder clearField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
-        return (Builder) super.clearField(field);
-      }
-      public Builder clearOneof(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
-        return (Builder) super.clearOneof(oneof);
-      }
-      public Builder setRepeatedField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          int index, Object value) {
-        return (Builder) super.setRepeatedField(field, index, value);
-      }
-      public Builder addRepeatedField(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
-          Object value) {
-        return (Builder) super.addRepeatedField(field, value);
-      }
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock) {
-          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock other) {
-        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance()) return this;
-        if (other.hasTableName()) {
-          mergeTableName(other.getTableName());
-        }
-        if (other.hasLockOwner()) {
-          mergeLockOwner(other.getLockOwner());
-        }
-        if (other.hasThreadId()) {
-          setThreadId(other.getThreadId());
-        }
-        if (other.hasIsShared()) {
-          setIsShared(other.getIsShared());
-        }
-        if (other.hasPurpose()) {
-          bitField0_ |= 0x00000010;
-          purpose_ = other.purpose_;
-          onChanged();
-        }
-        if (other.hasCreateTime()) {
-          setCreateTime(other.getCreateTime());
-        }
-        this.mergeUnknownFields(other.unknownFields);
-        onChanged();
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        if (hasTableName()) {
-          if (!getTableName().isInitialized()) {
-            return false;
-          }
-        }
-        if (hasLockOwner()) {
-          if (!getLockOwner().isInitialized()) {
-            return false;
-          }
-        }
-        return true;
-      }
-
-      public Builder mergeFrom(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock) e.getUnfinishedMessage();
-          throw e.unwrapIOException();
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_ = null;
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public boolean hasTableName() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() {
-        if (tableNameBuilder_ == null) {
-          return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
-        } else {
-          return tableNameBuilder_.getMessage();
-        }
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public Builder setTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) {
-        if (tableNameBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          tableName_ = value;
-          onChanged();
-        } else {
-          tableNameBuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public Builder setTableName(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
-        if (tableNameBuilder_ == null) {
-          tableName_ = builderForValue.build();
-          onChanged();
-        } else {
-          tableNameBuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public Builder mergeTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) {
-        if (tableNameBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              tableName_ != null &&
-              tableName_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
-            tableName_ =
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
-          } else {
-            tableName_ = value;
-          }
-          onChanged();
-        } else {
-          tableNameBuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public Builder clearTableName() {
-        if (tableNameBuilder_ == null) {
-          tableName_ = null;
-          onChanged();
-        } else {
-          tableNameBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
-        bitField0_ |= 0x00000001;
-        onChanged();
-        return getTableNameFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
-        if (tableNameBuilder_ != null) {
-          return tableNameBuilder_.getMessageOrBuilder();
-        } else {
-          return tableName_ == null ?
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
-        }
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
-          getTableNameFieldBuilder() {
-        if (tableNameBuilder_ == null) {
-          tableNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
-                  getTableName(),
-                  getParentForChildren(),
-                  isClean());
-          tableName_ = null;
-        }
-        return tableNameBuilder_;
-      }
-
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName lockOwner_ = null;
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> lockOwnerBuilder_;
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public boolean hasLockOwner() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName getLockOwner() {
-        if (lockOwnerBuilder_ == null) {
-          return lockOwner_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : lockOwner_;
-        } else {
-          return lockOwnerBuilder_.getMessage();
-        }
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public Builder setLockOwner(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) {
-        if (lockOwnerBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          lockOwner_ = value;
-          onChanged();
-        } else {
-          lockOwnerBuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public Builder setLockOwner(
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-        if (lockOwnerBuilder_ == null) {
-          lockOwner_ = builderForValue.build();
-          onChanged();
-        } else {
-          lockOwnerBuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public Builder mergeLockOwner(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName value) {
-        if (lockOwnerBuilder_ == null) {
-          if (((bitField0_ & 0x00000002) == 0x00000002) &&
-              lockOwner_ != null &&
-              lockOwner_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
-            lockOwner_ =
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.newBuilder(lockOwner_).mergeFrom(value).buildPartial();
-          } else {
-            lockOwner_ = value;
-          }
-          onChanged();
-        } else {
-          lockOwnerBuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public Builder clearLockOwner() {
-        if (lockOwnerBuilder_ == null) {
-          lockOwner_ = null;
-          onChanged();
-        } else {
-          lockOwnerBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000002);
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder getLockOwnerBuilder() {
-        bitField0_ |= 0x00000002;
-        onChanged();
-        return getLockOwnerFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder() {
-        if (lockOwnerBuilder_ != null) {
-          return lockOwnerBuilder_.getMessageOrBuilder();
-        } else {
-          return lockOwner_ == null ?
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance() : lockOwner_;
-        }
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
-          getLockOwnerFieldBuilder() {
-        if (lockOwnerBuilder_ == null) {
-          lockOwnerBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
-                  getLockOwner(),
-                  getParentForChildren(),
-                  isClean());
-          lockOwner_ = null;
-        }
-        return lockOwnerBuilder_;
-      }
-
-      private long threadId_ ;
-      /**
-       * <code>optional int64 thread_id = 3;</code>
-       */
-      public boolean hasThreadId() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
-      }
-      /**
-       * <code>optional int64 thread_id = 3;</code>
-       */
-      public long getThreadId() {
-        return threadId_;
-      }
-      /**
-       * <code>optional int64 thread_id = 3;</code>
-       */
-      public Builder setThreadId(long value) {
-        bitField0_ |= 0x00000004;
-        threadId_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional int64 thread_id = 3;</code>
-       */
-      public Builder clearThreadId() {
-        bitField0_ = (bitField0_ & ~0x00000004);
-        threadId_ = 0L;
-        onChanged();
-        return this;
-      }
-
-      private boolean isShared_ ;
-      /**
-       * <code>optional bool is_shared = 4;</code>
-       */
-      public boolean hasIsShared() {
-        return ((bitField0_ & 0x00000008) == 0x00000008);
-      }
-      /**
-       * <code>optional bool is_shared = 4;</code>
-       */
-      public boolean getIsShared() {
-        return isShared_;
-      }
-      /**
-       * <code>optional bool is_shared = 4;</code>
-       */
-      public Builder setIsShared(boolean value) {
-        bitField0_ |= 0x00000008;
-        isShared_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional bool is_shared = 4;</code>
-       */
-      public Builder clearIsShared() {
-        bitField0_ = (bitField0_ & ~0x00000008);
-        isShared_ = false;
-        onChanged();
-        return this;
-      }
-
-      private java.lang.Object purpose_ = "";
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public boolean hasPurpose() {
-        return ((bitField0_ & 0x00000010) == 0x00000010);
-      }
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public java.lang.String getPurpose() {
-        java.lang.Object ref = purpose_;
-        if (!(ref instanceof java.lang.String)) {
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
-              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
-          java.lang.String s = bs.toStringUtf8();
-          if (bs.isValidUtf8()) {
-            purpose_ = s;
-          }
-          return s;
-        } else {
-          return (java.lang.String) ref;
-        }
-      }
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
-          getPurposeBytes() {
-        java.lang.Object ref = purpose_;
-        if (ref instanceof String) {
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
-              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
-                  (java.lang.String) ref);
-          purpose_ = b;
-          return b;
-        } else {
-          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
-        }
-      }
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public Builder setPurpose(
-          java.lang.String value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000010;
-        purpose_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public Builder clearPurpose() {
-        bitField0_ = (bitField0_ & ~0x00000010);
-        purpose_ = getDefaultInstance().getPurpose();
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public Builder setPurposeBytes(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000010;
-        purpose_ = value;
-        onChanged();
-        return this;
-      }
-
-      private long createTime_ ;
-      /**
-       * <code>optional int64 create_time = 6;</code>
-       */
-      public boolean hasCreateTime() {
-        return ((bitField0_ & 0x00000020) == 0x00000020);
-      }
-      /**
-       * <code>optional int64 create_time = 6;</code>
-       */
-      public long getCreateTime() {
-        return createTime_;
-      }
-      /**
-       * <code>optional int64 create_time = 6;</code>
-       */
-      public Builder setCreateTime(long value) {
-        bitField0_ |= 0x00000020;
-        createTime_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional int64 create_time = 6;</code>
-       */
-      public Builder clearCreateTime() {
-        bitField0_ = (bitField0_ & ~0x00000020);
-        createTime_ = 0L;
-        onChanged();
-        return this;
-      }
-      public final Builder setUnknownFields(
-          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
-        return super.setUnknownFields(unknownFields);
-      }
-
-      public final Builder mergeUnknownFields(
-          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
-        return super.mergeUnknownFields(unknownFields);
-      }
-
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.TableLock)
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.TableLock)
-    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock DEFAULT_INSTANCE;
-    static {
-      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock();
-    }
-
-    public static org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock getDefaultInstance() {
-      return DEFAULT_INSTANCE;
-    }
-
-    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<TableLock>
-        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<TableLock>() {
-      public TableLock parsePartialFrom(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
-          return new TableLock(input, extensionRegistry);
-      }
-    };
-
-    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<TableLock> parser() {
-      return PARSER;
-    }
-
-    @java.lang.Override
-    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<TableLock> getParserForType() {
-      return PARSER;
-    }
-
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.TableLock getDefaultInstanceForType() {
-      return DEFAULT_INSTANCE;
-    }
-
-  }
-
   public interface SwitchStateOrBuilder extends
       // @@protoc_insertion_point(interface_extends:hbase.pb.SwitchState)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
@@ -5861,11 +4623,6 @@ public final class ZooKeeperProtos {
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_DeprecatedTableState_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_TableLock_descriptor;
-  private static final 
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_hbase_pb_TableLock_fieldAccessorTable;
-  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_SwitchState_descriptor;
   private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -5897,14 +4654,10 @@ public final class ZooKeeperProtos {
       "Y\020\002\"\225\001\n\024DeprecatedTableState\022<\n\005state\030\001 " +
       "\002(\0162$.hbase.pb.DeprecatedTableState.Stat" +
       "e:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISA" +
-      "BLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"\252\001\n\t" +
-      "TableLock\022\'\n\ntable_name\030\001 \001(\0132\023.hbase.pb",
-      ".TableName\022(\n\nlock_owner\030\002 \001(\0132\024.hbase.p" +
-      "b.ServerName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_sh" +
-      "ared\030\004 \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_ti" +
-      "me\030\006 \001(\003\"\036\n\013SwitchState\022\017\n\007enabled\030\001 \001(\010" +
-      "BL\n1org.apache.hadoop.hbase.shaded.proto" +
-      "buf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
+      "BLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"\036\n\013S" +
+      "witchState\022\017\n\007enabled\030\001 \001(\010BL\n1org.apach",
+      "e.hadoop.hbase.shaded.protobuf.generated" +
+      "B\017ZooKeeperProtosH\001\210\001\001\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -5950,14 +4703,8 @@ public final class ZooKeeperProtos {
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_DeprecatedTableState_descriptor,
         new java.lang.String[] { "State", });
-    internal_static_hbase_pb_TableLock_descriptor =
-      getDescriptor().getMessageTypes().get(5);
-    internal_static_hbase_pb_TableLock_fieldAccessorTable = new
-      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
-        internal_static_hbase_pb_TableLock_descriptor,
-        new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", });
     internal_static_hbase_pb_SwitchState_descriptor =
-      getDescriptor().getMessageTypes().get(6);
+      getDescriptor().getMessageTypes().get(5);
     internal_static_hbase_pb_SwitchState_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SwitchState_descriptor,

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-protocol-shaded/src/main/protobuf/ZooKeeper.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/ZooKeeper.proto b/hbase-protocol-shaded/src/main/protobuf/ZooKeeper.proto
index b3bd2ec..fcf9bf5 100644
--- a/hbase-protocol-shaded/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/ZooKeeper.proto
@@ -106,18 +106,6 @@ message DeprecatedTableState {
 }
 
 /**
- * Metadata associated with a table lock in zookeeper
- */
-message TableLock {
-  optional TableName table_name = 1;
-  optional ServerName lock_owner = 2;
-  optional int64 thread_id = 3;
-  optional bool is_shared = 4;
-  optional string purpose = 5;
-  optional int64 create_time = 6;
-}
-
-/**
  * State of the switch.
  */
 message SwitchState {
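
For readers skimming the large generated-code hunk above: the TableLock message removed here (and its generated Java class deleted earlier in this diff) carried the metadata that the ZK-based table lock stored on its lock znode, accessed through the generated builder/parser API. The snippet below is only an illustrative sketch against the deleted ZooKeeperProtos.TableLock class -- the purpose string is made up and table_name/lock_owner are simply omitted -- it is not code from this commit:

    // build the lock metadata with the generated Builder (all fields are optional)
    ZooKeeperProtos.TableLock lock = ZooKeeperProtos.TableLock.newBuilder()
        .setThreadId(Thread.currentThread().getId())
        .setIsShared(false)
        .setPurpose("alter table")                    // optional string purpose = 5
        .setCreateTime(System.currentTimeMillis())
        .build();
    byte[] data = lock.toByteArray();                 // serialized form, stored as znode metadata
    ZooKeeperProtos.TableLock parsed =
        ZooKeeperProtos.TableLock.parseFrom(data);    // read back by other lock waiters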


[22/50] [abbrv] hbase git commit: HBASE-16831 Procedure V2 - Remove org.apache.hadoop.hbase.zookeeper.lock (Appy)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/lock/TestZKInterProcessReadWriteLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/lock/TestZKInterProcessReadWriteLock.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/lock/TestZKInterProcessReadWriteLock.java
deleted file mode 100644
index 675afc0..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/zookeeper/lock/TestZKInterProcessReadWriteLock.java
+++ /dev/null
@@ -1,360 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper.lock;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.DaemonThreadFactory;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.InterProcessLock;
-import org.apache.hadoop.hbase.InterProcessLock.MetadataHandler;
-import org.apache.hadoop.hbase.MultithreadedTestUtil;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.collect.Lists;
-
-@Category({MiscTests.class, MediumTests.class})
-public class TestZKInterProcessReadWriteLock {
-
-  private static final Log LOG =
-      LogFactory.getLog(TestZKInterProcessReadWriteLock.class);
-
-  private static final HBaseTestingUtility TEST_UTIL =
-      new HBaseTestingUtility();
-
-  private static final int NUM_THREADS = 10;
-
-  private static Configuration conf;
-
-  private final AtomicBoolean isLockHeld = new AtomicBoolean(false);
-  private final ExecutorService executor =
-      Executors.newFixedThreadPool(NUM_THREADS,
-          new DaemonThreadFactory("TestZKInterProcessReadWriteLock-"));
-
-  @BeforeClass
-  public static void beforeAllTests() throws Exception {
-    conf = TEST_UTIL.getConfiguration();
-    TEST_UTIL.startMiniZKCluster();
-    conf.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
-    ZooKeeperWatcher zkw = getZooKeeperWatcher("setup");
-    ZKUtil.createWithParents(zkw, zkw.znodePaths.tableLockZNode);
-  }
-
-  @AfterClass
-  public static void afterAllTests() throws Exception {
-    TEST_UTIL.shutdownMiniZKCluster();
-  }
-
-  @After
-  public void tearDown() {
-    executor.shutdown();
-  }
-
-  private static ZooKeeperWatcher getZooKeeperWatcher(String desc)
-  throws IOException {
-    return TEST_UTIL.getZooKeeperWatcher();
-  }
-
-
-  @Test(timeout = 30000)
-  public void testWriteLockExcludesWriters() throws Exception {
-    final String testName = "testWriteLockExcludesWriters";
-    final ZKInterProcessReadWriteLock readWriteLock =
-        getReadWriteLock(testName);
-    List<Future<Void>> results = Lists.newArrayList();
-    for (int i = 0; i < NUM_THREADS; ++i) {
-      final String threadDesc = testName + i;
-      results.add(executor.submit(new Callable<Void>() {
-        @Override
-        public Void call() throws IOException {
-          ZKInterProcessWriteLock writeLock =
-              readWriteLock.writeLock(Bytes.toBytes(threadDesc));
-          try {
-            writeLock.acquire();
-            try {
-              // No one else should hold the lock
-              assertTrue(isLockHeld.compareAndSet(false, true));
-              Thread.sleep(1000);
-              // No one else should have released the lock
-              assertTrue(isLockHeld.compareAndSet(true, false));
-            } finally {
-              isLockHeld.set(false);
-              writeLock.release();
-            }
-          } catch (InterruptedException e) {
-            LOG.warn(threadDesc + " interrupted", e);
-            Thread.currentThread().interrupt();
-            throw new InterruptedIOException();
-          }
-          return null;
-        }
-      }));
-
-    }
-    MultithreadedTestUtil.assertOnFutures(results);
-  }
-
-  @Test(timeout = 30000)
-  public void testReadLockDoesNotExcludeReaders() throws Exception {
-    final String testName = "testReadLockDoesNotExcludeReaders";
-    final ZKInterProcessReadWriteLock readWriteLock =
-        getReadWriteLock(testName);
-    final CountDownLatch locksAcquiredLatch = new CountDownLatch(NUM_THREADS);
-    final AtomicInteger locksHeld = new AtomicInteger(0);
-    List<Future<Void>> results = Lists.newArrayList();
-    for (int i = 0; i < NUM_THREADS; ++i) {
-      final String threadDesc = testName + i;
-      results.add(executor.submit(new Callable<Void>() {
-        @Override
-        public Void call() throws Exception {
-          ZKInterProcessReadLock readLock =
-              readWriteLock.readLock(Bytes.toBytes(threadDesc));
-          readLock.acquire();
-          try {
-            locksHeld.incrementAndGet();
-            locksAcquiredLatch.countDown();
-            Thread.sleep(1000);
-          } finally {
-            readLock.release();
-            locksHeld.decrementAndGet();
-          }
-          return null;
-        }
-      }));
-    }
-    locksAcquiredLatch.await();
-    assertEquals(locksHeld.get(), NUM_THREADS);
-    MultithreadedTestUtil.assertOnFutures(results);
-  }
-
-  @Test(timeout = 30000)
-  public void testReadLockExcludesWriters() throws Exception {
-    // Submit a read lock request first
-    // Submit a write lock request second
-    final String testName = "testReadLockExcludesWriters";
-    List<Future<Void>> results = Lists.newArrayList();
-    final CountDownLatch readLockAcquiredLatch = new CountDownLatch(1);
-    Callable<Void> acquireReadLock = new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        final String threadDesc = testName + "-acquireReadLock";
-        ZKInterProcessReadLock readLock =
-            getReadWriteLock(testName).readLock(Bytes.toBytes(threadDesc));
-        readLock.acquire();
-        try {
-          assertTrue(isLockHeld.compareAndSet(false, true));
-          readLockAcquiredLatch.countDown();
-          Thread.sleep(1000);
-        } finally {
-          isLockHeld.set(false);
-          readLock.release();
-        }
-        return null;
-      }
-    };
-    Callable<Void> acquireWriteLock = new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        final String threadDesc = testName + "-acquireWriteLock";
-        ZKInterProcessWriteLock writeLock =
-            getReadWriteLock(testName).writeLock(Bytes.toBytes(threadDesc));
-        readLockAcquiredLatch.await();
-        assertTrue(isLockHeld.get());
-        writeLock.acquire();
-        try {
-          assertFalse(isLockHeld.get());
-        } finally {
-          writeLock.release();
-        }
-        return null;
-      }
-    };
-    results.add(executor.submit(acquireReadLock));
-    results.add(executor.submit(acquireWriteLock));
-    MultithreadedTestUtil.assertOnFutures(results);
-  }
-
-  private static ZKInterProcessReadWriteLock getReadWriteLock(String testName)
-      throws IOException {
-    MetadataHandler handler = new MetadataHandler() {
-      @Override
-      public void handleMetadata(byte[] ownerMetadata) {
-        LOG.info("Lock info: " + Bytes.toString(ownerMetadata));
-      }
-    };
-    ZooKeeperWatcher zkWatcher = getZooKeeperWatcher(testName);
-    String znode = ZKUtil.joinZNode(zkWatcher.znodePaths.tableLockZNode, testName);
-
-    return new ZKInterProcessReadWriteLock(zkWatcher, znode, handler);
-  }
-
-  @Test(timeout = 30000)
-  public void testWriteLockExcludesReaders() throws Exception {
-    // Submit a write lock request first
-    // Submit a read lock request second
-    final String testName = "testWriteLockExcludesReaders";
-    List<Future<Void>> results = Lists.newArrayList();
-    final CountDownLatch writeLockAcquiredLatch = new CountDownLatch(1);
-    Callable<Void> acquireWriteLock = new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        final String threadDesc = testName + "-acquireWriteLock";
-        ZKInterProcessWriteLock writeLock =
-            getReadWriteLock(testName).writeLock(Bytes.toBytes(threadDesc));
-        writeLock.acquire();
-        try {
-          writeLockAcquiredLatch.countDown();
-          assertTrue(isLockHeld.compareAndSet(false, true));
-          Thread.sleep(1000);
-        } finally {
-          isLockHeld.set(false);
-          writeLock.release();
-        }
-        return null;
-      }
-    };
-    Callable<Void> acquireReadLock = new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        final String threadDesc = testName + "-acquireReadLock";
-        ZKInterProcessReadLock readLock =
-            getReadWriteLock(testName).readLock(Bytes.toBytes(threadDesc));
-        writeLockAcquiredLatch.await();
-        readLock.acquire();
-        try {
-          assertFalse(isLockHeld.get());
-        } finally {
-          readLock.release();
-        }
-        return null;
-      }
-    };
-    results.add(executor.submit(acquireWriteLock));
-    results.add(executor.submit(acquireReadLock));
-    MultithreadedTestUtil.assertOnFutures(results);
-  }
-
-  @Test(timeout = 60000)
-  public void testTimeout() throws Exception {
-    final String testName = "testTimeout";
-    final CountDownLatch lockAcquiredLatch = new CountDownLatch(1);
-    Callable<Void> shouldHog = new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        final String threadDesc = testName + "-shouldHog";
-        ZKInterProcessWriteLock lock =
-            getReadWriteLock(testName).writeLock(Bytes.toBytes(threadDesc));
-        lock.acquire();
-        lockAcquiredLatch.countDown();
-        Thread.sleep(10000);
-        lock.release();
-        return null;
-      }
-    };
-    Callable<Void> shouldTimeout = new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        final String threadDesc = testName + "-shouldTimeout";
-        ZKInterProcessWriteLock lock =
-            getReadWriteLock(testName).writeLock(Bytes.toBytes(threadDesc));
-        lockAcquiredLatch.await();
-        assertFalse(lock.tryAcquire(5000));
-        return null;
-      }
-    };
-    Callable<Void> shouldAcquireLock = new Callable<Void>() {
-      @Override
-      public Void call() throws Exception {
-        final String threadDesc = testName + "-shouldAcquireLock";
-        ZKInterProcessWriteLock lock =
-            getReadWriteLock(testName).writeLock(Bytes.toBytes(threadDesc));
-        lockAcquiredLatch.await();
-        assertTrue(lock.tryAcquire(30000));
-        lock.release();
-        return null;
-      }
-    };
-    List<Future<Void>> results = Lists.newArrayList();
-    results.add(executor.submit(shouldHog));
-    results.add(executor.submit(shouldTimeout));
-    results.add(executor.submit(shouldAcquireLock));
-    MultithreadedTestUtil.assertOnFutures(results);
-  }
-
-  @Test(timeout = 60000)
-  public void testMultipleClients() throws Exception {
-    //tests lock usage from multiple zookeeper clients with different sessions.
-    //acquire one read lock, then one write lock
-    final String testName = "testMultipleClients";
-
-    //different zookeeper sessions with separate identifiers
-    ZooKeeperWatcher zkWatcher1 = new ZooKeeperWatcher(conf, "testMultipleClients-1", null);
-    ZooKeeperWatcher zkWatcher2 = new ZooKeeperWatcher(conf, "testMultipleClients-2", null);
-
-    String znode = ZKUtil.joinZNode(zkWatcher1.znodePaths.tableLockZNode, testName);
-
-    ZKInterProcessReadWriteLock clientLock1
-      = new ZKInterProcessReadWriteLock(zkWatcher1, znode, null);
-    ZKInterProcessReadWriteLock clientLock2
-      = new ZKInterProcessReadWriteLock(zkWatcher2, znode, null);
-
-    InterProcessLock lock1 = clientLock1.readLock(Bytes.toBytes("client1"));
-    lock1.acquire();
-
-    //try to acquire, but it will timeout. We are testing whether this will cause any problems
-    //due to the read lock being from another client
-    InterProcessLock lock2 = clientLock2.writeLock(Bytes.toBytes("client2"));
-    assertFalse(lock2.tryAcquire(1000));
-
-    lock1.release();
-
-    //this time it will acquire
-    assertTrue(lock2.tryAcquire(5000));
-    lock2.release();
-    zkWatcher1.close();
-    zkWatcher2.close();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 285e358..084629e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -3076,7 +3076,7 @@
               <name>User API</name>
               <description>The HBase Application Programmer's API</description>
               <excludePackageNames>
-                org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop
 .hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.hbase.zookeeper.lock:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress*
+                org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop
 .hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress*
               </excludePackageNames>
               <!-- switch on dependency-driven aggregation -->
               <includeDependencySources>false</includeDependencySources>
@@ -3133,7 +3133,7 @@
               <name>User API</name>
               <description>The HBase Application Programmer's API</description>
               <excludePackageNames>
-                org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop
 .hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.hbase.zookeeper.lock:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress*
+                org.apache.hadoop.hbase.backup*:org.apache.hadoop.hbase.catalog:org.apache.hadoop.hbase.client.coprocessor:org.apache.hadoop.hbase.client.metrics:org.apache.hadoop.hbase.codec*:org.apache.hadoop.hbase.constraint:org.apache.hadoop.hbase.coprocessor.*:org.apache.hadoop.hbase.executor:org.apache.hadoop.hbase.fs:*.generated.*:org.apache.hadoop.hbase.io.hfile.*:org.apache.hadoop.hbase.mapreduce.hadoopbackport:org.apache.hadoop.hbase.mapreduce.replication:org.apache.hadoop.hbase.master.*:org.apache.hadoop.hbase.metrics*:org.apache.hadoop.hbase.migration:org.apache.hadoop.hbase.monitoring:org.apache.hadoop.hbase.p*:org.apache.hadoop.hbase.regionserver.compactions:org.apache.hadoop.hbase.regionserver.handler:org.apache.hadoop.hbase.regionserver.snapshot:org.apache.hadoop.hbase.replication.*:org.apache.hadoop.hbase.rest.filter:org.apache.hadoop.hbase.rest.model:org.apache.hadoop.hbase.rest.p*:org.apache.hadoop.hbase.security.*:org.apache.hadoop.hbase.thrift*:org.apache.hadoop
 .hbase.tmpl.*:org.apache.hadoop.hbase.tool:org.apache.hadoop.hbase.trace:org.apache.hadoop.hbase.util.byterange*:org.apache.hadoop.hbase.util.test:org.apache.hadoop.hbase.util.vint:org.apache.hadoop.metrics2*:org.apache.hadoop.hbase.io.compress*
               </excludePackageNames>
               <!-- switch on dependency-driven aggregation -->
               <includeDependencySources>false</includeDependencySources>


[04/50] [abbrv] hbase git commit: HBASE-17474 Reduce frequency of NoSuchMethodException when calling setStoragePolicy()

Posted by el...@apache.org.
HBASE-17474 Reduce frequency of NoSuchMethodException when calling setStoragePolicy()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/287f95a5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/287f95a5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/287f95a5

Branch: refs/heads/HBASE-16961
Commit: 287f95a579ee95a40e0f3a0986a246d29718ee3b
Parents: faa9f73
Author: Yu Li <li...@apache.org>
Authored: Wed Jan 18 07:30:21 2017 +0800
Committer: Yu Li <li...@apache.org>
Committed: Wed Jan 18 07:30:21 2017 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/287f95a5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
index 6fd1764..e3d39ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionFileSystem.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -196,7 +197,9 @@ public class HRegionFileSystem {
     try {
       ReflectionUtils.invokeMethod(this.fs, "setStoragePolicy", storeDir, policyName);
     } catch (Exception e) {
-      LOG.warn("Failed to set storage policy of [" + storeDir + "] to [" + policyName + "]", e);
+      if (!(this.fs instanceof LocalFileSystem)) {
+        LOG.warn("Failed to set storage policy of [" + storeDir + "] to [" + policyName + "]", e);
+      }
     }
   }
 

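For readers skimming the patch, the net effect of the hunk above is roughly the following
(a minimal sketch only; LOG, ReflectionUtils and the Hadoop fs types are taken from the diff,
the standalone helper method and its parameters are illustrative assumptions):

    // Sketch: mirrors the patched setStoragePolicy handling shown above.
    void trySetStoragePolicy(FileSystem fs, Path storeDir, String policyName) {
      try {
        // setStoragePolicy is invoked reflectively because older Hadoop versions lack it.
        ReflectionUtils.invokeMethod(fs, "setStoragePolicy", storeDir, policyName);
      } catch (Exception e) {
        // LocalFileSystem never supports storage policies, so skip the noisy warning there.
        if (!(fs instanceof LocalFileSystem)) {
          LOG.warn("Failed to set storage policy of [" + storeDir + "] to [" + policyName + "]", e);
        }
      }
    }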

[41/50] [abbrv] hbase git commit: HBASE-17067 Procedure v2 - remove zklock/tryLock and use wait/wake (Matteo Bertozzi)

Posted by el...@apache.org.
HBASE-17067 Procedure v2 - remove zklock/tryLock and use wait/wake (Matteo Bertozzi)

This is an amalgam of https://reviews.apache.org/r/54435/ and
https://github.com/matteobertozzi/hbase/commit/9c14863594a8ff67e406d1e0efe0a874f71b858c

Removes the notion of suspend/resume from Procedure. Instead, acquireLock() reports one of
the lock states below, and the executor simply unschedules the procedure if the lock is not
yet available.

 LOCK_ACQUIRED should be returned when the proc holds the lock and is ready to execute.
 LOCK_YIELD_WAIT should be returned when the proc does not hold the lock and the framework
   should take care of re-adding the procedure to the runnable set for retry.
 LOCK_EVENT_WAIT should be returned when the proc does not hold the lock and someone else will
   take care of re-adding the procedure to the runnable set when the lock becomes available.

A side benefit is being able to undo a bunch of synchronization around procedure management.

Signed-off-by: Michael Stack <st...@apache.org>

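To make the new contract concrete, here is a minimal sketch of a procedure-side override
(the LockState values come from the patch below; the AtomicBoolean field, the MyEnv type and
the rest of the enclosing procedure are illustrative assumptions, not part of the patch):

    // Sketch only: how a concrete Procedure might implement the new contract.
    private final AtomicBoolean lock = new AtomicBoolean(false);

    @Override
    protected LockState acquireLock(final MyEnv env) {
      if (lock.compareAndSet(false, true)) {
        return LockState.LOCK_ACQUIRED;   // lock held, ready to execute
      }
      // No lock: ask the framework to re-add us to the runnable set and retry later.
      return LockState.LOCK_YIELD_WAIT;
    }

    @Override
    protected void releaseLock(final MyEnv env) {
      lock.set(false);
    }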

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/980c8c20
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/980c8c20
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/980c8c20

Branch: refs/heads/HBASE-16961
Commit: 980c8c204775e789fae057a6383ff4b725067b83
Parents: ba4a926
Author: Michael Stack <st...@apache.org>
Authored: Thu Jan 19 14:11:53 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Mon Jan 23 09:29:16 2017 -0800

----------------------------------------------------------------------
 .../procedure2/AbstractProcedureScheduler.java  |   2 -
 .../hadoop/hbase/procedure2/Procedure.java      |  54 ++--
 .../hbase/procedure2/ProcedureExecutor.java     |  77 +++--
 .../procedure2/store/wal/WALProcedureStore.java |   1 -
 .../procedure2/ProcedureTestingUtility.java     |  52 ++--
 .../procedure2/TestProcedureSuspended.java      |   5 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |   2 +-
 .../hbase/master/locking/LockProcedure.java     |  28 +-
 .../AbstractStateMachineNamespaceProcedure.java |  13 +-
 .../AbstractStateMachineTableProcedure.java     |  13 +-
 .../procedure/CreateNamespaceProcedure.java     |   9 +-
 .../master/procedure/CreateTableProcedure.java  |   9 +-
 .../master/procedure/MasterProcedureEnv.java    |   5 -
 .../procedure/MasterProcedureScheduler.java     | 279 ++++++++++---------
 .../procedure/MergeTableRegionsProcedure.java   |  12 +-
 .../master/procedure/ServerCrashProcedure.java  |  13 +-
 .../procedure/SplitTableRegionProcedure.java    |   7 +-
 .../hbase/master/locking/TestLockManager.java   |   1 -
 ...ProcedureSchedulerPerformanceEvaluation.java |  40 ++-
 .../MasterProcedureTestingUtility.java          |  13 +-
 .../procedure/TestMasterProcedureEvents.java    |  11 +-
 .../procedure/TestMasterProcedureScheduler.java | 124 ++++-----
 ...TestMasterProcedureSchedulerConcurrency.java |  76 +----
 23 files changed, 425 insertions(+), 421 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index dc94983..ff8d978 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -289,12 +289,10 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler {
   }
 
   protected void suspendProcedure(final ProcedureEventQueue event, final Procedure procedure) {
-    procedure.suspend();
     event.suspendProcedure(procedure);
   }
 
   protected void wakeProcedure(final Procedure procedure) {
-    procedure.resume();
     push(procedure, /* addFront= */ true, /* notify= */false);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 3f3cf33..fee5250 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -59,7 +59,13 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceStability.Evolving
 public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
   public static final long NO_PROC_ID = -1;
-  public static final int NO_TIMEOUT = -1;
+  protected static final int NO_TIMEOUT = -1;
+
+  public enum LockState {
+    LOCK_ACQUIRED,       // lock acquired and ready to execute
+    LOCK_YIELD_WAIT,     // lock not acquired, framework needs to yield
+    LOCK_EVENT_WAIT,     // lock not acquired, an event will yield the procedure
+  }
 
   // unchanged after initialization
   private NonceKey nonceKey = null;
@@ -80,9 +86,6 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
 
   private volatile byte[] result = null;
 
-  // TODO: it will be nice having pointers to allow the scheduler doing suspend/resume tricks
-  private boolean suspended = false;
-
   /**
    * The main code of the procedure. It must be idempotent since execute()
    * may be called multiple time in case of machine failure in the middle
@@ -142,14 +145,23 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
   /**
    * The user should override this method, and try to take a lock if necessary.
    * A lock can be anything, and it is up to the implementor.
-   * Example: in our Master we can execute request in parallel for different tables
-   *          create t1 and create t2 can be executed at the same time.
-   *          anything else on t1/t2 is queued waiting that specific table create to happen.
    *
-   * @return true if the lock was acquired and false otherwise
+   * <p>Example: in our Master we can execute request in parallel for different tables.
+   * We can create t1 and create t2 and this can be executed at the same time.
+   * Anything else on t1/t2 is queued waiting that specific table create to happen.
+   *
+   * <p>There are 3 LockState:
+   * <ul><li>LOCK_ACQUIRED should be returned when the proc has the lock and the proc is
+   * ready to execute.</li>
+   * <li>LOCK_YIELD_WAIT should be returned when the proc has not the lock and the framework
+   * should take care of readding the procedure back to the runnable set for retry</li>
+   * <li>LOCK_EVENT_WAIT should be returned when the proc has not the lock and someone will
+   * take care of readding the procedure back to the runnable set when the lock is available.
+   * </li></ul>
+   * @return the lock state as described above.
    */
-  protected boolean acquireLock(final TEnvironment env) {
-    return true;
+  protected LockState acquireLock(final TEnvironment env) {
+    return LockState.LOCK_ACQUIRED;
   }
 
   /**
@@ -301,9 +313,6 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
    */
   protected void toStringState(StringBuilder builder) {
     builder.append(getState());
-    if (isSuspended()) {
-      builder.append("|SUSPENDED");
-    }
   }
 
   /**
@@ -495,23 +504,6 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
   // ==============================================================================================
 
   /**
-   * @return true if the procedure is in a suspended state,
-   *         waiting for the resources required to execute the procedure will become available.
-   */
-  public synchronized boolean isSuspended() {
-    return suspended;
-  }
-
-  public synchronized void suspend() {
-    suspended = true;
-  }
-
-  public synchronized void resume() {
-    assert isSuspended() : this + " expected suspended state, got " + state;
-    suspended = false;
-  }
-
-  /**
    * @return true if the procedure is in a RUNNABLE state.
    */
   protected synchronized boolean isRunnable() {
@@ -737,7 +729,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
    * Internal method called by the ProcedureExecutor that starts the user-level code acquireLock().
    */
   @InterfaceAudience.Private
-  protected boolean doAcquireLock(final TEnvironment env) {
+  protected LockState doAcquireLock(final TEnvironment env) {
     return acquireLock(env);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index 0912cb7..c5f6daf 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException;
+import org.apache.hadoop.hbase.procedure2.Procedure.LockState;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
 import org.apache.hadoop.hbase.procedure2.util.DelayedUtil;
@@ -255,6 +256,7 @@ public class ProcedureExecutor<TEnvironment> {
   private final AtomicBoolean running = new AtomicBoolean(false);
   private final TEnvironment environment;
   private final ProcedureStore store;
+
   private final boolean checkOwnerSet;
 
   public ProcedureExecutor(final Configuration conf, final TEnvironment environment,
@@ -1090,17 +1092,34 @@ public class ProcedureExecutor<TEnvironment> {
       if (!procStack.acquire(proc)) {
         if (procStack.setRollback()) {
           // we have the 'rollback-lock' we can start rollingback
-          if (!executeRollback(rootProcId, procStack)) {
-            procStack.unsetRollback();
-            scheduler.yield(proc);
+          switch (executeRollback(rootProcId, procStack)) {
+            case LOCK_ACQUIRED:
+                break;
+            case LOCK_YIELD_WAIT:
+              scheduler.yield(proc);
+              procStack.unsetRollback();
+              break;
+            case LOCK_EVENT_WAIT:
+              procStack.unsetRollback();
+              break;
+            default:
+              throw new UnsupportedOperationException();
           }
         } else {
           // if we can't rollback means that some child is still running.
           // the rollback will be executed after all the children are done.
           // If the procedure was never executed, remove and mark it as rolledback.
           if (!proc.wasExecuted()) {
-            if (!executeRollback(proc)) {
-              scheduler.yield(proc);
+            switch (executeRollback(proc)) {
+              case LOCK_ACQUIRED:
+                break;
+              case LOCK_YIELD_WAIT:
+                scheduler.yield(proc);
+                break;
+              case LOCK_EVENT_WAIT:
+                break;
+              default:
+                throw new UnsupportedOperationException();
             }
           }
         }
@@ -1109,11 +1128,19 @@ public class ProcedureExecutor<TEnvironment> {
 
       // Execute the procedure
       assert proc.getState() == ProcedureState.RUNNABLE : proc;
-      if (acquireLock(proc)) {
-        execProcedure(procStack, proc);
-        releaseLock(proc, false);
-      } else {
-        scheduler.yield(proc);
+      switch (acquireLock(proc)) {
+        case LOCK_ACQUIRED:
+          execProcedure(procStack, proc);
+          releaseLock(proc, false);
+          break;
+        case LOCK_YIELD_WAIT:
+          scheduler.yield(proc);
+          break;
+        case LOCK_EVENT_WAIT:
+          // someone will wake us up when the lock is available
+          break;
+        default:
+          throw new UnsupportedOperationException();
       }
       procStack.release(proc);
 
@@ -1139,13 +1166,13 @@ public class ProcedureExecutor<TEnvironment> {
     } while (procStack.isFailed());
   }
 
-  private boolean acquireLock(final Procedure proc) {
+  private LockState acquireLock(final Procedure proc) {
     final TEnvironment env = getEnvironment();
     // hasLock() is used in conjunction with holdLock().
     // This allows us to not rewrite or carry around the hasLock() flag
     // for every procedure. the hasLock() have meaning only if holdLock() is true.
     if (proc.holdLock(env) && proc.hasLock(env)) {
-      return true;
+      return LockState.LOCK_ACQUIRED;
     }
     return proc.doAcquireLock(env);
   }
@@ -1164,7 +1191,7 @@ public class ProcedureExecutor<TEnvironment> {
    * Once the procedure is rolledback, the root-procedure will be visible as
    * finished to user, and the result will be the fatal exception.
    */
-  private boolean executeRollback(final long rootProcId, final RootProcedureState procStack) {
+  private LockState executeRollback(final long rootProcId, final RootProcedureState procStack) {
     final Procedure rootProc = procedures.get(rootProcId);
     RemoteProcedureException exception = rootProc.getException();
     if (exception == null) {
@@ -1181,13 +1208,15 @@ public class ProcedureExecutor<TEnvironment> {
     while (stackTail --> 0) {
       final Procedure proc = subprocStack.get(stackTail);
 
-      if (!reuseLock && !acquireLock(proc)) {
+      LockState lockState;
+      if (!reuseLock && (lockState = acquireLock(proc)) != LockState.LOCK_ACQUIRED) {
         // can't take a lock on the procedure, add the root-proc back on the
         // queue waiting for the lock availability
-        return false;
+        return lockState;
       }
 
-      boolean abortRollback = !executeRollback(proc);
+      lockState = executeRollback(proc);
+      boolean abortRollback = lockState != LockState.LOCK_ACQUIRED;
       abortRollback |= !isRunning() || !store.isRunning();
 
       // If the next procedure is the same to this one
@@ -1201,14 +1230,14 @@ public class ProcedureExecutor<TEnvironment> {
       // allows to kill the executor before something is stored to the wal.
       // useful to test the procedure recovery.
       if (abortRollback) {
-        return false;
+        return lockState;
       }
 
       subprocStack.remove(stackTail);
 
       // if the procedure is kind enough to pass the slot to someone else, yield
       if (proc.isYieldAfterExecutionStep(getEnvironment())) {
-        return false;
+        return LockState.LOCK_YIELD_WAIT;
       }
 
       if (proc != rootProc) {
@@ -1221,7 +1250,7 @@ public class ProcedureExecutor<TEnvironment> {
              " exec-time=" + StringUtils.humanTimeDiff(rootProc.elapsedTime()) +
              " exception=" + exception.getMessage());
     procedureFinished(rootProc);
-    return true;
+    return LockState.LOCK_ACQUIRED;
   }
 
   /**
@@ -1229,17 +1258,17 @@ public class ProcedureExecutor<TEnvironment> {
    * It updates the store with the new state (stack index)
    * or will remove completly the procedure in case it is a child.
    */
-  private boolean executeRollback(final Procedure proc) {
+  private LockState executeRollback(final Procedure proc) {
     try {
       proc.doRollback(getEnvironment());
     } catch (IOException e) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Roll back attempt failed for " + proc, e);
       }
-      return false;
+      return LockState.LOCK_YIELD_WAIT;
     } catch (InterruptedException e) {
       handleInterruptedException(proc, e);
-      return false;
+      return LockState.LOCK_YIELD_WAIT;
     } catch (Throwable e) {
       // Catch NullPointerExceptions or similar errors...
       LOG.fatal("CODE-BUG: Uncatched runtime exception for procedure: " + proc, e);
@@ -1250,7 +1279,7 @@ public class ProcedureExecutor<TEnvironment> {
     if (testing != null && testing.shouldKillBeforeStoreUpdate()) {
       LOG.debug("TESTING: Kill before store update");
       stop();
-      return false;
+      return LockState.LOCK_YIELD_WAIT;
     }
 
     if (proc.removeStackIndex()) {
@@ -1270,7 +1299,7 @@ public class ProcedureExecutor<TEnvironment> {
       store.update(proc);
     }
 
-    return true;
+    return LockState.LOCK_ACQUIRED;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index d4d5773..5042554 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -27,7 +27,6 @@ import java.util.Comparator;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
-import java.util.List;
 import java.util.Set;
 import java.util.concurrent.LinkedTransferQueue;
 import java.util.concurrent.TimeUnit;

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
index 8aa2088..2a659f8 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.procedure2;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.util.concurrent.Callable;
 import java.util.ArrayList;
 import java.util.Set;
 
@@ -67,27 +68,46 @@ public class ProcedureTestingUtility {
     });
   }
 
-  public static <TEnv> void restart(ProcedureExecutor<TEnv> procExecutor)
-      throws Exception {
-    restart(procExecutor, null, true);
+  public static <TEnv> void restart(final ProcedureExecutor<TEnv> procExecutor) throws Exception {
+    restart(procExecutor, false, true, null, null);
   }
 
-  public static <TEnv> void restart(ProcedureExecutor<TEnv> procExecutor,
-      Runnable beforeStartAction, boolean failOnCorrupted) throws Exception {
-    ProcedureStore procStore = procExecutor.getStore();
-    int storeThreads = procExecutor.getCorePoolSize();
-    int execThreads = procExecutor.getCorePoolSize();
+  public static <TEnv> void restart(final ProcedureExecutor<TEnv> procExecutor,
+      final boolean avoidTestKillDuringRestart, final boolean failOnCorrupted,
+      final Callable<Void> stopAction, final Callable<Void> startAction)
+      throws Exception {
+    final ProcedureStore procStore = procExecutor.getStore();
+    final int storeThreads = procExecutor.getCorePoolSize();
+    final int execThreads = procExecutor.getCorePoolSize();
+
+    final ProcedureExecutor.Testing testing = procExecutor.testing;
+    if (avoidTestKillDuringRestart) {
+      procExecutor.testing = null;
+    }
+
     // stop
+    LOG.info("RESTART - Stop");
     procExecutor.stop();
-    procExecutor.join();
     procStore.stop(false);
-    // nothing running...
-    if (beforeStartAction != null) {
-      beforeStartAction.run();
+    if (stopAction != null) {
+      stopAction.call();
     }
+    procExecutor.join();
+    procExecutor.getScheduler().clear();
+
+    // nothing running...
+
     // re-start
+    LOG.info("RESTART - Start");
     procStore.start(storeThreads);
     procExecutor.start(execThreads, failOnCorrupted);
+    if (startAction != null) {
+      startAction.call();
+    }
+
+    if (avoidTestKillDuringRestart) {
+      procExecutor.testing = testing;
+    }
   }
 
   public static void storeRestart(ProcedureStore procStore, ProcedureStore.ProcedureLoader loader)
@@ -309,11 +329,11 @@ public class ProcedureTestingUtility {
   public static <TEnv> void testRecoveryAndDoubleExecution(final ProcedureExecutor<TEnv> procExec,
       final long procId, final boolean expectFailure, final Runnable customRestart)
       throws Exception {
-    final Procedure proc = procExec.getProcedure(procId);
+    Procedure proc = procExec.getProcedure(procId);
     waitProcedure(procExec, procId);
     assertEquals(false, procExec.isRunning());
-
     for (int i = 0; !procExec.isFinished(procId); ++i) {
+      proc = procExec.getProcedure(procId);
       LOG.info("Restart " + i + " exec state: " + proc);
       if (customRestart != null) {
         customRestart.run();
@@ -415,8 +435,8 @@ public class ProcedureTestingUtility {
 
     // Mark acquire/release lock functions public for test uses.
     @Override
-    public boolean acquireLock(Void env) {
-      return true;
+    public LockState acquireLock(Void env) {
+      return LockState.LOCK_ACQUIRED;
     }
 
     @Override

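The reworked restart() above takes explicit stop/start hooks instead of a single
beforeStartAction. A hedged usage sketch (procExec as in the tests; the two helper
methods invoked from the hooks are assumptions for illustration only):

    // Sketch: exercising the new restart() overload from a test.
    ProcedureTestingUtility.restart(procExec,
        true,   // avoidTestKillDuringRestart: temporarily clear kill-points
        true,   // failOnCorrupted
        () -> { stopExtraServices(); return null; },    // stopAction, runs after the store stops
        () -> { startExtraServices(); return null; });  // startAction, runs after the executor restarts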
http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java
index 0a8b0e4..ba89768 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java
@@ -212,11 +212,12 @@ public class TestProcedureSuspended {
     }
 
     @Override
-    protected boolean acquireLock(final TestProcEnv env) {
+    protected LockState acquireLock(final TestProcEnv env) {
       if ((hasLock = lock.compareAndSet(false, true))) {
         LOG.info("ACQUIRE LOCK " + this + " " + (hasLock));
+        return LockState.LOCK_ACQUIRED;
       }
-      return hasLock;
+      return LockState.LOCK_YIELD_WAIT;
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index fbe8ec6..04c9b43 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1087,7 +1087,7 @@ public class HMaster extends HRegionServer implements MasterServices {
         new MasterProcedureEnv.WALStoreLeaseRecovery(this));
     procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
     procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
-        procEnv.getProcedureQueue());
+        procEnv.getProcedureScheduler());
     configurationManager.registerObserver(procEnv);
 
     final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
index 1a1c8c3..20fc492 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
@@ -289,7 +289,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
   }
 
   @Override
-  protected boolean acquireLock(final MasterProcedureEnv env) {
+  protected LockState acquireLock(final MasterProcedureEnv env) {
     boolean ret = lock.acquireLock(env);
     locked.set(ret);
     hasLock = ret;
@@ -298,8 +298,10 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
         LOG.debug("LOCKED - " + toString());
       }
       lastHeartBeat.set(System.currentTimeMillis());
+      return LockState.LOCK_ACQUIRED;
     }
-    return ret;
+    LOG.warn("Failed acquire LOCK " + toString() + "; YIELDING");
+    return LockState.LOCK_EVENT_WAIT;
   }
 
   @Override
@@ -414,37 +416,43 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
   private class TableExclusiveLock implements LockInterface {
     @Override
     public boolean acquireLock(final MasterProcedureEnv env) {
-      return env.getProcedureScheduler().tryAcquireTableExclusiveLock(LockProcedure.this, tableName);
+      // We invert return from waitNamespaceExclusiveLock; it returns true if you HAVE TO WAIT
+      // to get the lock and false if you don't; i.e. you got the lock.
+      return !env.getProcedureScheduler().waitTableExclusiveLock(LockProcedure.this, tableName);
     }
 
     @Override
     public void releaseLock(final MasterProcedureEnv env) {
-      env.getProcedureScheduler().releaseTableExclusiveLock(LockProcedure.this, tableName);
+      env.getProcedureScheduler().wakeTableExclusiveLock(LockProcedure.this, tableName);
     }
   }
 
   private class TableSharedLock implements LockInterface {
     @Override
     public boolean acquireLock(final MasterProcedureEnv env) {
-      return env.getProcedureScheduler().tryAcquireTableSharedLock(LockProcedure.this, tableName);
+      // We invert return from waitNamespaceExclusiveLock; it returns true if you HAVE TO WAIT
+      // to get the lock and false if you don't; i.e. you got the lock.
+      return !env.getProcedureScheduler().waitTableSharedLock(LockProcedure.this, tableName);
     }
 
     @Override
     public void releaseLock(final MasterProcedureEnv env) {
-      env.getProcedureScheduler().releaseTableSharedLock(LockProcedure.this, tableName);
+      env.getProcedureScheduler().wakeTableSharedLock(LockProcedure.this, tableName);
     }
   }
 
   private class NamespaceExclusiveLock implements LockInterface {
     @Override
     public boolean acquireLock(final MasterProcedureEnv env) {
-      return env.getProcedureScheduler().tryAcquireNamespaceExclusiveLock(
+      // We invert return from waitNamespaceExclusiveLock; it returns true if you HAVE TO WAIT
+      // to get the lock and false if you don't; i.e. you got the lock.
+      return !env.getProcedureScheduler().waitNamespaceExclusiveLock(
           LockProcedure.this, namespace);
     }
 
     @Override
     public void releaseLock(final MasterProcedureEnv env) {
-      env.getProcedureScheduler().releaseNamespaceExclusiveLock(
+      env.getProcedureScheduler().wakeNamespaceExclusiveLock(
           LockProcedure.this, namespace);
     }
   }
@@ -452,6 +460,8 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
   private class RegionExclusiveLock implements LockInterface {
     @Override
     public boolean acquireLock(final MasterProcedureEnv env) {
+      // We invert return from waitNamespaceExclusiveLock; it returns true if you HAVE TO WAIT
+      // to get the lock and false if you don't; i.e. you got the lock.
       return !env.getProcedureScheduler().waitRegions(LockProcedure.this, tableName, regionInfos);
     }
 
@@ -460,4 +470,4 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
       env.getProcedureScheduler().wakeRegions(LockProcedure.this, tableName, regionInfos);
     }
   }
-}
+}
\ No newline at end of file

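The procedures touched by this patch follow one of two idioms against the renamed
scheduler API, as the hunks below show. A combined sketch (field names and the table-lock
variant are taken from the surrounding diffs; this is not additional patched code):

    // Idiom 1 (LockProcedure's inner lock classes): keep a boolean contract by
    // inverting wait*, which returns true when the caller HAS TO WAIT.
    public boolean acquireLock(final MasterProcedureEnv env) {
      return !env.getProcedureScheduler().waitTableExclusiveLock(LockProcedure.this, tableName);
    }

    // Idiom 2 (the state-machine procedures that follow): surface the same call as a LockState.
    protected LockState acquireLock(final MasterProcedureEnv env) {
      if (env.getProcedureScheduler().waitTableExclusiveLock(this, getTableName())) {
        return LockState.LOCK_EVENT_WAIT;   // the scheduler already parked us on the queue's event
      }
      return LockState.LOCK_ACQUIRED;
    }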
http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
index a514532..03fdaef 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineNamespaceProcedure.java
@@ -58,13 +58,16 @@ public abstract class AbstractStateMachineNamespaceProcedure<TState>
   }
 
   @Override
-  protected boolean acquireLock(final MasterProcedureEnv env) {
-    if (env.waitInitialized(this)) return false;
-    return env.getProcedureQueue().tryAcquireNamespaceExclusiveLock(this, getNamespaceName());
+  protected LockState acquireLock(final MasterProcedureEnv env) {
+    if (env.waitInitialized(this)) return LockState.LOCK_EVENT_WAIT;
+    if (env.getProcedureScheduler().waitNamespaceExclusiveLock(this, getNamespaceName())) {
+      return LockState.LOCK_EVENT_WAIT;
+    }
+    return LockState.LOCK_ACQUIRED;
   }
 
   @Override
   protected void releaseLock(final MasterProcedureEnv env) {
-    env.getProcedureQueue().releaseNamespaceExclusiveLock(this, getNamespaceName());
+    env.getProcedureScheduler().wakeNamespaceExclusiveLock(this, getNamespaceName());
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
index 7cced45..e957f9d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineTableProcedure.java
@@ -75,14 +75,17 @@ public abstract class AbstractStateMachineTableProcedure<TState>
   }
 
   @Override
-  protected boolean acquireLock(final MasterProcedureEnv env) {
-    if (env.waitInitialized(this)) return false;
-    return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, getTableName());
+  protected LockState acquireLock(final MasterProcedureEnv env) {
+    if (env.waitInitialized(this)) return LockState.LOCK_EVENT_WAIT;
+    if (env.getProcedureScheduler().waitTableExclusiveLock(this, getTableName())) {
+      return LockState.LOCK_EVENT_WAIT;
+    }
+    return LockState.LOCK_ACQUIRED;
   }
 
   @Override
   protected void releaseLock(final MasterProcedureEnv env) {
-    env.getProcedureQueue().releaseTableExclusiveLock(this, getTableName());
+    env.getProcedureScheduler().wakeTableExclusiveLock(this, getTableName());
   }
 
   protected User getUser() {
@@ -108,4 +111,4 @@ public abstract class AbstractStateMachineTableProcedure<TState>
       throw new TableNotFoundException(getTableName());
     }
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
index 982e880..2c39c09 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateNamespaceProcedure.java
@@ -160,16 +160,19 @@ public class CreateNamespaceProcedure
   }
 
   @Override
-  protected boolean acquireLock(final MasterProcedureEnv env) {
+  protected LockState acquireLock(final MasterProcedureEnv env) {
     if (!env.getMasterServices().isInitialized()) {
       // Namespace manager might not be ready if master is not fully initialized,
       // return false to reject user namespace creation; return true for default
       // and system namespace creation (this is part of master initialization).
       if (!isBootstrapNamespace() && env.waitInitialized(this)) {
-        return false;
+        return LockState.LOCK_EVENT_WAIT;
       }
     }
-    return env.getProcedureQueue().tryAcquireNamespaceExclusiveLock(this, getNamespaceName());
+    if (env.getProcedureScheduler().waitNamespaceExclusiveLock(this, getNamespaceName())) {
+      return LockState.LOCK_EVENT_WAIT;
+    }
+    return LockState.LOCK_ACQUIRED;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
index 0d24f51..2421dfc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CreateTableProcedure.java
@@ -216,11 +216,14 @@ public class CreateTableProcedure
   }
 
   @Override
-  protected boolean acquireLock(final MasterProcedureEnv env) {
+  protected LockState acquireLock(final MasterProcedureEnv env) {
     if (!getTableName().isSystemTable() && env.waitInitialized(this)) {
-      return false;
+      return LockState.LOCK_EVENT_WAIT;
+    }
+    if (env.getProcedureScheduler().waitTableExclusiveLock(this, getTableName())) {
+      return LockState.LOCK_EVENT_WAIT;
     }
-    return env.getProcedureQueue().tryAcquireTableExclusiveLock(this, getTableName());
+    return LockState.LOCK_ACQUIRED;
   }
 
   private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
index 353342a..87c79b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
@@ -121,11 +121,6 @@ public class MasterProcedureEnv implements ConfigurationObserver {
     return master.getMasterCoprocessorHost();
   }
 
-  @Deprecated
-  public MasterProcedureScheduler getProcedureQueue() {
-    return procSched;
-  }
-
   public MasterProcedureScheduler getProcedureScheduler() {
     return procSched;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index b9b7b59..bd1b3e0 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -112,8 +112,6 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
 
   private <T extends Comparable<T>> void doAdd(final FairQueue<T> fairq,
       final Queue<T> queue, final Procedure proc, final boolean addFront) {
-    if (proc.isSuspended()) return;
-
     queue.add(proc, addFront);
     if (!queue.hasExclusiveLock() || queue.isLockOwner(proc.getProcId())) {
       // if the queue was not remove for an xlock execution
@@ -157,6 +155,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     final boolean xlockReq = rq.requireExclusiveLock(pollResult);
     if (xlockReq && rq.isLocked() && !rq.hasLockAccess(pollResult)) {
       // someone is already holding the lock (e.g. shared lock). avoid a yield
+      removeFromRunQueue(fairq, rq);
       return null;
     }
 
@@ -177,7 +176,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   }
 
   @Override
-  public void clearQueue() {
+  protected void clearQueue() {
     // Remove Servers
     for (int i = 0; i < serverBuckets.length; ++i) {
       clear(serverBuckets[i], serverRunQueue, SERVER_QUEUE_KEY_COMPARATOR);
@@ -460,7 +459,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     }
 
     @Override
-    public synchronized boolean isAvailable() {
+    public boolean isAvailable() {
       // if there are no items in the queue, or the namespace is locked.
       // we can't execute operation on this table
       if (isEmpty() || namespaceQueue.hasExclusiveLock()) {
@@ -478,7 +477,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       return true;
     }
 
-    public synchronized RegionEvent getRegionEvent(final HRegionInfo regionInfo) {
+    public RegionEvent getRegionEvent(final HRegionInfo regionInfo) {
       if (regionEventMap == null) {
         regionEventMap = new HashMap<String, RegionEvent>();
       }
@@ -490,7 +489,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       return event;
     }
 
-    public synchronized void removeRegionEvent(final RegionEvent event) {
+    public void removeRegionEvent(final RegionEvent event) {
       regionEventMap.remove(event.getRegionInfo().getEncodedName());
       if (regionEventMap.isEmpty()) {
         regionEventMap = null;
@@ -511,30 +510,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     }
 
     public boolean requireExclusiveLock(Procedure proc) {
-      TableProcedureInterface tpi = (TableProcedureInterface)proc;
-      switch (tpi.getTableOperationType()) {
-        case CREATE:
-        case DELETE:
-        case DISABLE:
-        case ENABLE:
-          return true;
-        case EDIT:
-          // we allow concurrent edit on the NS table
-          return !tpi.getTableName().equals(TableName.NAMESPACE_TABLE_NAME);
-        case READ:
-          return false;
-        // region operations are using the shared-lock on the table
-        // and then they will grab an xlock on the region.
-        case SPLIT:
-        case MERGE:
-        case ASSIGN:
-        case UNASSIGN:
-        case REGION_EDIT:
-          return false;
-        default:
-          break;
-      }
-      throw new UnsupportedOperationException("unexpected type " + tpi.getTableOperationType());
+      return requireTableExclusiveLock((TableProcedureInterface)proc);
     }
   }
 
@@ -589,96 +565,139 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   // ============================================================================
   //  Table Locking Helpers
   // ============================================================================
+  private static boolean requireTableExclusiveLock(TableProcedureInterface proc) {
+    switch (proc.getTableOperationType()) {
+      case CREATE:
+      case DELETE:
+      case DISABLE:
+      case ENABLE:
+        return true;
+      case EDIT:
+        // we allow concurrent edit on the NS table
+        return !proc.getTableName().equals(TableName.NAMESPACE_TABLE_NAME);
+      case READ:
+        return false;
+      // region operations are using the shared-lock on the table
+      // and then they will grab an xlock on the region.
+      case SPLIT:
+      case MERGE:
+      case ASSIGN:
+      case UNASSIGN:
+      case REGION_EDIT:
+        return false;
+      default:
+        break;
+    }
+    throw new UnsupportedOperationException("unexpected type " +
+        proc.getTableOperationType());
+  }
+
   /**
-   * Try to acquire the exclusive lock on the specified table.
-   * other operations in the table-queue will be executed after the lock is released.
+   * Suspend the procedure if the specified table is already locked.
+   * Other operations in the table-queue will be executed after the lock is released.
    * @param procedure the procedure trying to acquire the lock
    * @param table Table to lock
-   * @return true if we were able to acquire the lock on the table, otherwise false.
+   * @return true if the procedure has to wait for the table to be available
    */
-  public boolean tryAcquireTableExclusiveLock(final Procedure procedure, final TableName table) {
+  public boolean waitTableExclusiveLock(final Procedure procedure, final TableName table) {
     schedLock();
     try {
-      final TableQueue queue = getTableQueue(table);
-      if (!queue.getNamespaceQueue().trySharedLock()) {
-        return false;
+      final TableQueue tableQueue = getTableQueue(table);
+      final NamespaceQueue nsQueue = tableQueue.getNamespaceQueue();
+      if (!nsQueue.trySharedLock()) {
+        suspendProcedure(nsQueue.getEvent(), procedure);
+        return true;
       }
-
-      if (!queue.tryExclusiveLock(procedure)) {
-        queue.getNamespaceQueue().releaseSharedLock();
-        return false;
+      if (!tableQueue.tryExclusiveLock(procedure)) {
+        nsQueue.releaseSharedLock();
+        suspendProcedure(tableQueue.getEvent(), procedure);
+        return true;
       }
-
-      removeFromRunQueue(tableRunQueue, queue);
-      return true;
+      removeFromRunQueue(tableRunQueue, tableQueue);
+      return false;
     } finally {
       schedUnlock();
     }
   }
 
   /**
-   * Release the exclusive lock taken with tryAcquireTableWrite()
+   * Wake the procedures waiting for the specified table
    * @param procedure the procedure releasing the lock
    * @param table the name of the table that has the exclusive lock
    */
-  public void releaseTableExclusiveLock(final Procedure procedure, final TableName table) {
+  public void wakeTableExclusiveLock(final Procedure procedure, final TableName table) {
     schedLock();
     try {
-      final TableQueue queue = getTableQueue(table);
-      if (!queue.hasParentLock(procedure)) {
-        queue.releaseExclusiveLock(procedure);
+      final TableQueue tableQueue = getTableQueue(table);
+      int waitingCount = 0;
+
+      if (!tableQueue.hasParentLock(procedure)) {
+        tableQueue.releaseExclusiveLock(procedure);
+        waitingCount += popEventWaitingProcedures(tableQueue.getEvent());
       }
-      queue.getNamespaceQueue().releaseSharedLock();
-      addToRunQueue(tableRunQueue, queue);
+      final NamespaceQueue nsQueue = tableQueue.getNamespaceQueue();
+      if (nsQueue.releaseSharedLock()) {
+        waitingCount += popEventWaitingProcedures(nsQueue.getEvent());
+      }
+      addToRunQueue(tableRunQueue, tableQueue);
+      wakePollIfNeeded(waitingCount);
     } finally {
       schedUnlock();
     }
   }
 
   /**
-   * Try to acquire the shared lock on the specified table.
+   * Suspend the procedure if the specified table is already locked.
    * other "read" operations in the table-queue may be executed concurrently,
    * @param procedure the procedure trying to acquire the lock
    * @param table Table to lock
-   * @return true if we were able to acquire the lock on the table, otherwise false.
+   * @return true if the procedure has to wait for the table to be available
    */
-  public boolean tryAcquireTableSharedLock(final Procedure procedure, final TableName table) {
-    return tryAcquireTableQueueSharedLock(procedure, table) != null;
+  public boolean waitTableSharedLock(final Procedure procedure, final TableName table) {
+    return waitTableQueueSharedLock(procedure, table) == null;
   }
 
-  private TableQueue tryAcquireTableQueueSharedLock(final Procedure procedure,
-      final TableName table) {
+  private TableQueue waitTableQueueSharedLock(final Procedure procedure, final TableName table) {
     schedLock();
     try {
-      final TableQueue queue = getTableQueue(table);
-      if (!queue.getNamespaceQueue().trySharedLock()) {
+      final TableQueue tableQueue = getTableQueue(table);
+      final NamespaceQueue nsQueue = tableQueue.getNamespaceQueue();
+      if (!nsQueue.trySharedLock()) {
+        suspendProcedure(nsQueue.getEvent(), procedure);
         return null;
       }
 
-      if (!queue.trySharedLock()) {
-        queue.getNamespaceQueue().releaseSharedLock();
+      if (!tableQueue.trySharedLock()) {
+        tableQueue.getNamespaceQueue().releaseSharedLock();
+        suspendProcedure(tableQueue.getEvent(), procedure);
         return null;
       }
 
-      return queue;
+      return tableQueue;
     } finally {
       schedUnlock();
     }
   }
 
   /**
-   * Release the shared lock taken with tryAcquireTableRead()
+   * Wake the procedures waiting for the specified table
    * @param procedure the procedure releasing the lock
    * @param table the name of the table that has the shared lock
    */
-  public void releaseTableSharedLock(final Procedure procedure, final TableName table) {
+  public void wakeTableSharedLock(final Procedure procedure, final TableName table) {
     schedLock();
     try {
-      final TableQueue queue = getTableQueue(table);
-      if (queue.releaseSharedLock()) {
-        addToRunQueue(tableRunQueue, queue);
+      final TableQueue tableQueue = getTableQueue(table);
+      final NamespaceQueue nsQueue = tableQueue.getNamespaceQueue();
+      int waitingCount = 0;
+      if (tableQueue.releaseSharedLock()) {
+        addToRunQueue(tableRunQueue, tableQueue);
+        waitingCount += popEventWaitingProcedures(tableQueue.getEvent());
+      }
+      if (nsQueue.releaseSharedLock()) {
+        waitingCount += popEventWaitingProcedures(nsQueue.getEvent());
       }
-      queue.getNamespaceQueue().releaseSharedLock();
+      wakePollIfNeeded(waitingCount);
     } finally {
       schedUnlock();
     }
@@ -746,7 +765,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       queue = getTableQueueWithLock(table);
     } else {
       // acquire the table shared-lock
-      queue = tryAcquireTableQueueSharedLock(procedure, table);
+      queue = waitTableQueueSharedLock(procedure, table);
       if (queue == null) return true;
     }
 
@@ -771,7 +790,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     }
 
     if (!hasLock && !procedure.hasParent()) {
-      releaseTableSharedLock(procedure, table);
+      wakeTableSharedLock(procedure, table);
     }
     return !hasLock;
   }
@@ -822,13 +841,11 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       for (int i = numProcs - 1; i >= 0; --i) {
         wakeProcedure(nextProcs[i]);
       }
-
       wakePollIfNeeded(numProcs);
-
       if (!procedure.hasParent()) {
         // release the table shared-lock.
         // (if we have a parent, it is holding an xlock so we didn't take the shared-lock)
-        releaseTableSharedLock(procedure, table);
+        wakeTableSharedLock(procedure, table);
       }
     } finally {
       schedUnlock();
@@ -839,45 +856,52 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   //  Namespace Locking Helpers
   // ============================================================================
   /**
-   * Try to acquire the exclusive lock on the specified namespace.
-   * @see #releaseNamespaceExclusiveLock(Procedure,String)
+   * Suspend the procedure if the specified namespace is already locked.
+   * @see #wakeNamespaceExclusiveLock(Procedure,String)
    * @param procedure the procedure trying to acquire the lock
    * @param nsName Namespace to lock
-   * @return true if we were able to acquire the lock on the namespace, otherwise false.
+   * @return true if the procedure has to wait for the namespace to be available
    */
-  public boolean tryAcquireNamespaceExclusiveLock(final Procedure procedure, final String nsName) {
+  public boolean waitNamespaceExclusiveLock(final Procedure procedure, final String nsName) {
     schedLock();
     try {
-      TableQueue tableQueue = getTableQueue(TableName.NAMESPACE_TABLE_NAME);
-      if (!tableQueue.trySharedLock()) return false;
+      final TableQueue tableQueue = getTableQueue(TableName.NAMESPACE_TABLE_NAME);
+      if (!tableQueue.trySharedLock()) {
+        suspendProcedure(tableQueue.getEvent(), procedure);
+        return true;
+      }
 
-      NamespaceQueue nsQueue = getNamespaceQueue(nsName);
-      boolean hasLock = nsQueue.tryExclusiveLock(procedure);
-      if (!hasLock) {
+      final NamespaceQueue nsQueue = getNamespaceQueue(nsName);
+      if (!nsQueue.tryExclusiveLock(procedure)) {
         tableQueue.releaseSharedLock();
+        suspendProcedure(nsQueue.getEvent(), procedure);
+        return true;
       }
-      return hasLock;
+      return false;
     } finally {
       schedUnlock();
     }
   }
 
   /**
-   * Release the exclusive lock
-   * @see #tryAcquireNamespaceExclusiveLock(Procedure,String)
+   * Wake the procedures waiting for the specified namespace
+   * @see #waitNamespaceExclusiveLock(Procedure,String)
    * @param procedure the procedure releasing the lock
    * @param nsName the namespace that has the exclusive lock
    */
-  public void releaseNamespaceExclusiveLock(final Procedure procedure, final String nsName) {
+  public void wakeNamespaceExclusiveLock(final Procedure procedure, final String nsName) {
     schedLock();
     try {
       final TableQueue tableQueue = getTableQueue(TableName.NAMESPACE_TABLE_NAME);
-      final NamespaceQueue queue = getNamespaceQueue(nsName);
-
-      queue.releaseExclusiveLock(procedure);
+      final NamespaceQueue nsQueue = getNamespaceQueue(nsName);
+      int waitingCount = 0;
+      nsQueue.releaseExclusiveLock(procedure);
       if (tableQueue.releaseSharedLock()) {
         addToRunQueue(tableRunQueue, tableQueue);
+        waitingCount += popEventWaitingProcedures(tableQueue.getEvent());
       }
+      waitingCount += popEventWaitingProcedures(nsQueue.getEvent());
+      wakePollIfNeeded(waitingCount);
     } finally {
       schedUnlock();
     }
@@ -888,67 +912,45 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   // ============================================================================
   /**
    * Try to acquire the exclusive lock on the specified server.
-   * @see #releaseServerExclusiveLock(Procedure,ServerName)
+   * @see #wakeServerExclusiveLock(Procedure,ServerName)
    * @param procedure the procedure trying to acquire the lock
    * @param serverName Server to lock
-   * @return true if we were able to acquire the lock on the server, otherwise false.
+   * @return true if the procedure has to wait for the server to be available
    */
-  public boolean tryAcquireServerExclusiveLock(final Procedure procedure,
-      final ServerName serverName) {
+  public boolean waitServerExclusiveLock(final Procedure procedure, final ServerName serverName) {
     schedLock();
     try {
       ServerQueue queue = getServerQueue(serverName);
       if (queue.tryExclusiveLock(procedure)) {
         removeFromRunQueue(serverRunQueue, queue);
-        return true;
+        return false;
       }
+      suspendProcedure(queue.getEvent(), procedure);
+      return true;
     } finally {
       schedUnlock();
     }
-    return false;
   }
 
   /**
-   * Release the exclusive lock
-   * @see #tryAcquireServerExclusiveLock(Procedure,ServerName)
+   * Wake the procedures waiting for the specified server
+   * @see #waitServerExclusiveLock(Procedure,ServerName)
    * @param procedure the procedure releasing the lock
    * @param serverName the server that has the exclusive lock
    */
-  public void releaseServerExclusiveLock(final Procedure procedure,
-      final ServerName serverName) {
+  public void wakeServerExclusiveLock(final Procedure procedure, final ServerName serverName) {
     schedLock();
     try {
-      ServerQueue queue = getServerQueue(serverName);
+      final ServerQueue queue = getServerQueue(serverName);
       queue.releaseExclusiveLock(procedure);
       addToRunQueue(serverRunQueue, queue);
+      int waitingCount = popEventWaitingProcedures(queue.getEvent());
+      wakePollIfNeeded(waitingCount);
     } finally {
       schedUnlock();
     }
   }
 
-  /**
-   * Try to acquire the shared lock on the specified server.
-   * @see #releaseServerSharedLock(Procedure,ServerName)
-   * @param procedure the procedure releasing the lock
-   * @param serverName Server to lock
-   * @return true if we were able to acquire the lock on the server, otherwise false.
-   */
-  public boolean tryAcquireServerSharedLock(final Procedure procedure,
-      final ServerName serverName) {
-    return getServerQueueWithLock(serverName).trySharedLock();
-  }
-
-  /**
-   * Release the shared lock taken
-   * @see #tryAcquireServerSharedLock(Procedure,ServerName)
-   * @param procedure the procedure releasing the lock
-   * @param serverName the server that has the shared lock
-   */
-  public void releaseServerSharedLock(final Procedure procedure,
-      final ServerName serverName) {
-    getServerQueueWithLock(serverName).releaseSharedLock();
-  }
-
   // ============================================================================
   //  Generic Helpers
   // ============================================================================
@@ -965,8 +967,12 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     boolean isSuspended();
   }
 
+  // TODO Why OK not having synchronized access and/or volatiles and
+  // sharedLock-- and sharedLock++? Is this accessed by one thread only?
+  // Write up the concurrency expectations. St.Ack 01/19/2017
   private static abstract class Queue<TKey extends Comparable<TKey>>
       extends AvlLinkedNode<Queue<TKey>> implements QueueInterface {
+    private final ProcedureEventQueue event;
     private boolean suspended = false;
 
     private long exclusiveLockProcIdOwner = Long.MIN_VALUE;
@@ -982,6 +988,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     public Queue(TKey key, int priority) {
       this.key = key;
       this.priority = priority;
+      this.event = new ProcedureEventQueue();
     }
 
     protected TKey getKey() {
@@ -992,6 +999,10 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       return priority;
     }
 
+    public ProcedureEventQueue getEvent() {
+      return event;
+    }
+
     /**
      * True if the queue is not in the run-queue and it is owned by an event.
      */
@@ -1008,48 +1019,48 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     // ======================================================================
     //  Read/Write Locking helpers
     // ======================================================================
-    public synchronized boolean isLocked() {
+    public boolean isLocked() {
       return hasExclusiveLock() || sharedLock > 0;
     }
 
-    public synchronized boolean hasExclusiveLock() {
+    public boolean hasExclusiveLock() {
       return this.exclusiveLockProcIdOwner != Long.MIN_VALUE;
     }
 
-    public synchronized boolean trySharedLock() {
+    public boolean trySharedLock() {
       if (hasExclusiveLock()) return false;
       sharedLock++;
       return true;
     }
 
-    public synchronized boolean releaseSharedLock() {
+    public boolean releaseSharedLock() {
       return --sharedLock == 0;
     }
 
-    protected synchronized boolean isSingleSharedLock() {
+    protected boolean isSingleSharedLock() {
       return sharedLock == 1;
     }
 
-    public synchronized boolean isLockOwner(long procId) {
+    public boolean isLockOwner(long procId) {
       return exclusiveLockProcIdOwner == procId;
     }
 
-    public synchronized boolean hasParentLock(final Procedure proc) {
+    public boolean hasParentLock(final Procedure proc) {
       return proc.hasParent() &&
         (isLockOwner(proc.getParentProcId()) || isLockOwner(proc.getRootProcId()));
     }
 
-    public synchronized boolean hasLockAccess(final Procedure proc) {
+    public boolean hasLockAccess(final Procedure proc) {
       return isLockOwner(proc.getProcId()) || hasParentLock(proc);
     }
 
-    public synchronized boolean tryExclusiveLock(final Procedure proc) {
+    public boolean tryExclusiveLock(final Procedure proc) {
       if (isLocked()) return hasLockAccess(proc);
       exclusiveLockProcIdOwner = proc.getProcId();
       return true;
     }
 
-    public synchronized boolean releaseExclusiveLock(final Procedure proc) {
+    public boolean releaseExclusiveLock(final Procedure proc) {
       if (isLockOwner(proc.getProcId())) {
         exclusiveLockProcIdOwner = Long.MIN_VALUE;
         return true;
@@ -1059,7 +1070,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
 
     // This should go away when we have the new AM and its events
     // and we move xlock to the lock-event-queue.
-    public synchronized boolean isAvailable() {
+    public boolean isAvailable() {
       return !hasExclusiveLock() && !isEmpty();
     }
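
A note on the renamed helpers: the wait*/wake* methods invert the old boolean
convention. waitTableExclusiveLock, waitNamespaceExclusiveLock and
waitServerExclusiveLock now return true when the caller could NOT take the lock
and has been suspended on the queue's ProcedureEventQueue, while the wake*
counterparts release the lock and pop any suspended procedures back onto the run
queue. A minimal sketch of how a procedure consumes this (illustrative only; it
mirrors the acquireLock()/releaseLock() changes further down in this commit):

  @Override
  protected LockState acquireLock(final MasterProcedureEnv env) {
    // true means "could not lock, we were suspended; the scheduler will wake us later"
    return env.getProcedureScheduler().waitTableExclusiveLock(this, getTableName())
        ? LockState.LOCK_EVENT_WAIT
        : LockState.LOCK_ACQUIRED;
  }

  @Override
  protected void releaseLock(final MasterProcedureEnv env) {
    // releases the xlock and wakes anything parked on the table's event queue
    env.getProcedureScheduler().wakeTableExclusiveLock(this, getTableName());
  }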
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java
index c313700..d7fe5f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MergeTableRegionsProcedure.java
@@ -322,17 +322,19 @@ public class MergeTableRegionsProcedure
   }
 
   @Override
-  protected boolean acquireLock(final MasterProcedureEnv env) {
+  protected LockState acquireLock(final MasterProcedureEnv env) {
     if (env.waitInitialized(this)) {
-      return false;
+      return LockState.LOCK_EVENT_WAIT;
     }
-    return !env.getProcedureQueue().waitRegions(
-      this, getTableName(), regionsToMerge[0], regionsToMerge[1]);
+    return env.getProcedureScheduler().waitRegions(this, getTableName(),
+        regionsToMerge[0], regionsToMerge[1])?
+            LockState.LOCK_EVENT_WAIT: LockState.LOCK_ACQUIRED;
   }
 
   @Override
   protected void releaseLock(final MasterProcedureEnv env) {
-    env.getProcedureQueue().wakeRegions(this, getTableName(), regionsToMerge[0], regionsToMerge[1]);
+    env.getProcedureScheduler().wakeRegions(this, getTableName(),
+        regionsToMerge[0], regionsToMerge[1]);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
index 98a2152..7b4eb6e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ServerCrashProcedure.java
@@ -562,14 +562,19 @@ implements ServerProcedureInterface {
   }
 
   @Override
-  protected boolean acquireLock(final MasterProcedureEnv env) {
-    if (env.waitServerCrashProcessingEnabled(this)) return false;
-    return env.getProcedureQueue().tryAcquireServerExclusiveLock(this, getServerName());
+  protected LockState acquireLock(final MasterProcedureEnv env) {
+    // TODO: Put this BACK AFTER AMv2 goes in!!!!
+    // if (env.waitFailoverCleanup(this)) return LockState.LOCK_EVENT_WAIT;
+    if (env.waitServerCrashProcessingEnabled(this)) return LockState.LOCK_EVENT_WAIT;
+    if (env.getProcedureScheduler().waitServerExclusiveLock(this, getServerName())) {
+      return LockState.LOCK_EVENT_WAIT;
+    }
+    return LockState.LOCK_ACQUIRED;
   }
 
   @Override
   protected void releaseLock(final MasterProcedureEnv env) {
-    env.getProcedureQueue().releaseServerExclusiveLock(this, getServerName());
+    env.getProcedureScheduler().wakeServerExclusiveLock(this, getServerName());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
index 4730ad8..69b89be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/SplitTableRegionProcedure.java
@@ -347,11 +347,12 @@ public class SplitTableRegionProcedure
   }
 
   @Override
-  protected boolean acquireLock(final MasterProcedureEnv env) {
+  protected LockState acquireLock(final MasterProcedureEnv env) {
     if (env.waitInitialized(this)) {
-      return false;
+      return LockState.LOCK_EVENT_WAIT;
     }
-    return !env.getProcedureScheduler().waitRegions(this, getTableName(), parentHRI);
+    return env.getProcedureScheduler().waitRegions(this, getTableName(), parentHRI)?
+        LockState.LOCK_EVENT_WAIT: LockState.LOCK_ACQUIRED;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java
index 1f3241d..fa43fbd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java
@@ -59,7 +59,6 @@ public class TestLockManager {
 
   private static final Log LOG = LogFactory.getLog(TestLockProcedure.class);
   protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-  private static final Configuration conf = UTIL.getConfiguration();
   private static MasterServices masterServices;
 
   private static String namespace = "namespace";

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
index efa45e7..2b28c9f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
@@ -62,11 +62,11 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase
           + "proportion of table:region ops is 1:regions_per_table. Default: "
           + DEFAULT_OPS_TYPE);
 
-  private int numTables;
-  private int regionsPerTable;
-  private int numOps;
-  private int numThreads;
-  private String opsType;
+  private int numTables = DEFAULT_NUM_TABLES;
+  private int regionsPerTable = DEFAULT_REGIONS_PER_TABLE;
+  private int numOps = DEFAULT_NUM_OPERATIONS;
+  private int numThreads = DEFAULT_NUM_THREADS;
+  private String opsType = DEFAULT_OPS_TYPE;
 
   private MasterProcedureScheduler procedureScheduler;
   // List of table/region procedures to schedule.
@@ -83,10 +83,13 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase
       super(procId, hri.getTable(), TableOperationType.UNASSIGN, hri);
     }
 
-    public boolean acquireLock(Void env) {
-      return !procedureScheduler.waitRegions(this, getTableName(), getRegionInfo());
+    @Override
+    public LockState acquireLock(Void env) {
+      return procedureScheduler.waitRegions(this, getTableName(), getRegionInfo())?
+        LockState.LOCK_EVENT_WAIT: LockState.LOCK_ACQUIRED;
     }
 
+    @Override
     public void releaseLock(Void env) {
       procedureScheduler.wakeRegions(this, getTableName(), getRegionInfo());
     }
@@ -110,12 +113,15 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase
       super(procId, tableName, TableOperationType.EDIT);
     }
 
-    public boolean acquireLock(Void env) {
-      return procedureScheduler.tryAcquireTableExclusiveLock(this, getTableName());
+    @Override
+    public LockState acquireLock(Void env) {
+      return procedureScheduler.waitTableExclusiveLock(this, getTableName())?
+        LockState.LOCK_EVENT_WAIT: LockState.LOCK_ACQUIRED;
     }
 
+    @Override
     public void releaseLock(Void env) {
-      procedureScheduler.releaseTableExclusiveLock(this, getTableName());
+      procedureScheduler.wakeTableExclusiveLock(this, getTableName());
     }
   }
 
@@ -212,11 +218,15 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase
           continue;
         }
 
-        if (proc.acquireLock(null)) {
-          completed.incrementAndGet();
-          proc.releaseLock(null);
-        } else {
-          procedureScheduler.yield(proc);
+        switch (proc.acquireLock(null)) {
+          case LOCK_ACQUIRED:
+            completed.incrementAndGet();
+            proc.releaseLock(null);
+            break;
+          case LOCK_YIELD_WAIT:
+            break;
+          case LOCK_EVENT_WAIT:
+            break;
         }
         if (completed.get() % 100000 == 0) {
           System.out.println("Completed " + completed.get() + " procedures.");

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
index 2bd4f44..7e6691d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureTestingUtility.java
@@ -18,10 +18,14 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.List;
 import java.util.TreeSet;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -45,17 +49,12 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.TableStateManager;
-import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.MD5Hash;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import org.apache.hadoop.hbase.util.ModifyRegionUtils;
 
 public class MasterProcedureTestingUtility {
   private static final Log LOG = LogFactory.getLog(MasterProcedureTestingUtility.class);

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java
index a88eb62..450714f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureEvents.java
@@ -18,14 +18,15 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
+import static org.junit.Assert.assertEquals;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
@@ -37,16 +38,12 @@ import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
-
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
 @Category({MasterTests.class, MediumTests.class})
 public class TestMasterProcedureEvents {
   private static final Log LOG = LogFactory.getLog(TestCreateTableProcedure.class);
@@ -141,7 +138,7 @@ public class TestMasterProcedureEvents {
   private void testProcedureEventWaitWake(final HMaster master, final ProcedureEvent event,
       final Procedure proc) throws Exception {
     final ProcedureExecutor<MasterProcedureEnv> procExec = master.getMasterProcedureExecutor();
-    final MasterProcedureScheduler procSched = procExec.getEnvironment().getProcedureQueue();
+    final MasterProcedureScheduler procSched = procExec.getEnvironment().getProcedureScheduler();
 
     final long startPollCalls = procSched.getPollCalls();
     final long startNullPollCalls = procSched.getNullPollCalls();

http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index dc60710..438736e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.util.Arrays;
 
@@ -25,7 +29,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -39,10 +42,6 @@ import org.junit.Before;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
 @Category({MasterTests.class, SmallTests.class})
 public class TestMasterProcedureScheduler {
   private static final Log LOG = LogFactory.getLog(TestMasterProcedureScheduler.class);
@@ -89,8 +88,8 @@ public class TestMasterProcedureScheduler {
         Procedure proc = queue.poll();
         assertTrue(proc != null);
         TableName tableName = ((TestTableProcedure)proc).getTableName();
-        queue.tryAcquireTableExclusiveLock(proc, tableName);
-        queue.releaseTableExclusiveLock(proc, tableName);
+        queue.waitTableExclusiveLock(proc, tableName);
+        queue.wakeTableExclusiveLock(proc, tableName);
         queue.completionCleanup(proc);
         assertEquals(--count, queue.size());
         assertEquals(i * 1000 + j, proc.getProcId());
@@ -128,12 +127,12 @@ public class TestMasterProcedureScheduler {
     Procedure proc = queue.poll();
     assertEquals(1, proc.getProcId());
     // take the xlock
-    assertTrue(queue.tryAcquireTableExclusiveLock(proc, tableName));
+    assertEquals(false, queue.waitTableExclusiveLock(proc, tableName));
     // table can't be deleted because we have the lock
     assertEquals(0, queue.size());
     assertFalse(queue.markTableAsDeleted(tableName, dummyProc));
     // release the xlock
-    queue.releaseTableExclusiveLock(proc, tableName);
+    queue.wakeTableExclusiveLock(proc, tableName);
     // complete the table deletion
     assertTrue(queue.markTableAsDeleted(tableName, proc));
   }
@@ -164,7 +163,7 @@ public class TestMasterProcedureScheduler {
       Procedure proc = procs[i] = queue.poll();
       assertEquals(i + 1, proc.getProcId());
       // take the rlock
-      assertTrue(queue.tryAcquireTableSharedLock(proc, tableName));
+      assertEquals(false, queue.waitTableSharedLock(proc, tableName));
       // table can't be deleted because we have locks and/or items in the queue
       assertFalse(queue.markTableAsDeleted(tableName, dummyProc));
     }
@@ -173,7 +172,7 @@ public class TestMasterProcedureScheduler {
       // table can't be deleted because we have locks
       assertFalse(queue.markTableAsDeleted(tableName, dummyProc));
       // release the rlock
-      queue.releaseTableSharedLock(procs[i], tableName);
+      queue.wakeTableSharedLock(procs[i], tableName);
     }
 
     // there are no items and no lock in the queue
@@ -202,48 +201,48 @@ public class TestMasterProcedureScheduler {
     // Fetch the 1st item and take the write lock
     Procedure proc = queue.poll();
     assertEquals(1, proc.getProcId());
-    assertEquals(true, queue.tryAcquireTableExclusiveLock(proc, tableName));
+    assertEquals(false, queue.waitTableExclusiveLock(proc, tableName));
 
     // Fetch the 2nd item and verify that the lock can't be acquired
     assertEquals(null, queue.poll(0));
 
     // Release the write lock and acquire the read lock
-    queue.releaseTableExclusiveLock(proc, tableName);
+    queue.wakeTableExclusiveLock(proc, tableName);
 
     // Fetch the 2nd item and take the read lock
     Procedure rdProc = queue.poll();
     assertEquals(2, rdProc.getProcId());
-    assertEquals(true, queue.tryAcquireTableSharedLock(rdProc, tableName));
+    assertEquals(false, queue.waitTableSharedLock(rdProc, tableName));
 
     // Fetch the 3rd item and verify that the lock can't be acquired
     assertEquals(null, queue.poll(0));
 
     // release the rdlock of item 2 and take the wrlock for the 3rd item
-    queue.releaseTableSharedLock(rdProc, tableName);
+    queue.wakeTableSharedLock(rdProc, tableName);
 
     // Fetch the 3rd item and take the write lock
     Procedure wrProc = queue.poll();
-    assertEquals(true, queue.tryAcquireTableExclusiveLock(wrProc, tableName));
+    assertEquals(false, queue.waitTableExclusiveLock(wrProc, tableName));
 
     // Fetch 4th item and verify that the lock can't be acquired
     assertEquals(null, queue.poll(0));
 
     // Release the write lock and acquire the read lock
-    queue.releaseTableExclusiveLock(wrProc, tableName);
+    queue.wakeTableExclusiveLock(wrProc, tableName);
 
     // Fetch the 4th item and take the read lock
     rdProc = queue.poll();
     assertEquals(4, rdProc.getProcId());
-    assertEquals(true, queue.tryAcquireTableSharedLock(rdProc, tableName));
+    assertEquals(false, queue.waitTableSharedLock(rdProc, tableName));
 
     // Fetch the 4th item and take the read lock
     Procedure rdProc2 = queue.poll();
     assertEquals(5, rdProc2.getProcId());
-    assertEquals(true, queue.tryAcquireTableSharedLock(rdProc2, tableName));
+    assertEquals(false, queue.waitTableSharedLock(rdProc2, tableName));
 
     // Release 4th and 5th read-lock
-    queue.releaseTableSharedLock(rdProc, tableName);
-    queue.releaseTableSharedLock(rdProc2, tableName);
+    queue.wakeTableSharedLock(rdProc, tableName);
+    queue.wakeTableSharedLock(rdProc2, tableName);
 
     // remove table queue
     assertEquals(0, queue.size());
@@ -268,34 +267,36 @@ public class TestMasterProcedureScheduler {
     // Fetch the 1st item and take the write lock
     Procedure procNs1 = queue.poll();
     assertEquals(1, procNs1.getProcId());
-    assertEquals(true, queue.tryAcquireNamespaceExclusiveLock(procNs1, nsName1));
+    assertEquals(false, queue.waitNamespaceExclusiveLock(procNs1, nsName1));
 
     // System tables have 2 as default priority
     Procedure procNs2 = queue.poll();
     assertEquals(4, procNs2.getProcId());
-    assertEquals(true, queue.tryAcquireNamespaceExclusiveLock(procNs2, nsName2));
-    queue.releaseNamespaceExclusiveLock(procNs2, nsName2);
+    assertEquals(false, queue.waitNamespaceExclusiveLock(procNs2, nsName2));
+    queue.wakeNamespaceExclusiveLock(procNs2, nsName2);
+
+    // add procNs2 back in the queue
     queue.yield(procNs2);
 
     // table on ns1 is locked, so we get table on ns2
     procNs2 = queue.poll();
     assertEquals(3, procNs2.getProcId());
-    assertEquals(true, queue.tryAcquireTableExclusiveLock(procNs2, tableName2));
+    assertEquals(false, queue.waitTableExclusiveLock(procNs2, tableName2));
 
     // ns2 is not available (TODO we may avoid this one)
     Procedure procNs2b = queue.poll();
     assertEquals(4, procNs2b.getProcId());
-    assertEquals(false, queue.tryAcquireNamespaceExclusiveLock(procNs2b, nsName2));
-    queue.yield(procNs2b);
+    assertEquals(true, queue.waitNamespaceExclusiveLock(procNs2b, nsName2));
 
     // release the ns1 lock
-    queue.releaseNamespaceExclusiveLock(procNs1, nsName1);
+    queue.wakeNamespaceExclusiveLock(procNs1, nsName1);
 
     // we are now able to execute table of ns1
     long procId = queue.poll().getProcId();
     assertEquals(2, procId);
 
-    queue.releaseTableExclusiveLock(procNs2, tableName2);
+    // release ns2
+    queue.wakeTableExclusiveLock(procNs2, tableName2);
 
     // we are now able to execute ns2
     procId = queue.poll().getProcId();
@@ -314,35 +315,18 @@ public class TestMasterProcedureScheduler {
     // Fetch the ns item and take the xlock
     Procedure proc = queue.poll();
     assertEquals(1, proc.getProcId());
-    assertEquals(true, queue.tryAcquireNamespaceExclusiveLock(proc, nsName));
+    assertEquals(false, queue.waitNamespaceExclusiveLock(proc, nsName));
 
     // the table operation can't be executed because the ns is locked
     assertEquals(null, queue.poll(0));
 
     // release the ns lock
-    queue.releaseNamespaceExclusiveLock(proc, nsName);
+    queue.wakeNamespaceExclusiveLock(proc, nsName);
 
     proc = queue.poll();
     assertEquals(2, proc.getProcId());
-    assertEquals(true, queue.tryAcquireTableExclusiveLock(proc, tableName));
-    queue.releaseTableExclusiveLock(proc, tableName);
-  }
-
-  @Test
-  public void testSharedLock() throws Exception {
-    final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-
-    final TableName tableName = TableName.valueOf("testtb");
-    TestTableProcedure procA =
-        new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.READ);
-    TestTableProcedure procB =
-        new TestTableProcedure(2, tableName, TableProcedureInterface.TableOperationType.READ);
-
-    assertTrue(queue.tryAcquireTableSharedLock(procA, tableName));
-    assertTrue(queue.tryAcquireTableSharedLock(procB, tableName));
-
-    queue.releaseTableSharedLock(procA, tableName);
-    queue.releaseTableSharedLock(procB, tableName);
+    assertEquals(false, queue.waitTableExclusiveLock(proc, tableName));
+    queue.wakeTableExclusiveLock(proc, tableName);
   }
 
   @Test
@@ -371,13 +355,13 @@ public class TestMasterProcedureScheduler {
     // Fetch the 2nd item and take the xlock
     proc = queue.poll();
     assertEquals(2, proc.getProcId());
-    assertEquals(true, queue.tryAcquireTableExclusiveLock(proc, tableName));
+    assertEquals(false, queue.waitTableExclusiveLock(proc, tableName));
 
     // everything is locked by the table operation
     assertEquals(null, queue.poll(0));
 
     // release the table xlock
-    queue.releaseTableExclusiveLock(proc, tableName);
+    queue.wakeTableExclusiveLock(proc, tableName);
 
     // grab the last item in the queue
     proc = queue.poll();
@@ -410,13 +394,13 @@ public class TestMasterProcedureScheduler {
     // Fetch the 1st item and take the write lock
     Procedure proc = queue.poll();
     assertEquals(1, proc.getProcId());
-    assertEquals(true, queue.tryAcquireTableExclusiveLock(proc, tableName));
+    assertEquals(false, queue.waitTableExclusiveLock(proc, tableName));
 
     // everything is locked by the table operation
     assertEquals(null, queue.poll(0));
 
     // release the table lock
-    queue.releaseTableExclusiveLock(proc, tableName);
+    queue.wakeTableExclusiveLock(proc, tableName);
 
     // Fetch the 2nd item and take the lock on regionA and regionB
     Procedure mergeProc = queue.poll();
@@ -475,7 +459,7 @@ public class TestMasterProcedureScheduler {
     // Fetch the 1st item from the queue, "the root procedure" and take the table lock
     Procedure rootProc = queue.poll();
     assertEquals(1, rootProc.getProcId());
-    assertEquals(true, queue.tryAcquireTableExclusiveLock(rootProc, tableName));
+    assertEquals(false, queue.waitTableExclusiveLock(rootProc, tableName));
     assertEquals(null, queue.poll(0));
 
     // Execute the 1st step of the root-proc.
@@ -519,7 +503,7 @@ public class TestMasterProcedureScheduler {
     assertEquals(null, queue.poll(0));
 
     // release the table lock (for the root procedure)
-    queue.releaseTableExclusiveLock(rootProc, tableName);
+    queue.wakeTableExclusiveLock(rootProc, tableName);
   }
 
   @Test
@@ -639,7 +623,7 @@ public class TestMasterProcedureScheduler {
     // fetch and acquire first xlock proc
     Procedure parentProc = queue.poll();
     assertEquals(rootProc, parentProc);
-    assertTrue(queue.tryAcquireTableExclusiveLock(parentProc, tableName));
+    assertEquals(false, queue.waitTableExclusiveLock(parentProc, tableName));
 
     // add child procedure
     for (int i = 0; i < childProcs.length; ++i) {
@@ -662,13 +646,13 @@ public class TestMasterProcedureScheduler {
     assertEquals(null, queue.poll(0));
 
     // release xlock
-    queue.releaseTableExclusiveLock(parentProc, tableName);
+    queue.wakeTableExclusiveLock(parentProc, tableName);
 
     // fetch the other xlock proc
     Procedure proc = queue.poll();
     assertEquals(100, proc.getProcId());
-    assertTrue(queue.tryAcquireTableExclusiveLock(proc, tableName));
-    queue.releaseTableExclusiveLock(proc, tableName);
+    assertEquals(false, queue.waitTableExclusiveLock(proc, tableName));
+    queue.wakeTableExclusiveLock(proc, tableName);
   }
 
   @Test
@@ -697,7 +681,7 @@ public class TestMasterProcedureScheduler {
     // fetch and acquire first xlock proc
     Procedure parentProc = queue.poll();
     assertEquals(rootProc, parentProc);
-    assertTrue(queue.tryAcquireTableExclusiveLock(parentProc, tableName));
+    assertEquals(false, queue.waitTableExclusiveLock(parentProc, tableName));
 
     // add child procedure
     queue.addFront(childProc);
@@ -705,11 +689,11 @@ public class TestMasterProcedureScheduler {
     // fetch the other xlock proc
     Procedure proc = queue.poll();
     assertEquals(childProc, proc);
-    assertTrue(queue.tryAcquireTableExclusiveLock(proc, tableName));
-    queue.releaseTableExclusiveLock(proc, tableName);
+    assertEquals(false, queue.waitTableExclusiveLock(proc, tableName));
+    queue.wakeTableExclusiveLock(proc, tableName);
 
     // release xlock
-    queue.releaseTableExclusiveLock(parentProc, tableName);
+    queue.wakeTableExclusiveLock(parentProc, tableName);
   }
 
   @Test
@@ -724,7 +708,7 @@ public class TestMasterProcedureScheduler {
     // fetch from the queue and acquire xlock for the first proc
     Procedure proc = queue.poll();
     assertEquals(1, proc.getProcId());
-    assertEquals(true, queue.tryAcquireTableExclusiveLock(proc, tableName));
+    assertEquals(false, queue.waitTableExclusiveLock(proc, tableName));
 
     // nothing available, until xlock release
     assertEquals(null, queue.poll(0));
@@ -737,7 +721,7 @@ public class TestMasterProcedureScheduler {
     assertEquals(1, proc.getProcId());
 
     // release the xlock
-    queue.releaseTableExclusiveLock(proc, tableName);
+    queue.wakeTableExclusiveLock(proc, tableName);
 
     proc = queue.poll();
     assertEquals(2, proc.getProcId());
@@ -757,12 +741,12 @@ public class TestMasterProcedureScheduler {
     // fetch and acquire the first shared-lock
     Procedure proc1 = queue.poll();
     assertEquals(1, proc1.getProcId());
-    assertEquals(true, queue.tryAcquireTableSharedLock(proc1, tableName));
+    assertEquals(false, queue.waitTableSharedLock(proc1, tableName));
 
     // fetch and acquire the second shared-lock
     Procedure proc2 = queue.poll();
     assertEquals(2, proc2.getProcId());
-    assertEquals(true, queue.tryAcquireTableSharedLock(proc2, tableName));
+    assertEquals(false, queue.waitTableSharedLock(proc2, tableName));
 
     // nothing available, until xlock release
     assertEquals(null, queue.poll(0));
@@ -778,8 +762,8 @@ public class TestMasterProcedureScheduler {
     assertEquals(2, proc2.getProcId());
 
     // release the xlock
-    queue.releaseTableSharedLock(proc1, tableName);
-    queue.releaseTableSharedLock(proc2, tableName);
+    queue.wakeTableSharedLock(proc1, tableName);
+    queue.wakeTableSharedLock(proc2, tableName);
 
     Procedure proc3 = queue.poll();
     assertEquals(3, proc3.getProcId());


[48/50] [abbrv] hbase git commit: HBASE-17000 Implement computation of online region sizes and report to the Master

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/e48b7fa4/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 1c373ee..23ddd43 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -141,6 +141,22 @@ message SplitTableRegionResponse {
   optional uint64 proc_id = 1;
 }
 
+message RegionSpaceUse {
+  optional RegionInfo region = 1; // A region identifier
+  optional uint64 size = 2; // The size in bytes of the region
+}
+
+/**
+ * Reports filesystem usage for regions.
+ */
+message RegionSpaceUseReportRequest {
+  repeated RegionSpaceUse space_use = 1;
+}
+
+message RegionSpaceUseReportResponse {
+
+}
+
 service RegionServerStatusService {
   /** Called when a region server first starts. */
   rpc RegionServerStartup(RegionServerStartupRequest)
@@ -182,4 +198,10 @@ service RegionServerStatusService {
    */
   rpc getProcedureResult(GetProcedureResultRequest)
     returns(GetProcedureResultResponse);
+
+  /**
+   * Reports Region filesystem space use
+   */
+  rpc ReportRegionSpaceUse(RegionSpaceUseReportRequest)
+    returns(RegionSpaceUseReportResponse);
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e48b7fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 60b8b65..3a2c614 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -98,6 +99,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
@@ -1850,4 +1854,19 @@ public class MasterRpcServices extends RSRpcServices
       throw new ServiceException(e);
     }
   }
+
+  @Override
+  public RegionSpaceUseReportResponse reportRegionSpaceUse(RpcController controller,
+      RegionSpaceUseReportRequest request) throws ServiceException {
+    try {
+      master.checkInitialized();
+      MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
+      for (RegionSpaceUse report : request.getSpaceUseList()) {
+        quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), report.getSize());
+      }
+      return RegionSpaceUseReportResponse.newBuilder().build();
+    } catch (Exception e) {
+      throw new ServiceException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e48b7fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
new file mode 100644
index 0000000..01540eb
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/FileSystemUtilizationChore.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * A chore which computes the size of each {@link HRegion} on the FileSystem hosted by the given {@link HRegionServer}.
+ */
+@InterfaceAudience.Private
+public class FileSystemUtilizationChore extends ScheduledChore {
+  private static final Log LOG = LogFactory.getLog(FileSystemUtilizationChore.class);
+  static final String FS_UTILIZATION_CHORE_PERIOD_KEY = "hbase.regionserver.quotas.fs.utilization.chore.period";
+  static final int FS_UTILIZATION_CHORE_PERIOD_DEFAULT = 1000 * 60 * 5; // 5 minutes in millis
+
+  static final String FS_UTILIZATION_CHORE_DELAY_KEY = "hbase.regionserver.quotas.fs.utilization.chore.delay";
+  static final long FS_UTILIZATION_CHORE_DELAY_DEFAULT = 1000L * 60L; // 1 minute
+
+  static final String FS_UTILIZATION_CHORE_TIMEUNIT_KEY = "hbase.regionserver.quotas.fs.utilization.chore.timeunit";
+  static final String FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT = TimeUnit.MILLISECONDS.name();
+
+  static final String FS_UTILIZATION_MAX_ITERATION_DURATION_KEY = "hbase.regionserver.quotas.fs.utilization.chore.max.iteration.millis";
+  static final long FS_UTILIZATION_MAX_ITERATION_DURATION_DEFAULT = 5000L;
+
+  private final HRegionServer rs;
+  private final long maxIterationMillis;
+  private Iterator<Region> leftoverRegions;
+
+  public FileSystemUtilizationChore(HRegionServer rs) {
+    super(FileSystemUtilizationChore.class.getSimpleName(), rs, getPeriod(rs.getConfiguration()),
+        getInitialDelay(rs.getConfiguration()), getTimeUnit(rs.getConfiguration()));
+    this.rs = rs;
+    this.maxIterationMillis = rs.getConfiguration().getLong(
+        FS_UTILIZATION_MAX_ITERATION_DURATION_KEY, FS_UTILIZATION_MAX_ITERATION_DURATION_DEFAULT);
+  }
+
+  @Override
+  protected void chore() {
+    final Map<HRegionInfo,Long> onlineRegionSizes = new HashMap<>();
+    final Set<Region> onlineRegions = new HashSet<>(rs.getOnlineRegions());
+    // Process the regions from the last run if we have any. If we are somehow having difficulty
+    // processing the Regions, we want to avoid creating a backlog in memory of Region objs.
+    Iterator<Region> oldRegionsToProcess = getLeftoverRegions();
+    final Iterator<Region> iterator;
+    final boolean processingLeftovers;
+    if (null == oldRegionsToProcess) {
+      iterator = onlineRegions.iterator();
+      processingLeftovers = false;
+    } else {
+      iterator = oldRegionsToProcess;
+      processingLeftovers = true;
+    }
+    // Reset the leftoverRegions and let the loop re-assign if necessary.
+    setLeftoverRegions(null);
+    long regionSizesCalculated = 0L;
+    long offlineRegionsSkipped = 0L;
+    long skippedSplitParents = 0L;
+    long skippedRegionReplicas = 0L;
+    final long start = EnvironmentEdgeManager.currentTime();
+    while (iterator.hasNext()) {
+      // Make sure this chore doesn't hog the thread.
+      long timeRunning = EnvironmentEdgeManager.currentTime() - start;
+      if (timeRunning > maxIterationMillis) {
+        LOG.debug("Preempting execution of FileSystemUtilizationChore because it exceeds the"
+            + " maximum iteration configuration value. Will process remaining iterators"
+            + " on a subsequent invocation.");
+        setLeftoverRegions(iterator);
+        break;
+      }
+
+      final Region region = iterator.next();
+      // If we're processing leftover regions, the region may no-longer be online.
+      // If so, we can skip it.
+      if (processingLeftovers && !onlineRegions.contains(region)) {
+        offlineRegionsSkipped++;
+        continue;
+      }
+      // Avoid computing the size of regions which are the parent of split.
+      if (region.getRegionInfo().isSplitParent()) {
+        skippedSplitParents++;
+        continue;
+      }
+      // Avoid computing the size of region replicas.
+      if (HRegionInfo.DEFAULT_REPLICA_ID != region.getRegionInfo().getReplicaId()) {
+        skippedRegionReplicas++;
+        continue;
+      }
+      final long sizeInBytes = computeSize(region);
+      onlineRegionSizes.put(region.getRegionInfo(), sizeInBytes);
+      regionSizesCalculated++;
+    }
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Computed the size of " + regionSizesCalculated + " Regions. Skipped computation"
+          + " of " + offlineRegionsSkipped + " regions due to not being online on this RS, "
+          + skippedSplitParents + " regions due to being the parent of a split, and "
+          + skippedRegionReplicas + " regions due to being region replicas.");
+    }
+    reportRegionSizesToMaster(onlineRegionSizes);
+  }
+
+  /**
+   * Returns an {@link Iterator} over the Regions which were skipped last invocation of the chore.
+   *
+   * @return Regions from the previous invocation to process, or null.
+   */
+  Iterator<Region> getLeftoverRegions() {
+    return leftoverRegions;
+  }
+
+  /**
+   * Sets a new collection of Regions as leftovers.
+   */
+  void setLeftoverRegions(Iterator<Region> newLeftovers) {
+    this.leftoverRegions = newLeftovers;
+  }
+
+  /**
+   * Computes total FileSystem size for the given {@link Region}.
+   *
+   * @param r The region
+   * @return The size, in bytes, of the Region.
+   */
+  long computeSize(Region r) {
+    long regionSize = 0L;
+    for (Store store : r.getStores()) {
+      // StoreFile/StoreFileReaders are already instantiated with the file length cached.
+      // Can avoid extra NN ops.
+      regionSize += store.getStorefilesSize();
+    }
+    return regionSize;
+  }
+
+  /**
+   * Reports the computed region sizes to the currently active Master.
+   *
+   * @param onlineRegionSizes The computed region sizes to report.
+   */
+  void reportRegionSizesToMaster(Map<HRegionInfo,Long> onlineRegionSizes) {
+    this.rs.reportRegionSizesForQuotas(onlineRegionSizes);
+  }
+
+  /**
+   * Extracts the period for the chore from the configuration.
+   *
+   * @param conf The configuration object.
+   * @return The configured chore period or the default value.
+   */
+  static int getPeriod(Configuration conf) {
+    return conf.getInt(FS_UTILIZATION_CHORE_PERIOD_KEY, FS_UTILIZATION_CHORE_PERIOD_DEFAULT);
+  }
+
+  /**
+   * Extracts the initial delay for the chore from the configuration.
+   *
+   * @param conf The configuration object.
+   * @return The configured chore initial delay or the default value.
+   */
+  static long getInitialDelay(Configuration conf) {
+    return conf.getLong(FS_UTILIZATION_CHORE_DELAY_KEY, FS_UTILIZATION_CHORE_DELAY_DEFAULT);
+  }
+
+  /**
+   * Extracts the time unit for the chore period and initial delay from the configuration. The
+   * configuration value for {@link #FS_UTILIZATION_CHORE_TIMEUNIT_KEY} must correspond to a
+   * {@link TimeUnit} value.
+   *
+   * @param conf The configuration object.
+   * @return The configured time unit for the chore period and initial delay or the default value.
+   */
+  static TimeUnit getTimeUnit(Configuration conf) {
+    return TimeUnit.valueOf(conf.get(FS_UTILIZATION_CHORE_TIMEUNIT_KEY,
+        FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT));
+  }
+}
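
All four tuning knobs read from the region server configuration. A hedged example
of overriding the defaults (the keys come from the constants above; the values are
purely illustrative, not recommendations):

  Configuration conf = HBaseConfiguration.create();
  // Scan every 10 minutes instead of the 5 minute default.
  conf.setInt("hbase.regionserver.quotas.fs.utilization.chore.period", 10 * 60 * 1000);
  // Wait 2 minutes after start-up before the first run (default is 1 minute).
  conf.setLong("hbase.regionserver.quotas.fs.utilization.chore.delay", 2 * 60 * 1000L);
  // Period and delay above are interpreted in this unit.
  conf.set("hbase.regionserver.quotas.fs.utilization.chore.timeunit", TimeUnit.MILLISECONDS.name());
  // Yield the chore thread after at most 5 seconds; leftover regions resume next invocation.
  conf.setLong("hbase.regionserver.quotas.fs.utilization.chore.max.iteration.millis", 5000L);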

http://git-wip-us.apache.org/repos/asf/hbase/blob/e48b7fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
index bd9f410..fc24e52 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/quotas/MasterQuotaManager.java
@@ -19,7 +19,10 @@
 package org.apache.hadoop.hbase.quotas;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -62,6 +65,7 @@ public class MasterQuotaManager implements RegionStateListener {
   private NamedLock<String> userLocks;
   private boolean enabled = false;
   private NamespaceAuditor namespaceQuotaManager;
+  private ConcurrentHashMap<HRegionInfo, Long> regionSizes;
 
   public MasterQuotaManager(final MasterServices masterServices) {
     this.masterServices = masterServices;
@@ -85,6 +89,7 @@ public class MasterQuotaManager implements RegionStateListener {
     namespaceLocks = new NamedLock<String>();
     tableLocks = new NamedLock<TableName>();
     userLocks = new NamedLock<String>();
+    regionSizes = new ConcurrentHashMap<>();
 
     namespaceQuotaManager = new NamespaceAuditor(masterServices);
     namespaceQuotaManager.start();
@@ -515,5 +520,15 @@ public class MasterQuotaManager implements RegionStateListener {
       this.namespaceQuotaManager.removeRegionFromNamespaceUsage(hri);
     }
   }
+
+  public void addRegionSize(HRegionInfo hri, long size) {
+    // TODO Make proper API
+    regionSizes.put(hri, size);
+  }
+
+  public Map<HRegionInfo, Long> snapshotRegionSizes() {
+    // TODO Make proper API
+    return new HashMap<>(regionSizes);
+  }
 }
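
addRegionSize is the ingest point used by MasterRpcServices above, and
snapshotRegionSizes hands out a point-in-time copy of the backing
ConcurrentHashMap. A hypothetical consumer on the master side (not part of this
patch) could aggregate the reported usage per table roughly like so:

  Map<HRegionInfo, Long> sizes = master.getMasterQuotaManager().snapshotRegionSizes();
  Map<TableName, Long> perTable = new HashMap<>();
  for (Map.Entry<HRegionInfo, Long> entry : sizes.entrySet()) {
    perTable.merge(entry.getKey().getTable(), entry.getValue(), Long::sum);
  }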
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e48b7fa4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index ceed050..591c909 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -37,6 +37,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.SortedMap;
@@ -73,6 +74,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HealthCheckChore;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
@@ -116,6 +118,7 @@ import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.mob.MobCacheConfig;
 import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
+import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;
 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionConfiguration;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
@@ -151,12 +154,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpeci
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
@@ -508,6 +514,8 @@ public class HRegionServer extends HasThread implements
 
   protected SecureBulkLoadManager secureBulkLoadManager;
 
+  protected FileSystemUtilizationChore fsUtilizationChore;
+
   /**
    * Starts a HRegionServer at the default location.
    */
@@ -917,6 +925,8 @@ public class HRegionServer extends HasThread implements
     // Setup the Quota Manager
     rsQuotaManager = new RegionServerQuotaManager(this);
 
+    this.fsUtilizationChore = new FileSystemUtilizationChore(this);
+
     // Setup RPC client for master communication
     rpcClient = RpcClientFactory.createClient(conf, clusterId, new InetSocketAddress(
         rpcServices.isa.getAddress(), 0), clusterConnection.getConnectionMetrics());
@@ -1232,6 +1242,66 @@ public class HRegionServer extends HasThread implements
     }
   }
 
+  /**
+   * Reports the given map of Regions and their size on the filesystem to the active Master.
+   *
+   * @param onlineRegionSizes A map of region info to size in bytes
+   */
+  public void reportRegionSizesForQuotas(final Map<HRegionInfo, Long> onlineRegionSizes) {
+    RegionServerStatusService.BlockingInterface rss = rssStub;
+    if (rss == null) {
+      // the current server could be stopping.
+      LOG.trace("Skipping Region size report to HMaster as stub is null");
+      return;
+    }
+    try {
+      RegionSpaceUseReportRequest request = buildRegionSpaceUseReportRequest(
+          Objects.requireNonNull(onlineRegionSizes));
+      rss.reportRegionSpaceUse(null, request);
+    } catch (ServiceException se) {
+      IOException ioe = ProtobufUtil.getRemoteException(se);
+      if (ioe instanceof PleaseHoldException) {
+        LOG.trace("Failed to report region sizes to Master because it is initializing. This will be retried.", ioe);
+        // The Master is coming up. Will retry the report later. Avoid re-creating the stub.
+        return;
+      }
+      LOG.debug("Failed to report region sizes to Master. This will be retried.", ioe);
+      if (rssStub == rss) {
+        rssStub = null;
+      }
+      createRegionServerStatusStub(true);
+    }
+  }
+
+  /**
+   * Builds a {@link RegionSpaceUseReportRequest} protobuf message from the region size map.
+   *
+   * @param regionSizes Map of region info to size in bytes.
+   * @return The corresponding protocol buffer message.
+   */
+  RegionSpaceUseReportRequest buildRegionSpaceUseReportRequest(Map<HRegionInfo,Long> regionSizes) {
+    RegionSpaceUseReportRequest.Builder request = RegionSpaceUseReportRequest.newBuilder();
+    for (Entry<HRegionInfo, Long> entry : Objects.requireNonNull(regionSizes).entrySet()) {
+      request.addSpaceUse(convertRegionSize(entry.getKey(), entry.getValue()));
+    }
+    return request.build();
+  }
+
+  /**
+   * Converts a pair of {@link HRegionInfo} and {@code long} into a {@link RegionSpaceUse}
+   * protobuf message.
+   *
+   * @param regionInfo The HRegionInfo
+   * @param sizeInBytes The size in bytes of the Region
+   * @return The protocol buffer
+   */
+  RegionSpaceUse convertRegionSize(HRegionInfo regionInfo, Long sizeInBytes) {
+    return RegionSpaceUse.newBuilder()
+        .setRegion(HRegionInfo.convert(Objects.requireNonNull(regionInfo)))
+        .setSize(Objects.requireNonNull(sizeInBytes))
+        .build();
+  }
+
   ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime)
       throws IOException {
     // We're getting the MetricsRegionServerWrapper here because the wrapper computes requests
@@ -1806,6 +1876,7 @@ public class HRegionServer extends HasThread implements
     if (this.nonceManagerChore != null) choreService.scheduleChore(nonceManagerChore);
     if (this.storefileRefresher != null) choreService.scheduleChore(storefileRefresher);
     if (this.movedRegionsCleaner != null) choreService.scheduleChore(movedRegionsCleaner);
+    if (this.fsUtilizationChore != null) choreService.scheduleChore(fsUtilizationChore);
 
     // Leases is not a Thread. Internally it runs a daemon thread. If it gets
     // an unhandled exception, it will just exit.
@@ -2310,6 +2381,7 @@ public class HRegionServer extends HasThread implements
     if (this.healthCheckChore != null) healthCheckChore.cancel(true);
     if (this.storefileRefresher != null) storefileRefresher.cancel(true);
     if (this.movedRegionsCleaner != null) movedRegionsCleaner.cancel(true);
+    if (this.fsUtilizationChore != null) fsUtilizationChore.cancel(true);
 
     if (this.cacheFlusher != null) {
       this.cacheFlusher.join();

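For readers following the quota work: the FileSystemUtilizationChore wired in above is expected to hand reportRegionSizesForQuotas(...) a map of per-region sizes. The chore's own code is not part of this hunk, but based on the accessors exercised by the new test (Region.getStores() and Store.getStorefilesSize()), the summation presumably looks roughly like the sketch below; treat it as an illustration, not the committed implementation.

  import java.util.HashMap;
  import java.util.Map;
  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.regionserver.HRegionServer;
  import org.apache.hadoop.hbase.regionserver.Region;
  import org.apache.hadoop.hbase.regionserver.Store;

  // Sketch only: sum store file sizes per online region, then report them.
  static void reportOnlineRegionSizes(HRegionServer rs) {
    Map<HRegionInfo, Long> sizes = new HashMap<>();
    for (Region region : rs.getOnlineRegions()) {
      long regionSize = 0L;
      for (Store store : region.getStores()) {
        regionSize += store.getStorefilesSize();
      }
      sizes.put(region.getRegionInfo(), regionSize);
    }
    rs.reportRegionSizesForQuotas(sizes);
  }
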
http://git-wip-us.apache.org/repos/asf/hbase/blob/e48b7fa4/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java
new file mode 100644
index 0000000..ad98720
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestFileSystemUtilizationChore.java
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+/**
+ * Test class for {@link FileSystemUtilizationChore}.
+ */
+@Category(SmallTests.class)
+public class TestFileSystemUtilizationChore {
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testNoOnlineRegions() {
+    // A region with no stores, so there is nothing to sum.
+    final List<Long> regionSizes = Collections.emptyList();
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes)))
+        .when(rs)
+        .reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
+
+    final Region region = mockRegionWithSize(regionSizes);
+    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(region));
+    chore.chore();
+  }
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testRegionSizes() {
+    // One region with a single 1024-byte store.
+    final List<Long> regionSizes = Arrays.asList(1024L);
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(regionSizes)))
+        .when(rs)
+        .reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
+
+    final Region region = mockRegionWithSize(regionSizes);
+    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(region));
+    chore.chore();
+  }
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testMultipleRegionSizes() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+
+    // Three regions with multiple store sizes
+    final List<Long> r1Sizes = Arrays.asList(1024L, 2048L);
+    final long r1Sum = sum(r1Sizes);
+    final List<Long> r2Sizes = Arrays.asList(1024L * 1024L);
+    final long r2Sum = sum(r2Sizes);
+    final List<Long> r3Sizes = Arrays.asList(10L * 1024L * 1024L);
+    final long r3Sum = sum(r3Sizes);
+
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum, r2Sum, r3Sum))))
+        .when(rs)
+        .reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
+
+    final Region r1 = mockRegionWithSize(r1Sizes);
+    final Region r2 = mockRegionWithSize(r2Sizes);
+    final Region r3 = mockRegionWithSize(r3Sizes);
+    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2, r3));
+    chore.chore();
+  }
+
+  @Test
+  public void testDefaultConfigurationProperties() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    // Verify that the expected default values are actually represented.
+    assertEquals(
+        FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_DEFAULT, chore.getPeriod());
+    assertEquals(
+        FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_DEFAULT, chore.getInitialDelay());
+    assertEquals(
+        TimeUnit.valueOf(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_TIMEUNIT_DEFAULT),
+        chore.getTimeUnit());
+  }
+
+  @Test
+  public void testNonDefaultConfigurationProperties() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    // Override the default values
+    final int period = 60 * 10;
+    final long delay = 30L;
+    final TimeUnit timeUnit = TimeUnit.SECONDS;
+    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, period);
+    conf.setLong(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, delay);
+    conf.set(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_TIMEUNIT_KEY, timeUnit.name());
+
+    // Verify that the chore reports these non-default values
+    final HRegionServer rs = mockRegionServer(conf);
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    assertEquals(period, chore.getPeriod());
+    assertEquals(delay, chore.getInitialDelay());
+    assertEquals(timeUnit, chore.getTimeUnit());
+  }
+
+  @Test
+  public void testProcessingLeftoverRegions() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+
+    // Some leftover regions from a previous chore()
+    final List<Long> leftover1Sizes = Arrays.asList(1024L, 4096L);
+    final long leftover1Sum = sum(leftover1Sizes);
+    final List<Long> leftover2Sizes = Arrays.asList(2048L);
+    final long leftover2Sum = sum(leftover2Sizes);
+
+    final Region lr1 = mockRegionWithSize(leftover1Sizes);
+    final Region lr2 = mockRegionWithSize(leftover2Sizes);
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs) {
+      @Override
+      Iterator<Region> getLeftoverRegions() {
+        return Arrays.asList(lr1, lr2).iterator();
+      }
+    };
+    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum, leftover2Sum))))
+        .when(rs)
+        .reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
+
+    // We shouldn't compute all of these region sizes, just the leftovers
+    final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L));
+    final Region r2 = mockRegionWithSize(Arrays.asList(1024L * 1024L));
+    final Region r3 = mockRegionWithSize(Arrays.asList(10L * 1024L * 1024L));
+    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2, r3, lr1, lr2));
+
+    chore.chore();
+  }
+
+  @Test
+  public void testProcessingNowOfflineLeftoversAreIgnored() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+
+    // Some leftover regions from a previous chore()
+    final List<Long> leftover1Sizes = Arrays.asList(1024L, 4096L);
+    final long leftover1Sum = sum(leftover1Sizes);
+    final List<Long> leftover2Sizes = Arrays.asList(2048L);
+    final long leftover2Sum = sum(leftover2Sizes);
+
+    final Region lr1 = mockRegionWithSize(leftover1Sizes);
+    final Region lr2 = mockRegionWithSize(leftover2Sizes);
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs) {
+      @Override
+      Iterator<Region> getLeftoverRegions() {
+        return Arrays.asList(lr1, lr2).iterator();
+      }
+    };
+    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(leftover1Sum))))
+        .when(rs)
+        .reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
+
+    // We shouldn't compute all of these region sizes, just the leftovers
+    final Region r1 = mockRegionWithSize(Arrays.asList(1024L, 2048L));
+    final Region r2 = mockRegionWithSize(Arrays.asList(1024L * 1024L));
+    final Region r3 = mockRegionWithSize(Arrays.asList(10L * 1024L * 1024L));
+    // lr2 is no longer online, so it should be ignored
+    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2, r3, lr1));
+
+    chore.chore();
+  }
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testIgnoreSplitParents() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+
+    // Two regions with store sizes; the split parent's sizes should be ignored
+    final List<Long> r1Sizes = Arrays.asList(1024L, 2048L);
+    final long r1Sum = sum(r1Sizes);
+    final List<Long> r2Sizes = Arrays.asList(1024L * 1024L);
+
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum))))
+        .when(rs)
+        .reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
+
+    final Region r1 = mockRegionWithSize(r1Sizes);
+    final Region r2 = mockSplitParentRegionWithSize(r2Sizes);
+    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2));
+    chore.chore();
+  }
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testIgnoreRegionReplicas() {
+    final Configuration conf = getDefaultHBaseConfiguration();
+    final HRegionServer rs = mockRegionServer(conf);
+
+    // Two regions with store sizes; the non-default replica's sizes should be ignored
+    final List<Long> r1Sizes = Arrays.asList(1024L, 2048L);
+    final long r1Sum = sum(r1Sizes);
+    final List<Long> r2Sizes = Arrays.asList(1024L * 1024L);
+
+    final FileSystemUtilizationChore chore = new FileSystemUtilizationChore(rs);
+    doAnswer(new ExpectedRegionSizeSummationAnswer(sum(Arrays.asList(r1Sum))))
+        .when(rs)
+        .reportRegionSizesForQuotas((Map<HRegionInfo,Long>) any(Map.class));
+
+    final Region r1 = mockRegionWithSize(r1Sizes);
+    final Region r2 = mockRegionReplicaWithSize(r2Sizes);
+    when(rs.getOnlineRegions()).thenReturn(Arrays.asList(r1, r2));
+    chore.chore();
+  }
+
+  /**
+   * Creates an HBase Configuration object for the default values.
+   */
+  private Configuration getDefaultHBaseConfiguration() {
+    final Configuration conf = HBaseConfiguration.create();
+    conf.addResource("hbase-default.xml");
+    return conf;
+  }
+
+  /**
+   * Creates a mocked HRegionServer that returns the given Configuration.
+   */
+  private HRegionServer mockRegionServer(Configuration conf) {
+    final HRegionServer rs = mock(HRegionServer.class);
+    when(rs.getConfiguration()).thenReturn(conf);
+    return rs;
+  }
+
+  /**
+   * Sums the collection of non-null numbers.
+   */
+  private long sum(Collection<Long> values) {
+    long sum = 0L;
+    for (Long value : values) {
+      assertNotNull(value);
+      sum += value;
+    }
+    return sum;
+  }
+
+  /**
+   * Creates a region with a number of Stores equal to the size of {@code storeSizes}. Each
+   * {@link Store} will have a reported size corresponding to the element in {@code storeSizes}.
+   *
+   * @param storeSizes A list of sizes for each Store.
+   * @return A mocked Region.
+   */
+  private Region mockRegionWithSize(Collection<Long> storeSizes) {
+    final Region r = mock(Region.class);
+    final HRegionInfo info = mock(HRegionInfo.class);
+    when(r.getRegionInfo()).thenReturn(info);
+    List<Store> stores = new ArrayList<>();
+    when(r.getStores()).thenReturn(stores);
+    for (Long storeSize : storeSizes) {
+      final Store s = mock(Store.class);
+      stores.add(s);
+      when(s.getStorefilesSize()).thenReturn(storeSize);
+    }
+    return r;
+  }
+
+  /**
+   * Creates a region which is the parent of a split.
+   *
+   * @param storeSizes A list of sizes for each Store.
+   * @return A mocked Region.
+   */
+  private Region mockSplitParentRegionWithSize(Collection<Long> storeSizes) {
+    final Region r = mockRegionWithSize(storeSizes);
+    final HRegionInfo info = r.getRegionInfo();
+    when(info.isSplitParent()).thenReturn(true);
+    return r;
+  }
+
+  /**
+   * Creates a region whose replicaId is <code>1</code> (i.e. a non-default region replica).
+   *
+   * @param storeSizes A list of sizes for each Store.
+   * @return A mocked Region.
+   */
+  private Region mockRegionReplicaWithSize(Collection<Long> storeSizes) {
+    final Region r = mockRegionWithSize(storeSizes);
+    final HRegionInfo info = r.getRegionInfo();
+    when(info.getReplicaId()).thenReturn(1);
+    return r;
+  }
+
+  /**
+   * An Answer implementation which verifies that the sum of the reported Region sizes matches the expected value.
+   */
+  private static class ExpectedRegionSizeSummationAnswer implements Answer<Void> {
+    private final long expectedSize;
+
+    public ExpectedRegionSizeSummationAnswer(long expectedSize) {
+      this.expectedSize = expectedSize;
+    }
+
+    @Override
+    public Void answer(InvocationOnMock invocation) throws Throwable {
+      Object[] args = invocation.getArguments();
+      assertEquals(1, args.length);
+      @SuppressWarnings("unchecked")
+      Map<HRegionInfo,Long> regionSizes = (Map<HRegionInfo,Long>) args[0];
+      long sum = 0L;
+      for (Long regionSize : regionSizes.values()) {
+        sum += regionSize;
+      }
+      assertEquals(expectedSize, sum);
+      return null;
+    }
+  }
+}
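
The configuration-related tests above exercise three chore settings (period, initial delay, and time unit). A short sketch of overriding them in code follows; the numeric values are arbitrary examples, and only the constant names, which the test itself references, are taken from this patch.

  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;

  // Example values only: schedule the chore every 600 seconds with a 30-second initial delay.
  static Configuration tunedChoreConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 600);
    conf.setLong(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 30L);
    conf.set(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_TIMEUNIT_KEY, TimeUnit.SECONDS.name());
    return conf;
  }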

http://git-wip-us.apache.org/repos/asf/hbase/blob/e48b7fa4/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java
new file mode 100644
index 0000000..ed8a2f3
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestRegionSizeUse.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Test class which verifies that region sizes are reported to the master.
+ */
+@Category(MediumTests.class)
+public class TestRegionSizeUse {
+  private static final Log LOG = LogFactory.getLog(TestRegionSizeUse.class);
+  private static final int SIZE_PER_VALUE = 256;
+  private static final int NUM_SPLITS = 10;
+  private static final String F1 = "f1";
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+
+  private MiniHBaseCluster cluster;
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @Before
+  public void setUp() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_DELAY_KEY, 1000);
+    conf.setInt(FileSystemUtilizationChore.FS_UTILIZATION_CHORE_PERIOD_KEY, 1000);
+    conf.setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
+    cluster = TEST_UTIL.startMiniCluster(2);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testBasicRegionSizeReports() throws Exception {
+    final long bytesWritten = 5L * 1024L * 1024L; // 5MB
+    final TableName tn = writeData(bytesWritten);
+    LOG.debug("Data was written to HBase");
+    final Admin admin = TEST_UTIL.getAdmin();
+    // Push the data to disk.
+    admin.flush(tn);
+    LOG.debug("Data flushed to disk");
+    // Get the final region distribution
+    final List<HRegionInfo> regions = TEST_UTIL.getAdmin().getTableRegions(tn);
+
+    HMaster master = cluster.getMaster();
+    MasterQuotaManager quotaManager = master.getMasterQuotaManager();
+    Map<HRegionInfo,Long> regionSizes = quotaManager.snapshotRegionSizes();
+    // Wait until we get all of the region reports for our table
+    // The table may split, so make sure we have at least as many as expected right after we
+    // finished writing the data.
+    int observedRegions = numRegionsForTable(tn, regionSizes);
+    while (observedRegions < regions.size()) {
+      LOG.debug("Expecting more regions. Saw " + observedRegions
+          + " region sizes reported, expected at least " + regions.size());
+      Thread.sleep(1000);
+      regionSizes = quotaManager.snapshotRegionSizes();
+      observedRegions = numRegionsForTable(tn, regionSizes);
+    }
+
+    LOG.debug("Observed region sizes by the HMaster: " + regionSizes);
+    long totalRegionSize = 0L;
+    for (Long regionSize : regionSizes.values()) {
+      totalRegionSize += regionSize;
+    }
+    assertTrue("Expected region size report to exceed " + bytesWritten + ", but was "
+        + totalRegionSize + ". RegionSizes=" + regionSizes, bytesWritten < totalRegionSize);
+  }
+
+  /**
+   * Writes at least {@code sizeInBytes} bytes of data to HBase and returns the TableName used.
+   *
+   * @param sizeInBytes The amount of data to write in bytes.
+   * @return The table the data was written to
+   */
+  private TableName writeData(long sizeInBytes) throws IOException {
+    final Connection conn = TEST_UTIL.getConnection();
+    final Admin admin = TEST_UTIL.getAdmin();
+    final TableName tn = TableName.valueOf(testName.getMethodName());
+
+    // Delete the old table
+    if (admin.tableExists(tn)) {
+      admin.disableTable(tn);
+      admin.deleteTable(tn);
+    }
+
+    // Create the table
+    HTableDescriptor tableDesc = new HTableDescriptor(tn);
+    tableDesc.addFamily(new HColumnDescriptor(F1));
+    admin.createTable(tableDesc, Bytes.toBytes("1"), Bytes.toBytes("9"), NUM_SPLITS);
+
+    final Table table = conn.getTable(tn);
+    try {
+      List<Put> updates = new ArrayList<>();
+      long bytesToWrite = sizeInBytes;
+      long rowKeyId = 0L;
+      final StringBuilder sb = new StringBuilder();
+      final Random r = new Random();
+      while (bytesToWrite > 0L) {
+        sb.setLength(0);
+        sb.append(Long.toString(rowKeyId));
+        // Use the reverse counter as the rowKey to get even spread across all regions
+        Put p = new Put(Bytes.toBytes(sb.reverse().toString()));
+        byte[] value = new byte[SIZE_PER_VALUE];
+        r.nextBytes(value);
+        p.addColumn(Bytes.toBytes(F1), Bytes.toBytes("q1"), value);
+        updates.add(p);
+
+        // Flush a batch once we have accumulated 50 puts (about 12.5 KB of values)
+        if (updates.size() > 50) {
+          table.put(updates);
+          updates.clear();
+        }
+
+        // Just count the value size, ignore the size of rowkey + column
+        bytesToWrite -= SIZE_PER_VALUE;
+        rowKeyId++;
+      }
+
+      // Write the final batch
+      if (!updates.isEmpty()) {
+        table.put(updates);
+      }
+
+      return tn;
+    } finally {
+      table.close();
+    }
+  }
+
+  /**
+   * Computes the number of regions for the given table that have a positive size.
+   *
+   * @param tn The TableName in question
+   * @param regions A collection of region sizes
+   * @return The number of regions for the given table.
+   */
+  private int numRegionsForTable(TableName tn, Map<HRegionInfo,Long> regions) {
+    int sum = 0;
+    for (Entry<HRegionInfo,Long> entry : regions.entrySet()) {
+      if (tn.equals(entry.getKey().getTable()) && 0 < entry.getValue()) {
+        sum++;
+      }
+    }
+    return sum;
+  }
+}
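
As a quick sanity check on the data volume in testBasicRegionSizeReports above: only the 256-byte values count against the 5 MB budget, so the writer produces roughly 20,480 rows spread across the pre-split regions.

  // Arithmetic behind the test above (values copied from the test constants).
  long bytesWritten = 5L * 1024L * 1024L;           // 5 MB target
  int sizePerValue = 256;                           // SIZE_PER_VALUE
  long expectedRows = bytesWritten / sizePerValue;  // 20480 rows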

http://git-wip-us.apache.org/repos/asf/hbase/blob/e48b7fa4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
new file mode 100644
index 0000000..3244681
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerRegionSpaceUseReport.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.mock;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test class for isolated (non-cluster) tests surrounding the report
+ * of Region space use to the Master by RegionServers.
+ */
+@Category(SmallTests.class)
+public class TestRegionServerRegionSpaceUseReport {
+
+  @Test
+  public void testConversion() {
+    TableName tn = TableName.valueOf("table1");
+    HRegionInfo hri1 = new HRegionInfo(tn, Bytes.toBytes("a"), Bytes.toBytes("b"));
+    HRegionInfo hri2 = new HRegionInfo(tn, Bytes.toBytes("b"), Bytes.toBytes("c"));
+    HRegionInfo hri3 = new HRegionInfo(tn, Bytes.toBytes("c"), Bytes.toBytes("d"));
+    Map<HRegionInfo,Long> sizes = new HashMap<>();
+    sizes.put(hri1, 1024L * 1024L);
+    sizes.put(hri2, 1024L * 1024L * 8L);
+    sizes.put(hri3, 1024L * 1024L * 32L);
+
+    // Call the real method to convert the map into a protobuf
+    HRegionServer rs = mock(HRegionServer.class);
+    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
+    doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+
+    RegionSpaceUseReportRequest requests = rs.buildRegionSpaceUseReportRequest(sizes);
+    assertEquals(sizes.size(), requests.getSpaceUseCount());
+    for (RegionSpaceUse spaceUse : requests.getSpaceUseList()) {
+      RegionInfo ri = spaceUse.getRegion();
+      HRegionInfo hri = HRegionInfo.convert(ri);
+      Long expectedSize = sizes.remove(hri);
+      assertNotNull("Could not find size for HRI: " + hri, expectedSize);
+      assertEquals(expectedSize.longValue(), spaceUse.getSize());
+    }
+    assertTrue("Should not have any space use entries left: " + sizes, sizes.isEmpty());
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testNullMap() {
+    // Call the real method to convert the map into a protobuf
+    HRegionServer rs = mock(HRegionServer.class);
+    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
+    doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+
+    rs.buildRegionSpaceUseReportRequest(null);
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testMalformedMap() {
+    TableName tn = TableName.valueOf("table1");
+    HRegionInfo hri1 = new HRegionInfo(tn, Bytes.toBytes("a"), Bytes.toBytes("b"));
+    Map<HRegionInfo,Long> sizes = new HashMap<>();
+    sizes.put(hri1, null);
+
+    // Call the real method to convert the map into a protobuf
+    HRegionServer rs = mock(HRegionServer.class);
+    doCallRealMethod().when(rs).buildRegionSpaceUseReportRequest(any(Map.class));
+    doCallRealMethod().when(rs).convertRegionSize(any(HRegionInfo.class), anyLong());
+
+    rs.buildRegionSpaceUseReportRequest(sizes);
+  }
+}
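
The conversion test above checks the RegionServer-to-protobuf direction. For completeness, the receiving side would unpack the same message back into a map along the lines of the sketch below; this mirrors the assertions in testConversion and is not code from this patch.

  import java.util.HashMap;
  import java.util.Map;
  import org.apache.hadoop.hbase.HRegionInfo;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;

  // Sketch only: decode a space-use report back into an HRegionInfo -> size map.
  static Map<HRegionInfo, Long> decode(RegionSpaceUseReportRequest request) {
    Map<HRegionInfo, Long> sizes = new HashMap<>();
    for (RegionSpaceUse spaceUse : request.getSpaceUseList()) {
      sizes.put(HRegionInfo.convert(spaceUse.getRegion()), spaceUse.getSize());
    }
    return sizes;
  }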


[34/50] [abbrv] hbase git commit: HBASE-17502 Document hadoop pre-2.6.1 and Java 1.8 Kerberos problem in our hadoop support matrix

Posted by el...@apache.org.
HBASE-17502 Document hadoop pre-2.6.1 and Java 1.8 Kerberos problem in our hadoop support matrix


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a9e3df8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a9e3df8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a9e3df8

Branch: refs/heads/HBASE-16961
Commit: 9a9e3df8565a8ba36779a442907a38772ab59a72
Parents: fb8f924
Author: Jerry He <je...@apache.org>
Authored: Sat Jan 21 13:24:33 2017 -0800
Committer: Jerry He <je...@apache.org>
Committed: Sat Jan 21 13:24:33 2017 -0800

----------------------------------------------------------------------
 src/main/asciidoc/_chapters/configuration.adoc | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a9e3df8/src/main/asciidoc/_chapters/configuration.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc
index baa4d4c..d189c9f 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -243,6 +243,15 @@ Use the following legend to interpret this table:
 |Hadoop-2.7.1+ | NT | NT | NT | NT | S | S | S
 |===
 
+.Hadoop Pre-2.6.1 and JDK 1.8 Kerberos
+[TIP]
+====
+When using pre-2.6.1 Hadoop versions and JDK 1.8 in a Kerberos environment, the HBase server can fail
+and abort due to a Kerberos keytab relogin error. Late versions of JDK 1.7 (for example 1.7.0_80) have the problem too.
+Refer to link:https://issues.apache.org/jira/browse/HADOOP-10786[HADOOP-10786] for additional details.
+Consider upgrading to Hadoop 2.6.1+ in this case.
+====
+
 .Hadoop 2.6.x
 [TIP]
 ====


[26/50] [abbrv] hbase git commit: HBASE-17497 Add first async MetaTableAccessor impl and Implement tableExists method

Posted by el...@apache.org.
HBASE-17497 Add first async MetaTableAccessor impl and Implement tableExists method

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2ee3c73f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2ee3c73f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2ee3c73f

Branch: refs/heads/HBASE-16961
Commit: 2ee3c73f76ea80d50fd921fa8cbe84d21f376785
Parents: 31f3e8f
Author: Guanghao Zhang <zg...@apache.org>
Authored: Thu Jan 19 18:35:19 2017 +0800
Committer: Michael Stack <st...@apache.org>
Committed: Fri Jan 20 09:24:06 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/AsyncMetaTableAccessor.java    | 113 +++++++++++++++++++
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |   8 ++
 .../hadoop/hbase/client/AsyncHBaseAdmin.java    |   7 ++
 .../hadoop/hbase/client/TestAsyncAdmin.java     |  16 +++
 4 files changed, 144 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/2ee3c73f/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
new file mode 100644
index 0000000..9187473
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.apache.hadoop.hbase.TableName.META_TABLE_NAME;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.AsyncConnection;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RawAsyncTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+
+/**
+ * The asynchronous meta table accessor. Used to read/write region and assignment information stored
+ * in <code>hbase:meta</code>.
+ */
+@InterfaceAudience.Private
+public class AsyncMetaTableAccessor {
+
+  private static final Log LOG = LogFactory.getLog(AsyncMetaTableAccessor.class);
+
+  private static CompletableFuture<RawAsyncTable> getMetaTable(AsyncConnection conn) {
+    return CompletableFuture.completedFuture(conn.getRawTable(META_TABLE_NAME));
+  }
+
+  public static CompletableFuture<Boolean> tableExists(AsyncConnection conn, TableName tableName) {
+    if (tableName.equals(META_TABLE_NAME)) {
+      return CompletableFuture.completedFuture(true);
+    }
+    return getTableState(conn, tableName).thenApply(Optional::isPresent);
+  }
+
+  private static CompletableFuture<Optional<TableState>> getTableState(AsyncConnection conn,
+      TableName tableName) {
+    CompletableFuture<Optional<TableState>> future = new CompletableFuture<>();
+    getMetaTable(conn).thenAccept((metaTable) -> {
+      Get get = new Get(tableName.getName()).addColumn(getTableFamily(), getStateColumn());
+      long time = EnvironmentEdgeManager.currentTime();
+      try {
+        get.setTimeRange(0, time);
+      } catch (IOException ioe) {
+        future.completeExceptionally(ioe);
+        return;
+      }
+      metaTable.get(get).whenComplete((result, error) -> {
+        if (error != null) {
+          future.completeExceptionally(error);
+          return;
+        }
+        try {
+          future.complete(getTableState(result));
+        } catch (IOException e) {
+          future.completeExceptionally(e);
+        }
+      });
+    });
+    return future;
+  }
+
+  private static Optional<TableState> getTableState(Result r) throws IOException {
+    Cell cell = r.getColumnLatestCell(getTableFamily(), getStateColumn());
+    if (cell == null) return Optional.empty();
+    try {
+      return Optional.of(TableState.parseFrom(
+        TableName.valueOf(r.getRow()),
+        Arrays.copyOfRange(cell.getValueArray(), cell.getValueOffset(), cell.getValueOffset()
+            + cell.getValueLength())));
+    } catch (DeserializationException e) {
+      throw new IOException("Failed to parse table state from result: " + r, e);
+    }
+  }
+
+  /**
+   * Returns the column family used for table columns.
+   * @return HConstants.TABLE_FAMILY.
+   */
+  private static byte[] getTableFamily() {
+    return HConstants.TABLE_FAMILY;
+  }
+
+  /**
+   * Returns the column qualifier for serialized table state
+   * @return HConstants.TABLE_STATE_QUALIFIER
+   */
+  private static byte[] getStateColumn() {
+    return HConstants.TABLE_STATE_QUALIFIER;
+  }
+}
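
A usage sketch for the new accessor: given an AsyncConnection that the caller has already obtained (how to create one is outside this patch), a non-blocking existence check composes like this.

  import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.AsyncConnection;

  // Sketch only: print whether a table exists without blocking the caller.
  static void printTableExists(AsyncConnection conn, TableName tableName) {
    AsyncMetaTableAccessor.tableExists(conn, tableName)
        .thenAccept(exists -> System.out.println(tableName + " exists? " + exists));
  }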

http://git-wip-us.apache.org/repos/asf/hbase/blob/2ee3c73f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index fadeebe..1ce23b9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.client;
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
 
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 
@@ -31,6 +32,13 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 public interface AsyncAdmin {
 
   /**
+   * @param tableName Table to check.
+   * @return True if table exists already. The return value will be wrapped by a
+   *         {@link CompletableFuture}.
+   */
+  CompletableFuture<Boolean> tableExists(final TableName tableName);
+
+  /**
    * Turn the load balancer on or off.
    * @param on
    * @return Previous balancer value wrapped by a {@link CompletableFuture}.

http://git-wip-us.apache.org/repos/asf/hbase/blob/2ee3c73f/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index 1dd92e5..9398972 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -21,6 +21,8 @@ import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.hbase.AsyncMetaTableAccessor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder;
@@ -141,4 +143,9 @@ public class AsyncHBaseAdmin implements AsyncAdmin {
             (s, c, req, done) -> s.isBalancerEnabled(c, req, done), (resp) -> resp.getEnabled()))
         .call();
   }
+
+  @Override
+  public CompletableFuture<Boolean> tableExists(TableName tableName) {
+    return AsyncMetaTableAccessor.tableExists(connection, tableName);
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/2ee3c73f/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
index 9beae1f..70b4cf2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdmin.java
@@ -23,8 +23,10 @@ import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -39,6 +41,7 @@ public class TestAsyncAdmin {
 
   private static final Log LOG = LogFactory.getLog(TestAdmin1.class);
   private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static byte [] FAMILY = Bytes.toBytes("testFamily");
 
   private static AsyncConnection ASYNC_CONN;
   private AsyncAdmin admin;
@@ -63,6 +66,19 @@ public class TestAsyncAdmin {
     this.admin = ASYNC_CONN.getAdmin();
   }
 
+  @Test
+  public void testTableExist() throws Exception {
+    final TableName table = TableName.valueOf("testTableExist");
+    boolean exist;
+    exist = admin.tableExists(table).get();
+    assertEquals(false, exist);
+    TEST_UTIL.createTable(table, FAMILY);
+    exist = admin.tableExists(table).get();
+    assertEquals(true, exist);
+    exist = admin.tableExists(TableName.META_TABLE_NAME).get();
+    assertEquals(true, exist);
+  }
+
   @Test(timeout = 30000)
   public void testBalancer() throws Exception {
     boolean initialState = admin.isBalancerEnabled().get();


[25/50] [abbrv] hbase git commit: HBASE-17496 RSGroup shell commands:get_server_rsgroup don't work and commands display an incorrect result size (Guangxu Cheng)

Posted by el...@apache.org.
HBASE-17496 RSGroup shell commands:get_server_rsgroup don't work and commands display an incorrect result size (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/31f3e8f8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/31f3e8f8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/31f3e8f8

Branch: refs/heads/HBASE-16961
Commit: 31f3e8f833b65d87bb70dd47268213ff03be20b9
Parents: 4fdd6ff
Author: Michael Stack <st...@apache.org>
Authored: Fri Jan 20 09:20:31 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Fri Jan 20 09:20:31 2017 -0800

----------------------------------------------------------------------
 hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb        | 3 +--
 hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb | 5 ++---
 hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb  | 3 +--
 hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb      | 3 +--
 4 files changed, 5 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/31f3e8f8/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
index ce4be71..ad8a0e3 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
@@ -32,12 +32,11 @@ EOF
       end
 
       def command(group_name)
-        now = Time.now
         formatter.header(['GROUP INFORMATION'])
         rsgroup_admin.get_rsgroup(group_name) do |s|
           formatter.row([s])
         end
-        formatter.footer(now)
+        formatter.footer()
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/31f3e8f8/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
index a689a7c..9884cd1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
@@ -30,10 +30,9 @@ EOF
       end
 
       def command(server)
-        now = Time.now
-        group_name = rsgroup_admin.getRSGroupOfServer(server).getName
+        group_name = rsgroup_admin.get_rsgroup_of_server(server).getName
         formatter.row([group_name])
-        formatter.footer(now, 1)
+        formatter.footer(1)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/31f3e8f8/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
index d15cffa..650cda5 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_table_rsgroup.rb
@@ -30,11 +30,10 @@ EOF
       end
 
       def command(table)
-        now = Time.now
         group_name =
             rsgroup_admin.get_rsgroup_of_table(table).getName
         formatter.row([group_name])
-        formatter.footer(now, 1)
+        formatter.footer(1)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/31f3e8f8/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb b/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
index 6ea1d45..cabe84b 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_rsgroups.rb
@@ -34,7 +34,6 @@ EOF
       end
 
       def command(regex = '.*')
-        now = Time.now
         formatter.header(['GROUPS'])
 
         regex = /#{regex}/ unless regex.is_a?(Regexp)
@@ -43,7 +42,7 @@ EOF
           formatter.row([group])
         end
 
-        formatter.footer(now, list.size)
+        formatter.footer(list.size)
       end
     end
   end


[07/50] [abbrv] hbase git commit: HBASE-17470 Remove merge region code from region server (Stephen Yuan Jiang)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-protocol-shaded/src/main/protobuf/Admin.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index e8cf10c..338c80b 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -189,22 +189,6 @@ message UpdateFavoredNodesResponse {
   optional uint32 response = 1;
 }
 
-/**
- * Merges the specified regions.
- * <p>
- * This method currently closes the regions and then merges them
- */
-message MergeRegionsRequest {
-  required RegionSpecifier region_a = 1;
-  required RegionSpecifier region_b = 2;
-  optional bool forcible = 3 [default = false];
-  // wall clock time from master
-  optional uint64 master_system_time = 4;
-}
-
-message MergeRegionsResponse {
-}
-
 // Protocol buffer version of WAL for replication
 message WALEntry {
   required WALKey key = 1;
@@ -307,9 +291,6 @@ service AdminService {
   rpc CompactRegion(CompactRegionRequest)
     returns(CompactRegionResponse);
 
-  rpc MergeRegions(MergeRegionsRequest)
-    returns(MergeRegionsResponse);
-
   rpc ReplicateWALEntry(ReplicateWALEntryRequest)
     returns(ReplicateWALEntryResponse);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-protocol-shaded/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index 7cd9921..e62f52c 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -80,21 +80,6 @@ message MoveRegionResponse {
 }
 
 /**
- * Dispatch merging the specified regions.
- */
-message DispatchMergingRegionsRequest {
-  required RegionSpecifier region_a = 1;
-  required RegionSpecifier region_b = 2;
-  optional bool forcible = 3 [default = false];
-  optional uint64 nonce_group = 4 [default = 0];
-  optional uint64 nonce = 5 [default = 0];
-}
-
-message DispatchMergingRegionsResponse {
-  optional uint64 proc_id = 1;
-}
-
-/**
  * Merging the specified regions in a table.
  */
 message MergeTableRegionsRequest {
@@ -625,10 +610,6 @@ service MasterService {
   rpc MoveRegion(MoveRegionRequest)
     returns(MoveRegionResponse);
 
- /** Master dispatch merging the regions */
-  rpc DispatchMergingRegions(DispatchMergingRegionsRequest)
-    returns(DispatchMergingRegionsResponse);
-
  /** Master merge the regions */
   rpc MergeTableRegions(MergeTableRegionsRequest)
     returns(MergeTableRegionsResponse);

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 23d914e..ef3f973 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -262,21 +262,6 @@ message RestoreSnapshotStateData {
   repeated RestoreParentToChildRegionsPair parent_to_child_regions_pair_list = 7;
 }
 
-enum DispatchMergingRegionsState {
-  DISPATCH_MERGING_REGIONS_PREPARE = 1;
-  DISPATCH_MERGING_REGIONS_PRE_OPERATION = 2;
-  DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS = 3;
-  DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS = 4;
-  DISPATCH_MERGING_REGIONS_POST_OPERATION = 5;
-}
-
-message DispatchMergingRegionsStateData {
-  required UserInformation user_info = 1;
-  required TableName table_name = 2;
-  repeated RegionInfo region_info = 3;
-  optional bool forcible = 4;
-}
-
 enum MergeTableRegionsState {
   MERGE_TABLE_REGIONS_PREPARE = 1;
   MERGE_TABLE_REGIONS_MOVE_REGION_TO_SAME_RS = 2;

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
index 00376cb..a0a1d49 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
@@ -59,11 +59,23 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver
       HTableDescriptor desc, HRegionInfo[] regions) throws IOException {
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *   (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
+   *   Use {@link #preMergeRegions(ObserverContext, HRegionInfo[])}
+   */
+  @Deprecated
   @Override
   public void preDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException {
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *   (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
+   *   Use {@link #postMergeRegions(ObserverContext, HRegionInfo[])}
+   */
+  @Deprecated
   @Override
   public void postDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index 461148b..98f21b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -111,11 +111,23 @@ public class BaseMasterObserver implements MasterObserver {
       final HRegionInfo[] regions) throws IOException {
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *   (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
+   *   Use {@link #preMergeRegions(ObserverContext, HRegionInfo[])}
+   */
+  @Deprecated
   @Override
   public void preDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException {
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *   (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
+   *   Use {@link #postMergeRegions(ObserverContext, HRegionInfo[])}
+   */
+  @Deprecated
   @Override
   public void postDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 82b3cfa..bb0e732 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -1699,7 +1699,11 @@ public interface MasterObserver extends Coprocessor {
    * @param regionA first region to be merged
    * @param regionB second region to be merged
    * @throws IOException if an error occurred on the coprocessor
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *   (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
+   *   Use {@link #preMergeRegions(ObserverContext, HRegionInfo[])}
    */
+  @Deprecated
   void preDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException;
 
@@ -1709,7 +1713,11 @@ public interface MasterObserver extends Coprocessor {
    * @param regionA first region to be merged
    * @param regionB second region to be merged
    * @throws IOException if an error occurred on the coprocessor
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   *   (<a href="https://issues.apache.org/jira/browse/HBASE-">HBASE-</a>).
+   *   Use {@link #postMergeRegions(ObserverContext, HRegionInfo[])}
    */
+  @Deprecated
   void postDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> c,
       final HRegionInfo regionA, final HRegionInfo regionB) throws IOException;
 

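Taken together, the deprecation hunks above steer coprocessor authors from the pairwise preDispatchMerge/postDispatchMerge hooks to the region-array variants. A minimal sketch of an observer written against the new hooks, assuming the preMergeRegions(ObserverContext, HRegionInfo[]) and postMergeRegions(ObserverContext, HRegionInfo[]) signatures referenced in the javadoc; the class name and log messages are hypothetical and not part of this commit:

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;

/** Hypothetical observer using the region-array hooks instead of the deprecated pair. */
public class MergeAuditObserver extends BaseMasterObserver {
  private static final Log LOG = LogFactory.getLog(MergeAuditObserver.class);

  @Override
  public void preMergeRegions(final ObserverContext<MasterCoprocessorEnvironment> ctx,
      final HRegionInfo[] regionsToMerge) throws IOException {
    // Replaces preDispatchMerge(ctx, regionA, regionB): every region to merge arrives in one array.
    LOG.info("About to merge " + regionsToMerge.length + " regions of table "
        + regionsToMerge[0].getTable());
  }

  @Override
  public void postMergeRegions(final ObserverContext<MasterCoprocessorEnvironment> ctx,
      final HRegionInfo[] regionsToMerge) throws IOException {
    // Replaces postDispatchMerge(ctx, regionA, regionB).
    LOG.info("Merge request submitted for " + regionsToMerge.length + " regions");
  }
}

The array form also leaves room for merging more than two regions at once, which the removed two-argument hooks could not express.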
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index a41960b..ab7a25e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -109,7 +109,6 @@ import org.apache.hadoop.hbase.master.procedure.CreateTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteColumnFamilyProcedure;
 import org.apache.hadoop.hbase.master.procedure.DeleteTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
-import org.apache.hadoop.hbase.master.procedure.DispatchMergingRegionsProcedure;
 import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -1419,55 +1418,6 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
-  public long dispatchMergingRegions(
-      final HRegionInfo regionInfoA,
-      final HRegionInfo regionInfoB,
-      final boolean forcible,
-      final long nonceGroup,
-      final long nonce) throws IOException {
-    checkInitialized();
-
-    TableName tableName = regionInfoA.getTable();
-    if (tableName == null || regionInfoB.getTable() == null) {
-      throw new UnknownRegionException ("Can't merge regions without table associated");
-    }
-
-    if (!tableName.equals(regionInfoB.getTable())) {
-      throw new IOException ("Cannot merge regions from two different tables");
-    }
-
-    if (regionInfoA.compareTo(regionInfoB) == 0) {
-      throw new MergeRegionException(
-        "Cannot merge a region to itself " + regionInfoA + ", " + regionInfoB);
-    }
-
-    HRegionInfo [] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge [0] = regionInfoA;
-    regionsToMerge [1] = regionInfoB;
-
-    return MasterProcedureUtil.submitProcedure(
-        new MasterProcedureUtil.NonceProcedureRunnable(this, nonceGroup, nonce) {
-      @Override
-      protected void run() throws IOException {
-        getMaster().getMasterCoprocessorHost().preDispatchMerge(regionInfoA, regionInfoB);
-
-        LOG.info(getClientIdAuditPrefix() + " Merge regions "
-            + regionInfoA.getEncodedName() + " and " + regionInfoB.getEncodedName());
-
-        submitProcedure(new DispatchMergingRegionsProcedure(procedureExecutor.getEnvironment(),
-            tableName, regionsToMerge, forcible));
-
-        getMaster().getMasterCoprocessorHost().postDispatchMerge(regionInfoA, regionInfoB);
-      }
-
-      @Override
-      protected String getDescription() {
-        return "DisableTableProcedure";
-      }
-    });
-  }
-
-  @Override
   public long mergeRegions(
       final HRegionInfo[] regionsToMerge,
       final boolean forcible,

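With dispatchMergingRegions gone from HMaster, merges are funneled through the remaining mergeRegions(HRegionInfo[], ...) entry point visible in the surviving context above. As a rough client-side illustration only, assuming the long-standing Admin#mergeRegions(byte[], byte[], boolean) call (not something touched by this diff), a merge request could be issued like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class MergeRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      // Encoded names of two adjacent regions; these values are placeholders.
      byte[] regionA = Bytes.toBytes("d752bf8d81cc3b2aef319fd4c4de1e03");
      byte[] regionB = Bytes.toBytes("8fdc2f0ae1d0f4c2f0a1c3b2d4e5f607");
      // forcible=false: only adjacent regions may be merged.
      admin.mergeRegions(regionA, regionB, false);
    }
  }
}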
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 2f17a5f..3dec2e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -773,28 +773,6 @@ public class MasterCoprocessorHost
     });
   }
 
-  public void preDispatchMerge(final HRegionInfo regionInfoA, final HRegionInfo regionInfoB)
-      throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
-      @Override
-      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.preDispatchMerge(ctx, regionInfoA, regionInfoB);
-      }
-    });
-  }
-
-  public void postDispatchMerge(final HRegionInfo regionInfoA, final HRegionInfo regionInfoB)
-      throws IOException {
-    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
-      @Override
-      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
-          throws IOException {
-        oserver.postDispatchMerge(ctx, regionInfoA, regionInfoB);
-      }
-    });
-  }
-
   public void preMergeRegions(final HRegionInfo[] regionsToMerge)
       throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 5873986..1151c92 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -501,52 +501,6 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
-  public DispatchMergingRegionsResponse dispatchMergingRegions(RpcController c,
-      DispatchMergingRegionsRequest request) throws ServiceException {
-    try {
-      master.checkInitialized();
-    } catch (IOException ioe) {
-      throw new ServiceException(ioe);
-    }
-
-    final byte[] encodedNameOfRegionA = request.getRegionA().getValue()
-      .toByteArray();
-    final byte[] encodedNameOfRegionB = request.getRegionB().getValue()
-      .toByteArray();
-    if (request.getRegionA().getType() != RegionSpecifierType.ENCODED_REGION_NAME
-        || request.getRegionB().getType() != RegionSpecifierType.ENCODED_REGION_NAME) {
-      LOG.warn("mergeRegions specifier type: expected: "
-        + RegionSpecifierType.ENCODED_REGION_NAME + " actual: region_a="
-        + request.getRegionA().getType() + ", region_b="
-        + request.getRegionB().getType());
-    }
-
-    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
-    RegionState regionStateA = regionStates.getRegionState(Bytes.toString(encodedNameOfRegionA));
-    RegionState regionStateB = regionStates.getRegionState(Bytes.toString(encodedNameOfRegionB));
-    if (regionStateA == null || regionStateB == null) {
-      throw new ServiceException(new UnknownRegionException(
-          Bytes.toStringBinary(regionStateA == null ? encodedNameOfRegionA
-              : encodedNameOfRegionB)));
-    }
-
-    final HRegionInfo regionInfoA = regionStateA.getRegion();
-    final HRegionInfo regionInfoB = regionStateB.getRegion();
-
-    try {
-      long procId = master.dispatchMergingRegions(
-        regionInfoA,
-        regionInfoB,
-        request.getForcible(),
-        request.getNonceGroup(),
-        request.getNonce());
-      return DispatchMergingRegionsResponse.newBuilder().setProcId(procId).build();
-    } catch (IOException ioe) {
-      throw new ServiceException(ioe);
-    }
-  }
-
-  @Override
   public EnableCatalogJanitorResponse enableCatalogJanitor(RpcController c,
       EnableCatalogJanitorRequest req) throws ServiceException {
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 5019eda..79ebca5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -326,23 +326,6 @@ public interface MasterServices extends Server {
   boolean registerService(Service instance);
 
   /**
-   * Merge two regions. The real implementation is on the regionserver; the master
-   * just moves the regions together and sends a MERGE RPC to the regionserver.
-   * @param region_a region to merge
-   * @param region_b region to merge
-   * @param forcible true to force the merge; otherwise only two adjacent regions
-   *          will be merged
-   * @return procedure Id
-   * @throws IOException
-   */
-  long dispatchMergingRegions(
-    final HRegionInfo region_a,
-    final HRegionInfo region_b,
-    final boolean forcible,
-    final long nonceGroup,
-    final long nonce) throws IOException;
-
-  /**
    * @return true if master is the active one
    */
   boolean isActiveMaster();

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index f3b21ac..38493cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -896,37 +896,6 @@ public class ServerManager {
   }
 
   /**
-   * Sends a MERGE REGIONS RPC to the specified server to merge the specified
-   * regions.
-   * <p>
-   * A region server could reject the request if it does not host one of the
-   * specified regions.
-   * @param server server to merge regions
-   * @param region_a region to merge
-   * @param region_b region to merge
-   * @param forcible true to force the merge; otherwise only two adjacent regions
-   *          will be merged
-   * @throws IOException
-   */
-  public void sendRegionsMerge(ServerName server, HRegionInfo region_a,
-      HRegionInfo region_b, boolean forcible, final User user) throws IOException {
-    if (server == null)
-      throw new NullPointerException("Passed server is null");
-    if (region_a == null || region_b == null)
-      throw new NullPointerException("Passed region is null");
-    AdminService.BlockingInterface admin = getRsAdmin(server);
-    if (admin == null) {
-      throw new IOException("Attempting to send MERGE REGIONS RPC to server "
-          + server.toString() + " for region "
-          + region_a.getRegionNameAsString() + ","
-          + region_b.getRegionNameAsString()
-          + " failed because no RPC connection found to this server");
-    }
-    HBaseRpcController controller = newRpcController();
-    ProtobufUtil.mergeRegions(controller, admin, region_a, region_b, forcible, user);
-  }
-
-  /**
    * Check if a region server is reachable and has the expected start code
    */
   public boolean isServerReachable(ServerName server) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
deleted file mode 100644
index ee92932..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/DispatchMergingRegionsProcedure.java
+++ /dev/null
@@ -1,579 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.procedure;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RegionLoad;
-import org.apache.hadoop.hbase.ServerLoad;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.UnknownRegionException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.exceptions.MergeRegionException;
-import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.CatalogJanitor;
-import org.apache.hadoop.hbase.master.RegionPlan;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.master.RegionStates;
-import org.apache.hadoop.hbase.master.ServerManager;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsState;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-
-/**
- * The procedure to Merge a region in a table.
- */
-@InterfaceAudience.Private
-public class DispatchMergingRegionsProcedure
-    extends AbstractStateMachineTableProcedure<DispatchMergingRegionsState> {
-  private static final Log LOG = LogFactory.getLog(DispatchMergingRegionsProcedure.class);
-
-  private final AtomicBoolean aborted = new AtomicBoolean(false);
-  private Boolean traceEnabled;
-  private AssignmentManager assignmentManager;
-  private int timeout;
-  private ServerName regionLocation;
-  private String regionsToMergeListFullName;
-  private String regionsToMergeListEncodedName;
-
-  private TableName tableName;
-  private HRegionInfo [] regionsToMerge;
-  private boolean forcible;
-
-  public DispatchMergingRegionsProcedure() {
-    this.traceEnabled = isTraceEnabled();
-    this.assignmentManager = null;
-    this.timeout = -1;
-    this.regionLocation = null;
-    this.regionsToMergeListFullName = null;
-    this.regionsToMergeListEncodedName = null;
-  }
-
-  public DispatchMergingRegionsProcedure(
-      final MasterProcedureEnv env,
-      final TableName tableName,
-      final HRegionInfo [] regionsToMerge,
-      final boolean forcible) {
-    super(env);
-    this.traceEnabled = isTraceEnabled();
-    this.assignmentManager = getAssignmentManager(env);
-    this.tableName = tableName;
-    // For now, we only merge 2 regions.  It could be extended to more than 2 regions in
-    // the future.
-    assert(regionsToMerge.length == 2);
-    this.regionsToMerge = regionsToMerge;
-    this.forcible = forcible;
-
-    this.timeout = -1;
-    this.regionsToMergeListFullName = getRegionsToMergeListFullNameString();
-    this.regionsToMergeListEncodedName = getRegionsToMergeListEncodedNameString();
-  }
-
-  @Override
-  protected Flow executeFromState(
-      final MasterProcedureEnv env,
-      final DispatchMergingRegionsState state) throws InterruptedException {
-    if (isTraceEnabled()) {
-      LOG.trace(this + " execute state=" + state);
-    }
-
-    try {
-      switch (state) {
-      case DISPATCH_MERGING_REGIONS_PREPARE:
-        prepareMergeRegion(env);
-        setNextState(DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_PRE_OPERATION);
-        break;
-      case DISPATCH_MERGING_REGIONS_PRE_OPERATION:
-        //Unused for now - reserve to add preMerge coprocessor in the future
-        setNextState(DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS);
-        break;
-      case DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS:
-        if (MoveRegionsToSameRS(env)) {
-          setNextState(DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS);
-        } else {
-          LOG.info("Cancel merging regions " + getRegionsToMergeListFullNameString()
-            + ", because can't move them to the same RS");
-          setNextState(DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_POST_OPERATION);
-        }
-        break;
-      case DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS:
-        doMergeInRS(env);
-        setNextState(DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_POST_OPERATION);
-        break;
-      case DISPATCH_MERGING_REGIONS_POST_OPERATION:
-        //Unused for now - reserve to add postCompletedMerge coprocessor in the future
-        return Flow.NO_MORE_STATE;
-      default:
-        throw new UnsupportedOperationException(this + " unhandled state=" + state);
-      }
-    } catch (IOException e) {
-      LOG.warn("Error trying to merge regions " + getRegionsToMergeListFullNameString() +
-        " in the table " + tableName + " (in state=" + state + ")", e);
-
-      setFailure("master-merge-regions", e);
-    }
-    return Flow.HAS_MORE_STATE;
-  }
-
-  @Override
-  protected void rollbackState(
-      final MasterProcedureEnv env,
-      final DispatchMergingRegionsState state) throws IOException, InterruptedException {
-    if (isTraceEnabled()) {
-      LOG.trace(this + " rollback state=" + state);
-    }
-
-    try {
-      switch (state) {
-      case DISPATCH_MERGING_REGIONS_POST_OPERATION:
-      case DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS:
-        String msg = this + " We are in the " + state + " state."
-            + " It is complicated to rollback the merge operation that region server is working on."
-            + " Rollback is not supported and we should let the merge operation to complete";
-        LOG.warn(msg);
-        // PONR
-        throw new UnsupportedOperationException(this + " unhandled state=" + state);
-      case DISPATCH_MERGING_REGIONS_MOVE_REGION_TO_SAME_RS:
-        break; // nothing to rollback
-      case DISPATCH_MERGING_REGIONS_PRE_OPERATION:
-        break; // nothing to rollback
-      case DISPATCH_MERGING_REGIONS_PREPARE:
-        break; // nothing to rollback
-      default:
-        throw new UnsupportedOperationException(this + " unhandled state=" + state);
-      }
-    } catch (Exception e) {
-      // This will be retried. Unless there is a bug in the code,
-      // this should be just a "temporary error" (e.g. network down)
-      LOG.warn("Failed rollback attempt step " + state + " for merging the regions "
-          + getRegionsToMergeListFullNameString() + " in table " + tableName, e);
-      throw e;
-    }
-  }
-
-  @Override
-  protected DispatchMergingRegionsState getState(final int stateId) {
-    return DispatchMergingRegionsState.valueOf(stateId);
-  }
-
-  @Override
-  protected int getStateId(final DispatchMergingRegionsState state) {
-    return state.getNumber();
-  }
-
-  @Override
-  protected DispatchMergingRegionsState getInitialState() {
-    return DispatchMergingRegionsState.DISPATCH_MERGING_REGIONS_PREPARE;
-  }
-
-  /*
-   * Check whether we are in the state that can be rollback
-   */
-  @Override
-  protected boolean isRollbackSupported(final DispatchMergingRegionsState state) {
-    switch (state) {
-    case DISPATCH_MERGING_REGIONS_POST_OPERATION:
-    case DISPATCH_MERGING_REGIONS_DO_MERGE_IN_RS:
-        // It is not safe to rollback if we reach to these states.
-        return false;
-      default:
-        break;
-    }
-    return true;
-  }
-
-  @Override
-  public void serializeStateData(final OutputStream stream) throws IOException {
-    super.serializeStateData(stream);
-
-    MasterProcedureProtos.DispatchMergingRegionsStateData.Builder dispatchMergingRegionsMsg =
-        MasterProcedureProtos.DispatchMergingRegionsStateData.newBuilder()
-        .setUserInfo(MasterProcedureUtil.toProtoUserInfo(getUser()))
-        .setTableName(ProtobufUtil.toProtoTableName(tableName))
-        .setForcible(forcible);
-    for (HRegionInfo hri: regionsToMerge) {
-      dispatchMergingRegionsMsg.addRegionInfo(HRegionInfo.convert(hri));
-    }
-    dispatchMergingRegionsMsg.build().writeDelimitedTo(stream);
-  }
-
-  @Override
-  public void deserializeStateData(final InputStream stream) throws IOException {
-    super.deserializeStateData(stream);
-
-    MasterProcedureProtos.DispatchMergingRegionsStateData dispatchMergingRegionsMsg =
-        MasterProcedureProtos.DispatchMergingRegionsStateData.parseDelimitedFrom(stream);
-    setUser(MasterProcedureUtil.toUserInfo(dispatchMergingRegionsMsg.getUserInfo()));
-    tableName = ProtobufUtil.toTableName(dispatchMergingRegionsMsg.getTableName());
-
-    assert(dispatchMergingRegionsMsg.getRegionInfoCount() == 2);
-    regionsToMerge = new HRegionInfo[dispatchMergingRegionsMsg.getRegionInfoCount()];
-    for (int i = 0; i < regionsToMerge.length; i++) {
-      regionsToMerge[i] = HRegionInfo.convert(dispatchMergingRegionsMsg.getRegionInfo(i));
-    }
-  }
-
-  @Override
-  public void toStringClassDetails(StringBuilder sb) {
-    sb.append(getClass().getSimpleName());
-    sb.append(" (table=");
-    sb.append(tableName);
-    sb.append(" regions=");
-    sb.append(getRegionsToMergeListFullNameString());
-    sb.append(" forcible=");
-    sb.append(forcible);
-    sb.append(")");
-  }
-
-  @Override
-  protected boolean acquireLock(final MasterProcedureEnv env) {
-    return !env.getProcedureQueue().waitRegions(
-      this, getTableName(), regionsToMerge[0], regionsToMerge[1]);
-  }
-
-  @Override
-  protected void releaseLock(final MasterProcedureEnv env) {
-    env.getProcedureQueue().wakeRegions(this, getTableName(), regionsToMerge[0], regionsToMerge[1]);
-  }
-
-  @Override
-  public TableName getTableName() {
-    return tableName;
-  }
-
-  @Override
-  public TableOperationType getTableOperationType() {
-    return TableOperationType.MERGE;
-  }
-
-  /**
-   * Prepare merge and do some check
-   * @param env MasterProcedureEnv
-   * @throws IOException
-   */
-  private void prepareMergeRegion(final MasterProcedureEnv env) throws IOException {
-    // Note: the following logic assumes that we only have 2 regions to merge.  In the future,
-    // if we want to extend to more than 2 regions, the code needs to modify a little bit.
-    //
-    CatalogJanitor catalogJanitor = env.getMasterServices().getCatalogJanitor();
-    boolean regionAHasMergeQualifier = !catalogJanitor.cleanMergeQualifier(regionsToMerge[0]);
-    if (regionAHasMergeQualifier
-        || !catalogJanitor.cleanMergeQualifier(regionsToMerge[1])) {
-      String msg = "Skip merging regions " + regionsToMerge[0].getRegionNameAsString()
-          + ", " + regionsToMerge[1].getRegionNameAsString() + ", because region "
-          + (regionAHasMergeQualifier ? regionsToMerge[0].getEncodedName() : regionsToMerge[1]
-              .getEncodedName()) + " has merge qualifier";
-      LOG.info(msg);
-      throw new MergeRegionException(msg);
-    }
-
-      RegionStates regionStates = getAssignmentManager(env).getRegionStates();
-      RegionState regionStateA = regionStates.getRegionState(regionsToMerge[0].getEncodedName());
-      RegionState regionStateB = regionStates.getRegionState(regionsToMerge[1].getEncodedName());
-      if (regionStateA == null || regionStateB == null) {
-        throw new UnknownRegionException(
-          regionStateA == null ?
-              regionsToMerge[0].getEncodedName() : regionsToMerge[1].getEncodedName());
-      }
-
-      if (!regionStateA.isOpened() || !regionStateB.isOpened()) {
-        throw new MergeRegionException(
-          "Unable to merge regions not online " + regionStateA + ", " + regionStateB);
-      }
-
-      if (regionsToMerge[0].getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
-          regionsToMerge[1].getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
-        throw new MergeRegionException("Can't merge non-default replicas");
-      }
-
-      if (!forcible && !HRegionInfo.areAdjacent(regionsToMerge[0], regionsToMerge[1])) {
-        throw new MergeRegionException(
-          "Unable to merge not adjacent regions "
-            + regionsToMerge[0].getRegionNameAsString() + ", "
-            + regionsToMerge[1].getRegionNameAsString()
-            + " where forcible = " + forcible);
-      }
-  }
-
-  /**
-   * Move all regions to the same region server
-   * @param env MasterProcedureEnv
-   * @return whether target regions hosted by the same RS
-   * @throws IOException
-   */
-  private boolean MoveRegionsToSameRS(final MasterProcedureEnv env) throws IOException {
-    // Make sure regions are on the same regionserver before send merge
-    // regions request to region server.
-    //
-    boolean onSameRS = isRegionsOnTheSameServer(env);
-    if (!onSameRS) {
-      // Note: the following logic assumes that we only have 2 regions to merge.  In the future,
-      // if we want to extend to more than 2 regions, the code needs to modify a little bit.
-      //
-      RegionStates regionStates = getAssignmentManager(env).getRegionStates();
-      ServerName regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
-
-      RegionLoad loadOfRegionA = getRegionLoad(env, regionLocation, regionsToMerge[0]);
-      RegionLoad loadOfRegionB = getRegionLoad(env, regionLocation2, regionsToMerge[1]);
-      if (loadOfRegionA != null && loadOfRegionB != null
-          && loadOfRegionA.getRequestsCount() < loadOfRegionB.getRequestsCount()) {
-        // switch regionsToMerge[0] and regionsToMerge[1]
-        HRegionInfo tmpRegion = this.regionsToMerge[0];
-        this.regionsToMerge[0] = this.regionsToMerge[1];
-        this.regionsToMerge[1] = tmpRegion;
-        ServerName tmpLocation = regionLocation;
-        regionLocation = regionLocation2;
-        regionLocation2 = tmpLocation;
-      }
-
-      long startTime = EnvironmentEdgeManager.currentTime();
-
-      RegionPlan regionPlan = new RegionPlan(regionsToMerge[1], regionLocation2, regionLocation);
-      LOG.info("Moving regions to same server for merge: " + regionPlan.toString());
-      getAssignmentManager(env).balance(regionPlan);
-      do {
-        try {
-          Thread.sleep(20);
-          // Make sure check RIT first, then get region location, otherwise
-          // we would make a wrong result if region is online between getting
-          // region location and checking RIT
-          boolean isRIT = regionStates.isRegionInTransition(regionsToMerge[1]);
-          regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[1]);
-          onSameRS = regionLocation.equals(regionLocation2);
-          if (onSameRS || !isRIT) {
-            // Regions are on the same RS, or regionsToMerge[1] is not in
-            // RegionInTransition any more
-            break;
-          }
-        } catch (InterruptedException e) {
-          InterruptedIOException iioe = new InterruptedIOException();
-          iioe.initCause(e);
-          throw iioe;
-        }
-      } while ((EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));
-    }
-    return onSameRS;
-  }
-
-  /**
-   * Do the real merge operation in the region server that hosts regions
-   * @param env MasterProcedureEnv
-   * @throws IOException
-   */
-  private void doMergeInRS(final MasterProcedureEnv env) throws IOException {
-    long duration = 0;
-    long startTime = EnvironmentEdgeManager.currentTime();
-    do {
-      try {
-        if (getServerName(env) == null) {
-          // The merge probably already happened. Check
-          RegionState regionState = getAssignmentManager(env).getRegionStates().getRegionState(
-            regionsToMerge[0].getEncodedName());
-          if (regionState.isMerging() || regionState.isMerged()) {
-            LOG.info("Merge regions " +  getRegionsToMergeListEncodedNameString() +
-              " is in progress or completed.  No need to send a new request.");
-          } else {
-            LOG.warn("Cannot sending merge to hosting server of the regions " +
-              getRegionsToMergeListEncodedNameString() + " as the server is unknown");
-          }
-          return;
-        }
-        // TODO: the following RPC call is not idempotent.  Multiple calls (eg. after master
-        // failover, re-execute this step) could result in some exception thrown that does not
-        // paint the correct picture.  This behavior is on-par with old releases.  Improvement
-        // could happen in the future.
-        env.getMasterServices().getServerManager().sendRegionsMerge(
-          getServerName(env),
-          regionsToMerge[0],
-          regionsToMerge[1],
-          forcible,
-          getUser());
-        LOG.info("Sent merge to server " + getServerName(env) + " for region " +
-            getRegionsToMergeListEncodedNameString() + ", forcible=" + forcible);
-        return;
-      } catch (RegionOpeningException roe) {
-        // Do a retry since region should be online on RS immediately
-        LOG.warn("Failed mergering regions in " + getServerName(env) + ", retrying...", roe);
-      } catch (Exception ie) {
-        LOG.warn("Failed sending merge to " + getServerName(env) + " for regions " +
-            getRegionsToMergeListEncodedNameString() + ", forcible=" + forcible, ie);
-        return;
-      }
-    } while ((duration = EnvironmentEdgeManager.currentTime() - startTime) <= getTimeout(env));
-
-    // If we reach here, it means that we have timed out.
-    String msg = "Failed sending merge to " + getServerName(env) + " after " + duration + "ms";
-    LOG.warn(msg);
-    throw new IOException(msg);
-  }
-
-  private RegionLoad getRegionLoad(
-      final MasterProcedureEnv env,
-      final ServerName sn,
-      final HRegionInfo hri) {
-    ServerManager serverManager =  env.getMasterServices().getServerManager();
-    ServerLoad load = serverManager.getLoad(sn);
-    if (load != null) {
-      Map<byte[], RegionLoad> regionsLoad = load.getRegionsLoad();
-      if (regionsLoad != null) {
-        return regionsLoad.get(hri.getRegionName());
-      }
-    }
-    return null;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @param env MasterProcedureEnv
-   * @return whether target regions hosted by the same RS
-   */
-  private boolean isRegionsOnTheSameServer(final MasterProcedureEnv env) throws IOException{
-    Boolean onSameRS = true;
-    int i = 0;
-    RegionStates regionStates = getAssignmentManager(env).getRegionStates();
-    regionLocation = regionStates.getRegionServerOfRegion(regionsToMerge[i]);
-    if (regionLocation != null) {
-      for(i = 1; i < regionsToMerge.length; i++) {
-        ServerName regionLocation2 = regionStates.getRegionServerOfRegion(regionsToMerge[i]);
-        if (regionLocation2 != null) {
-          if (onSameRS) {
-            onSameRS = regionLocation.equals(regionLocation2);
-          }
-        } else {
-          // At least one region is not online, merge will fail, no need to continue.
-          break;
-        }
-      }
-      if (i == regionsToMerge.length) {
-        // Finish checking all regions, return the result;
-        return onSameRS;
-      }
-    }
-
-    // If we reach here, at least one region is not online.
-    String msg = "Skip merging regions " + getRegionsToMergeListFullNameString() +
-        ", because region " + regionsToMerge[i].getEncodedName() + " is not online now.";
-    LOG.warn(msg);
-    throw new IOException(msg);
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @param env MasterProcedureEnv
-   * @return assignmentManager
-   */
-  private AssignmentManager getAssignmentManager(final MasterProcedureEnv env) {
-    if (assignmentManager == null) {
-      assignmentManager = env.getMasterServices().getAssignmentManager();
-    }
-    return assignmentManager;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @param env MasterProcedureEnv
-   * @return timeout value
-   */
-  private int getTimeout(final MasterProcedureEnv env) {
-    if (timeout == -1) {
-      timeout = env.getMasterConfiguration().getInt(
-        "hbase.master.regionmerge.timeout", regionsToMerge.length * 60 * 1000);
-    }
-    return timeout;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @param env MasterProcedureEnv
-   * @return serverName
-   */
-  private ServerName getServerName(final MasterProcedureEnv env) {
-    if (regionLocation == null) {
-      regionLocation =
-          getAssignmentManager(env).getRegionStates().getRegionServerOfRegion(regionsToMerge[0]);
-    }
-    return regionLocation;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @return full region names in a list
-   */
-  private String getRegionsToMergeListFullNameString() {
-    if (regionsToMergeListFullName == null) {
-      StringBuilder sb = new StringBuilder("[");
-      int i = 0;
-      while(i < regionsToMerge.length - 1) {
-        sb.append(regionsToMerge[i].getRegionNameAsString() + ", ");
-        i++;
-      }
-      sb.append(regionsToMerge[i].getRegionNameAsString() + " ]");
-      regionsToMergeListFullName = sb.toString();
-    }
-    return regionsToMergeListFullName;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @return encoded region names
-   */
-  private String getRegionsToMergeListEncodedNameString() {
-    if (regionsToMergeListEncodedName == null) {
-      StringBuilder sb = new StringBuilder("[");
-      int i = 0;
-      while(i < regionsToMerge.length - 1) {
-        sb.append(regionsToMerge[i].getEncodedName() + ", ");
-        i++;
-      }
-      sb.append(regionsToMerge[i].getEncodedName() + " ]");
-      regionsToMergeListEncodedName = sb.toString();
-    }
-    return regionsToMergeListEncodedName;
-  }
-
-  /**
-   * The procedure could be restarted from a different machine. If the variable is null, we need to
-   * retrieve it.
-   * @return traceEnabled
-   */
-  private Boolean isTraceEnabled() {
-    if (traceEnabled == null) {
-      traceEnabled = LOG.isTraceEnabled();
-    }
-    return traceEnabled;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
index 1331b86..63929a8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplitThread.java
@@ -217,20 +217,6 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi
     return queueLists.toString();
   }
 
-  public synchronized void requestRegionsMerge(final Region a,
-      final Region b, final boolean forcible, long masterSystemTime, User user) {
-    try {
-      mergePool.execute(new RegionMergeRequest(a, b, this.server, forcible, masterSystemTime,user));
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Region merge requested for " + a + "," + b + ", forcible="
-            + forcible + ".  " + this);
-      }
-    } catch (RejectedExecutionException ree) {
-      LOG.warn("Could not execute merge for " + a + "," + b + ", forcible="
-          + forcible, ree);
-    }
-  }
-
   public synchronized boolean requestSplit(final Region r) {
     // don't split regions that are blocking
     if (shouldSplitRegion() && ((HRegion)r).getCompactPriority() >= Store.PRIORITY_USER) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index a5172bb..b574c50 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -6881,112 +6881,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           (Bytes.compareTo(info.getEndKey(), 0, info.getEndKey().length, row, offset, length) > 0));
   }
 
-  /**
-   * Merge two HRegions.  The regions must be adjacent and must not overlap.
-   *
-   * @return new merged HRegion
-   * @throws IOException
-   */
-  public static HRegion mergeAdjacent(final HRegion srcA, final HRegion srcB)
-  throws IOException {
-    HRegion a = srcA;
-    HRegion b = srcB;
-
-    // Make sure that srcA comes first; important for key-ordering during
-    // write of the merged file.
-    if (srcA.getRegionInfo().getStartKey() == null) {
-      if (srcB.getRegionInfo().getStartKey() == null) {
-        throw new IOException("Cannot merge two regions with null start key");
-      }
-      // A's start key is null but B's isn't. Assume A comes before B
-    } else if ((srcB.getRegionInfo().getStartKey() == null) ||
-      (Bytes.compareTo(srcA.getRegionInfo().getStartKey(),
-        srcB.getRegionInfo().getStartKey()) > 0)) {
-      a = srcB;
-      b = srcA;
-    }
-
-    if (!(Bytes.compareTo(a.getRegionInfo().getEndKey(),
-        b.getRegionInfo().getStartKey()) == 0)) {
-      throw new IOException("Cannot merge non-adjacent regions");
-    }
-    return merge(a, b);
-  }
-
-  /**
-   * Merge two regions whether they are adjacent or not.
-   *
-   * @param a region a
-   * @param b region b
-   * @return new merged region
-   * @throws IOException
-   */
-  public static HRegion merge(final HRegion a, final HRegion b) throws IOException {
-    if (!a.getRegionInfo().getTable().equals(b.getRegionInfo().getTable())) {
-      throw new IOException("Regions do not belong to the same table");
-    }
-
-    FileSystem fs = a.getRegionFileSystem().getFileSystem();
-    // Make sure each region's cache is empty
-    a.flush(true);
-    b.flush(true);
-
-    // Compact each region so we only have one store file per family
-    a.compact(true);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Files for region: " + a);
-      a.getRegionFileSystem().logFileSystemState(LOG);
-    }
-    b.compact(true);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Files for region: " + b);
-      b.getRegionFileSystem().logFileSystemState(LOG);
-    }
-
-    RegionMergeTransactionImpl rmt = new RegionMergeTransactionImpl(a, b, true);
-    if (!rmt.prepare(null)) {
-      throw new IOException("Unable to merge regions " + a + " and " + b);
-    }
-    HRegionInfo mergedRegionInfo = rmt.getMergedRegionInfo();
-    LOG.info("starting merge of regions: " + a + " and " + b
-        + " into new region " + mergedRegionInfo.getRegionNameAsString()
-        + " with start key <"
-        + Bytes.toStringBinary(mergedRegionInfo.getStartKey())
-        + "> and end key <"
-        + Bytes.toStringBinary(mergedRegionInfo.getEndKey()) + ">");
-    HRegion dstRegion;
-    try {
-      dstRegion = (HRegion)rmt.execute(null, null);
-    } catch (IOException ioe) {
-      rmt.rollback(null, null);
-      throw new IOException("Failed merging region " + a + " and " + b
-          + ", and successfully rolled back");
-    }
-    dstRegion.compact(true);
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Files for new region");
-      dstRegion.getRegionFileSystem().logFileSystemState(LOG);
-    }
-
-    // clear the compacted files if any
-    for (Store s : dstRegion.getStores()) {
-      s.closeAndArchiveCompactedFiles();
-    }
-    if (dstRegion.getRegionFileSystem().hasReferences(dstRegion.getTableDesc())) {
-      throw new IOException("Merged region " + dstRegion
-          + " still has references after the compaction, is compaction canceled?");
-    }
-
-    // Archiving the 'A' region
-    HFileArchiver.archiveRegion(a.getBaseConf(), fs, a.getRegionInfo());
-    // Archiving the 'B' region
-    HFileArchiver.archiveRegion(b.getBaseConf(), fs, b.getRegionInfo());
-
-    LOG.info("merge completed. New region is " + dstRegion);
-    return dstRegion;
-  }
-
   @Override
   public Result get(final Get get) throws IOException {
     prepareGet(get);

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 7b5f799..1e9f16b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -115,8 +115,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerIn
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
@@ -1642,46 +1640,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   }
 
   /**
-   * Merge regions on the region server.
-   *
-   * @param controller the RPC controller
-   * @param request the request
-   * @return merge regions response
-   * @throws ServiceException
-   */
-  @Override
-  @QosPriority(priority = HConstants.ADMIN_QOS)
-  public MergeRegionsResponse mergeRegions(final RpcController controller,
-      final MergeRegionsRequest request) throws ServiceException {
-    try {
-      checkOpen();
-      requestCount.increment();
-      Region regionA = getRegion(request.getRegionA());
-      Region regionB = getRegion(request.getRegionB());
-      boolean forcible = request.getForcible();
-      long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1;
-      regionA.startRegionOperation(Operation.MERGE_REGION);
-      regionB.startRegionOperation(Operation.MERGE_REGION);
-      if (regionA.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID ||
-          regionB.getRegionInfo().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) {
-        throw new ServiceException(new MergeRegionException("Can't merge non-default replicas"));
-      }
-      LOG.info("Receiving merging request for  " + regionA + ", " + regionB
-          + ",forcible=" + forcible);
-      regionA.flush(true);
-      regionB.flush(true);
-      regionServer.compactSplitThread.requestRegionsMerge(regionA, regionB, forcible,
-          masterSystemTime, RpcServer.getRequestUser());
-      return MergeRegionsResponse.newBuilder().build();
-    } catch (DroppedSnapshotException ex) {
-      regionServer.abort("Replay of WAL required. Forcing server shutdown", ex);
-      throw new ServiceException(ex);
-    } catch (IOException ie) {
-      throw new ServiceException(ie);
-    }
-  }
-
-  /**
    * Open asynchronously a region or a set of regions on the region server.
    *
    * The opening is coordinated by ZooKeeper, and this method requires the znode to be created

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
deleted file mode 100644
index ce69ad3..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeRequest.java
+++ /dev/null
@@ -1,153 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.master.TableLockManager.TableLock;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.util.StringUtils;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Handles processing region merges. Put in a queue, owned by HRegionServer.
- */
-@InterfaceAudience.Private
-class RegionMergeRequest implements Runnable {
-  private static final Log LOG = LogFactory.getLog(RegionMergeRequest.class);
-  private final HRegion region_a;
-  private final HRegion region_b;
-  private final HRegionServer server;
-  private final boolean forcible;
-  private TableLock tableLock;
-  private final long masterSystemTime;
-  private final User user;
-
-  RegionMergeRequest(Region a, Region b, HRegionServer hrs, boolean forcible,
-    long masterSystemTime, User user) {
-    Preconditions.checkNotNull(hrs);
-    this.region_a = (HRegion)a;
-    this.region_b = (HRegion)b;
-    this.server = hrs;
-    this.forcible = forcible;
-    this.masterSystemTime = masterSystemTime;
-    this.user = user;
-  }
-
-  @Override
-  public String toString() {
-    return "MergeRequest,regions:" + region_a + ", " + region_b + ", forcible="
-        + forcible;
-  }
-
-  @Override
-  public void run() {
-    if (this.server.isStopping() || this.server.isStopped()) {
-      LOG.debug("Skipping merge because server is stopping="
-          + this.server.isStopping() + " or stopped=" + this.server.isStopped());
-      return;
-    }
-    try {
-      final long startTime = EnvironmentEdgeManager.currentTime();
-      RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a,
-          region_b, forcible, masterSystemTime);
-
-      //acquire a shared read lock on the table, so that table schema modifications
-      //do not happen concurrently
-      tableLock = server.getTableLockManager().readLock(region_a.getTableDesc().getTableName()
-          , "MERGE_REGIONS:" + region_a.getRegionInfo().getRegionNameAsString() + ", " +
-              region_b.getRegionInfo().getRegionNameAsString());
-      try {
-        tableLock.acquire();
-      } catch (IOException ex) {
-        tableLock = null;
-        throw ex;
-      }
-
-      // If prepare does not return true, for some reason -- logged inside in
-      // the prepare call -- we are not ready to merge just now. Just return.
-      if (!mt.prepare(this.server)) return;
-      try {
-        mt.execute(this.server, this.server, this.user);
-      } catch (Exception e) {
-        if (this.server.isStopping() || this.server.isStopped()) {
-          LOG.info(
-              "Skip rollback/cleanup of failed merge of " + region_a + " and "
-                  + region_b + " because server is"
-                  + (this.server.isStopping() ? " stopping" : " stopped"), e);
-          return;
-        }
-        if (e instanceof DroppedSnapshotException) {
-          server.abort("Replay of WAL required. Forcing server shutdown", e);
-          return;
-        }
-        try {
-          LOG.warn("Running rollback/cleanup of failed merge of "
-                  + region_a +" and "+ region_b + "; " + e.getMessage(), e);
-          if (mt.rollback(this.server, this.server)) {
-            LOG.info("Successful rollback of failed merge of "
-                + region_a +" and "+ region_b);
-          } else {
-            this.server.abort("Abort; we got an error after point-of-no-return"
-                + "when merging " + region_a + " and " + region_b);
-          }
-        } catch (RuntimeException ee) {
-          String msg = "Failed rollback of failed merge of "
-              + region_a +" and "+ region_b + " -- aborting server";
-          // If failed rollback, kill this server to avoid having a hole in
-          // table.
-          LOG.info(msg, ee);
-          this.server.abort(msg);
-        }
-        return;
-      }
-      LOG.info("Regions merged, hbase:meta updated, and report to master. region_a="
-          + region_a + ", region_b=" + region_b + ",merged region="
-          + mt.getMergedRegionInfo().getRegionNameAsString()
-          + ". Region merge took "
-          + StringUtils.formatTimeDiff(EnvironmentEdgeManager.currentTime(), startTime));
-    } catch (IOException ex) {
-      ex = ex instanceof RemoteException ? ((RemoteException) ex).unwrapRemoteException() : ex;
-      LOG.error("Merge failed " + this, ex);
-      server.checkFileSystem();
-    } finally {
-      releaseTableLock();
-    }
-  }
-
-  protected void releaseTableLock() {
-    if (this.tableLock != null) {
-      try {
-        this.tableLock.release();
-      } catch (IOException ex) {
-        LOG.error("Could not release the table lock (something is really wrong). " 
-           + "Aborting this server to avoid holding the lock forever.");
-        this.server.abort("Abort; we got an error when releasing the table lock "
-                         + "on " + region_a.getRegionInfo().getRegionNameAsString());
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionFactory.java
deleted file mode 100644
index c844d54..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionFactory.java
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.ReflectionUtils;
-
-/**
- * A factory for creating RegionMergeTransactions, which execute region split as a "transaction".
- * See {@link RegionMergeTransactionImpl}
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-@InterfaceStability.Evolving
-public class RegionMergeTransactionFactory implements Configurable {
-
-  public static final String MERGE_TRANSACTION_IMPL_KEY =
-      "hbase.regionserver.merge.transaction.impl";
-
-  private Configuration conf;
-
-  public RegionMergeTransactionFactory(Configuration conf) {
-    this.conf = conf;
-  }
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  /**
-   * Create a merge transaction
-   * @param a region a to merge
-   * @param b region b to merge
-   * @param forcible if false, we will only merge adjacent regions
-   * @return transaction instance
-   */
-  public RegionMergeTransactionImpl create(final Region a, final Region b,
-      final boolean forcible) {
-    // The implementation class must extend RegionMergeTransactionImpl, not only
-    // implement the RegionMergeTransaction interface like you might expect,
-    // because various places such as AssignmentManager use static methods
-    // from RegionMergeTransactionImpl. Whatever we use for implementation must
-    // be compatible, so it's safest to require ? extends RegionMergeTransactionImpl.
-    // If not compatible we will throw a runtime exception from here.
-    return ReflectionUtils.instantiateWithCustomCtor(
-      conf.getClass(MERGE_TRANSACTION_IMPL_KEY, RegionMergeTransactionImpl.class,
-        RegionMergeTransactionImpl.class).getName(),
-      new Class[] { Region.class, Region.class, boolean.class },
-      new Object[] { a, b, forcible });
-  }
-
-}
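
For context, the create(...) method removed above looked up the configured merge transaction class and instantiated it reflectively. The following is a minimal, hypothetical sketch of how a caller used that API before this deletion; the class name MergeTransactionFactorySketch and the regionA/regionB parameters are illustrative only and are not part of this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
    import org.apache.hadoop.hbase.regionserver.RegionMergeTransactionFactory;

    // Hypothetical caller, written against the pre-removal API.
    public class MergeTransactionFactorySketch {
      public static RegionMergeTransaction newMergeTransaction(Region regionA, Region regionB) {
        Configuration conf = HBaseConfiguration.create();
        // The default RegionMergeTransactionImpl is used unless a subclass is
        // configured under hbase.regionserver.merge.transaction.impl.
        return new RegionMergeTransactionFactory(conf)
            .create(regionA, regionB, /* forcible = */ false);
      }
    }

Passing forcible = false restricts the transaction to adjacent regions, as the javadoc above describes.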


[49/50] [abbrv] hbase git commit: HBASE-17000 Implement computation of online region sizes and report to the Master

Posted by el...@apache.org.
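
The diff below adds two generated messages to RegionServerStatusProtos: RegionSpaceUse, which pairs a RegionInfo with its size in bytes, and RegionSpaceUseReportRequest, which carries a repeated list of RegionSpaceUse entries so that filesystem usage for many regions can be reported in one request. A minimal sketch of how the generated builders could be assembled follows; the helper class and variable names are illustrative only, and the code that actually computes and sends region sizes is not part of this diff.

    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;

    // Hypothetical helper that packages one region's on-disk size into a report,
    // using only the builder methods generated by this patch.
    public class RegionSpaceUseReportSketch {
      public static RegionSpaceUseReportRequest buildReport(RegionInfo region, long sizeInBytes) {
        RegionSpaceUse spaceUse = RegionSpaceUse.newBuilder()
            .setRegion(region)      // optional .hbase.pb.RegionInfo region = 1
            .setSize(sizeInBytes)   // optional uint64 size = 2
            .build();
        return RegionSpaceUseReportRequest.newBuilder()
            .addSpaceUse(spaceUse)  // repeated .hbase.pb.RegionSpaceUse space_use = 1
            .build();
      }
    }

Because space_use is a repeated field, a region server can accumulate RegionSpaceUse entries for all of its online regions and send them to the Master in a single RPC rather than issuing one request per region.
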
http://git-wip-us.apache.org/repos/asf/hbase/blob/e48b7fa4/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
index 8f368e9..899ae9b 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
@@ -10164,6 +10164,1912 @@ public final class RegionServerStatusProtos {
 
   }
 
+  public interface RegionSpaceUseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.RegionSpaceUse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <pre>
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    boolean hasRegion();
+    /**
+     * <pre>
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion();
+    /**
+     * <pre>
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder();
+
+    /**
+     * <pre>
+     * The size in bytes of the region
+     * </pre>
+     *
+     * <code>optional uint64 size = 2;</code>
+     */
+    boolean hasSize();
+    /**
+     * <pre>
+     * The size in bytes of the region
+     * </pre>
+     *
+     * <code>optional uint64 size = 2;</code>
+     */
+    long getSize();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RegionSpaceUse}
+   */
+  public  static final class RegionSpaceUse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.RegionSpaceUse)
+      RegionSpaceUseOrBuilder {
+    // Use RegionSpaceUse.newBuilder() to construct.
+    private RegionSpaceUse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private RegionSpaceUse() {
+      size_ = 0L;
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RegionSpaceUse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = region_.toBuilder();
+              }
+              region_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(region_);
+                region_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              size_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int REGION_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo region_;
+    /**
+     * <pre>
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    public boolean hasRegion() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <pre>
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion() {
+      return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+    }
+    /**
+     * <pre>
+     * A region identifier
+     * </pre>
+     *
+     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() {
+      return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+    }
+
+    public static final int SIZE_FIELD_NUMBER = 2;
+    private long size_;
+    /**
+     * <pre>
+     * The size in bytes of the region
+     * </pre>
+     *
+     * <code>optional uint64 size = 2;</code>
+     */
+    public boolean hasSize() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <pre>
+     * The size in bytes of the region
+     * </pre>
+     *
+     * <code>optional uint64 size = 2;</code>
+     */
+    public long getSize() {
+      return size_;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (hasRegion()) {
+        if (!getRegion().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, getRegion());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeUInt64(2, size_);
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, getRegion());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(2, size_);
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse) obj;
+
+      boolean result = true;
+      result = result && (hasRegion() == other.hasRegion());
+      if (hasRegion()) {
+        result = result && getRegion()
+            .equals(other.getRegion());
+      }
+      result = result && (hasSize() == other.hasSize());
+      if (hasSize()) {
+        result = result && (getSize()
+            == other.getSize());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasRegion()) {
+        hash = (37 * hash) + REGION_FIELD_NUMBER;
+        hash = (53 * hash) + getRegion().hashCode();
+      }
+      if (hasSize()) {
+        hash = (37 * hash) + SIZE_FIELD_NUMBER;
+        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+            getSize());
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RegionSpaceUse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.RegionSpaceUse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getRegionFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        if (regionBuilder_ == null) {
+          region_ = null;
+        } else {
+          regionBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        size_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (regionBuilder_ == null) {
+          result.region_ = region_;
+        } else {
+          result.region_ = regionBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.size_ = size_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance()) return this;
+        if (other.hasRegion()) {
+          mergeRegion(other.getRegion());
+        }
+        if (other.hasSize()) {
+          setSize(other.getSize());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (hasRegion()) {
+          if (!getRegion().isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo region_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_;
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public boolean hasRegion() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion() {
+        if (regionBuilder_ == null) {
+          return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+        } else {
+          return regionBuilder_.getMessage();
+        }
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public Builder setRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          region_ = value;
+          onChanged();
+        } else {
+          regionBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public Builder setRegion(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+        if (regionBuilder_ == null) {
+          region_ = builderForValue.build();
+          onChanged();
+        } else {
+          regionBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public Builder mergeRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              region_ != null &&
+              region_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
+            region_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(region_).mergeFrom(value).buildPartial();
+          } else {
+            region_ = value;
+          }
+          onChanged();
+        } else {
+          regionBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public Builder clearRegion() {
+        if (regionBuilder_ == null) {
+          region_ = null;
+          onChanged();
+        } else {
+          regionBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getRegionFieldBuilder().getBuilder();
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() {
+        if (regionBuilder_ != null) {
+          return regionBuilder_.getMessageOrBuilder();
+        } else {
+          return region_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+        }
+      }
+      /**
+       * <pre>
+       * A region identifier
+       * </pre>
+       *
+       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+          getRegionFieldBuilder() {
+        if (regionBuilder_ == null) {
+          regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+                  getRegion(),
+                  getParentForChildren(),
+                  isClean());
+          region_ = null;
+        }
+        return regionBuilder_;
+      }
+
+      private long size_ ;
+      /**
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+       *
+       * <code>optional uint64 size = 2;</code>
+       */
+      public boolean hasSize() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+       *
+       * <code>optional uint64 size = 2;</code>
+       */
+      public long getSize() {
+        return size_;
+      }
+      /**
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+       *
+       * <code>optional uint64 size = 2;</code>
+       */
+      public Builder setSize(long value) {
+        bitField0_ |= 0x00000002;
+        size_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <pre>
+       * The size in bytes of the region
+       * </pre>
+       *
+       * <code>optional uint64 size = 2;</code>
+       */
+      public Builder clearSize() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        size_ = 0L;
+        onChanged();
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpaceUse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpaceUse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RegionSpaceUse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<RegionSpaceUse>() {
+      public RegionSpaceUse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new RegionSpaceUse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RegionSpaceUse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RegionSpaceUse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface RegionSpaceUseReportRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.RegionSpaceUseReportRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse>
+        getSpaceUseList();
+    /**
+     * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getSpaceUse(int index);
+    /**
+     * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+     */
+    int getSpaceUseCount();
+    /**
+     * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>
+        getSpaceUseOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder getSpaceUseOrBuilder(
+        int index);
+  }
+  /**
+   * <pre>
+   **
+   * Reports filesystem usage for regions.
+   * </pre>
+   *
+   * Protobuf type {@code hbase.pb.RegionSpaceUseReportRequest}
+   */
+  public  static final class RegionSpaceUseReportRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.RegionSpaceUseReportRequest)
+      RegionSpaceUseReportRequestOrBuilder {
+    // Use RegionSpaceUseReportRequest.newBuilder() to construct.
+    private RegionSpaceUseReportRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private RegionSpaceUseReportRequest() {
+      spaceUse_ = java.util.Collections.emptyList();
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RegionSpaceUseReportRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                spaceUse_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse>();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              spaceUse_.add(
+                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          spaceUse_ = java.util.Collections.unmodifiableList(spaceUse_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.Builder.class);
+    }
+
+    public static final int SPACE_USE_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse> spaceUse_;
+    /**
+     * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+     */
+    public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse> getSpaceUseList() {
+      return spaceUse_;
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>
+        getSpaceUseOrBuilderList() {
+      return spaceUse_;
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+     */
+    public int getSpaceUseCount() {
+      return spaceUse_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getSpaceUse(int index) {
+      return spaceUse_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder getSpaceUseOrBuilder(
+        int index) {
+      return spaceUse_.get(index);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      for (int i = 0; i < getSpaceUseCount(); i++) {
+        if (!getSpaceUse(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      for (int i = 0; i < spaceUse_.size(); i++) {
+        output.writeMessage(1, spaceUse_.get(i));
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      for (int i = 0; i < spaceUse_.size(); i++) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, spaceUse_.get(i));
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest) obj;
+
+      boolean result = true;
+      result = result && getSpaceUseList()
+          .equals(other.getSpaceUseList());
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (getSpaceUseCount() > 0) {
+        hash = (37 * hash) + SPACE_USE_FIELD_NUMBER;
+        hash = (53 * hash) + getSpaceUseList().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * <pre>
+     **
+     * Reports filesystem usage for regions.
+     * </pre>
+     *
+     * Protobuf type {@code hbase.pb.RegionSpaceUseReportRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.RegionSpaceUseReportRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getSpaceUseFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        if (spaceUseBuilder_ == null) {
+          spaceUse_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          spaceUseBuilder_.clear();
+        }
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest(this);
+        int from_bitField0_ = bitField0_;
+        if (spaceUseBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            spaceUse_ = java.util.Collections.unmodifiableList(spaceUse_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.spaceUse_ = spaceUse_;
+        } else {
+          result.spaceUse_ = spaceUseBuilder_.build();
+        }
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest.getDefaultInstance()) return this;
+        if (spaceUseBuilder_ == null) {
+          if (!other.spaceUse_.isEmpty()) {
+            if (spaceUse_.isEmpty()) {
+              spaceUse_ = other.spaceUse_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureSpaceUseIsMutable();
+              spaceUse_.addAll(other.spaceUse_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.spaceUse_.isEmpty()) {
+            if (spaceUseBuilder_.isEmpty()) {
+              spaceUseBuilder_.dispose();
+              spaceUseBuilder_ = null;
+              spaceUse_ = other.spaceUse_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              spaceUseBuilder_ =
+                org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
+                   getSpaceUseFieldBuilder() : null;
+            } else {
+              spaceUseBuilder_.addAllMessages(other.spaceUse_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        for (int i = 0; i < getSpaceUseCount(); i++) {
+          if (!getSpaceUse(i).isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse> spaceUse_ =
+        java.util.Collections.emptyList();
+      private void ensureSpaceUseIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          spaceUse_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse>(spaceUse_);
+          bitField0_ |= 0x00000001;
+         }
+      }
+
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder> spaceUseBuilder_;
+
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse> getSpaceUseList() {
+        if (spaceUseBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(spaceUse_);
+        } else {
+          return spaceUseBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public int getSpaceUseCount() {
+        if (spaceUseBuilder_ == null) {
+          return spaceUse_.size();
+        } else {
+          return spaceUseBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse getSpaceUse(int index) {
+        if (spaceUseBuilder_ == null) {
+          return spaceUse_.get(index);
+        } else {
+          return spaceUseBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public Builder setSpaceUse(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse value) {
+        if (spaceUseBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureSpaceUseIsMutable();
+          spaceUse_.set(index, value);
+          onChanged();
+        } else {
+          spaceUseBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public Builder setSpaceUse(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder builderForValue) {
+        if (spaceUseBuilder_ == null) {
+          ensureSpaceUseIsMutable();
+          spaceUse_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          spaceUseBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public Builder addSpaceUse(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse value) {
+        if (spaceUseBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureSpaceUseIsMutable();
+          spaceUse_.add(value);
+          onChanged();
+        } else {
+          spaceUseBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public Builder addSpaceUse(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse value) {
+        if (spaceUseBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureSpaceUseIsMutable();
+          spaceUse_.add(index, value);
+          onChanged();
+        } else {
+          spaceUseBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public Builder addSpaceUse(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder builderForValue) {
+        if (spaceUseBuilder_ == null) {
+          ensureSpaceUseIsMutable();
+          spaceUse_.add(builderForValue.build());
+          onChanged();
+        } else {
+          spaceUseBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public Builder addSpaceUse(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder builderForValue) {
+        if (spaceUseBuilder_ == null) {
+          ensureSpaceUseIsMutable();
+          spaceUse_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          spaceUseBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public Builder addAllSpaceUse(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse> values) {
+        if (spaceUseBuilder_ == null) {
+          ensureSpaceUseIsMutable();
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll(
+              values, spaceUse_);
+          onChanged();
+        } else {
+          spaceUseBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public Builder clearSpaceUse() {
+        if (spaceUseBuilder_ == null) {
+          spaceUse_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          spaceUseBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public Builder removeSpaceUse(int index) {
+        if (spaceUseBuilder_ == null) {
+          ensureSpaceUseIsMutable();
+          spaceUse_.remove(index);
+          onChanged();
+        } else {
+          spaceUseBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder getSpaceUseBuilder(
+          int index) {
+        return getSpaceUseFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder getSpaceUseOrBuilder(
+          int index) {
+        if (spaceUseBuilder_ == null) {
+          return spaceUse_.get(index);  } else {
+          return spaceUseBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>
+           getSpaceUseOrBuilderList() {
+        if (spaceUseBuilder_ != null) {
+          return spaceUseBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(spaceUse_);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder addSpaceUseBuilder() {
+        return getSpaceUseFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder addSpaceUseBuilder(
+          int index) {
+        return getSpaceUseFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder>
+           getSpaceUseBuilderList() {
+        return getSpaceUseFieldBuilder().getBuilderList();
+      }
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>
+          getSpaceUseFieldBuilder() {
+        if (spaceUseBuilder_ == null) {
+          spaceUseBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>(
+                  spaceUse_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
+                  getParentForChildren(),
+                  isClean());
+          spaceUse_ = null;
+        }
+        return spaceUseBuilder_;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpaceUseReportRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpaceUseReportRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RegionSpaceUseReportRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<RegionSpaceUseReportRequest>() {
+      public RegionSpaceUseReportRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new RegionSpaceUseReportRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RegionSpaceUseReportRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RegionSpaceUseReportRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface RegionSpaceUseReportResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.RegionSpaceUseReportResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.RegionSpaceUseReportResponse}
+   */
+  public  static final class RegionSpaceUseReportResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.RegionSpaceUseReportResponse)
+      RegionSpaceUseReportResponseOrBuilder {
+    // Use RegionSpaceUseReportResponse.newBuilder() to construct.
+    private RegionSpaceUseReportResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private RegionSpaceUseReportResponse() {
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private RegionSpaceUseReportResponse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.Builder.class);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) obj;
+
+      boolean result = true;
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.RegionSpaceUseReportResponse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.RegionSpaceUseReportResponse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.RegionSpaceUseReportResponse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.RegionSpaceUseReportResponse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RegionSpaceUseReportResponse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<RegionSpaceUseReportResponse>() {
+      public RegionSpaceUseReportResponse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new RegionSpaceUseReportResponse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RegionSpaceUseReportResponse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<RegionSpaceUseReportResponse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
   /**
    * Protobuf service {@code hbase.pb.RegionServerStatusService}
    */
@@ -10265,6 +12171,19 @@ public final class RegionServerStatusProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse> done);
 
+      /**
+       * <pre>
+       **
+       * Reports Region filesystem space use
+       * </pre>
+       *
+       * <code>rpc ReportRegionSpaceUse(.hbase.pb.RegionSpaceUseReportRequest) returns (.hbase.pb.RegionSpaceUseReportResponse);</code>
+       */
+      public abstract void reportRegionSpaceUse(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse> done);
+
     }
 
     public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService(
@@ -10326,6 +12245,14 @@ public final class RegionServerStatusProtos {
           impl.getProcedureResult(controller, request, done);
         }
 
+        @java.lang.Override
+        public  void reportRegionSpaceUse(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse> done) {
+          impl.reportRegionSpaceUse(controller, request, done);
+        }
+
       };
     }
 
@@ -10362,6 +12289,8 @@ public final class RegionServerStatusProtos {
               return impl.splitRegion(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.SplitTableRegionRequest)request);
             case 6:
               return impl.getProcedureResult(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest)request);
+            case 7:
+              return impl.reportRegionSpaceUse(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
           }
@@ -10390

<TRUNCATED>
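
For readers skimming the generated code above: the new RegionSpaceUseReportRequest carries a single repeated RegionSpaceUse field, and the usual protobuf builder and parser methods (addSpaceUse, addAllSpaceUse, build, parseFrom) are generated for it. A minimal sketch of assembling such a request on the caller side, assuming the RegionSpaceUse messages themselves are built elsewhere (their fields are not visible in this hunk):

import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;

public class RegionSpaceUseReportSketch {
  // Wraps pre-built RegionSpaceUse messages into one report request, using only the
  // generated builder API shown in the diff above (class and method names as generated).
  static RegionSpaceUseReportRequest buildReport(Iterable<RegionSpaceUse> spaceUses) {
    return RegionSpaceUseReportRequest.newBuilder()
        .addAllSpaceUse(spaceUses)   // repeated .hbase.pb.RegionSpaceUse space_use = 1
        .build();
  }
}

On the receiving side this request is handled by the reportRegionSpaceUse RPC declared further down in the same diff.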

[03/50] [abbrv] hbase git commit: HBASE-17469 Properly handle empty TableName in TablePermission#readFields and #write (Manjunath Anand)

Posted by el...@apache.org.
HBASE-17469 Properly handle empty TableName in TablePermission#readFields and #write (Manjunath Anand)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/faa9f735
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/faa9f735
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/faa9f735

Branch: refs/heads/HBASE-16961
Commit: faa9f735ca67ee3a2e1a59d93b519421e97e940f
Parents: 9b38c1a
Author: tedyu <yu...@gmail.com>
Authored: Tue Jan 17 09:36:59 2017 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Tue Jan 17 09:36:59 2017 -0800

----------------------------------------------------------------------
 .../hbase/security/access/TablePermission.java  | 20 +++++++++++---------
 .../security/access/TestTablePermissions.java   |  2 ++
 2 files changed, 13 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/faa9f735/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
index cf3f071..e9ecea4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
@@ -189,7 +189,7 @@ public class TablePermission extends Permission {
    *   by this permission, <code>false</code>
    */
   public boolean implies(String namespace, Action action) {
-    if (!this.namespace.equals(namespace)) {
+    if (this.namespace == null || !this.namespace.equals(namespace)) {
       return false;
     }
 
@@ -212,7 +212,7 @@ public class TablePermission extends Permission {
    */
   public boolean implies(TableName table, byte[] family, byte[] qualifier,
       Action action) {
-    if (!this.table.equals(table)) {
+    if (this.table == null || !this.table.equals(table)) {
       return false;
     }
 
@@ -242,7 +242,7 @@ public class TablePermission extends Permission {
    *   by this permission, otherwise <code>false</code>
    */
   public boolean implies(TableName table, KeyValue kv, Action action) {
-    if (!this.table.equals(table)) {
+    if (this.table == null || !this.table.equals(table)) {
       return false;
     }
 
@@ -267,7 +267,7 @@ public class TablePermission extends Permission {
    * return false.
    */
   public boolean matchesFamily(TableName table, byte[] family, Action action) {
-    if (!this.table.equals(table)) {
+    if (this.table == null || !this.table.equals(table)) {
       return false;
     }
 
@@ -356,17 +356,16 @@ public class TablePermission extends Permission {
       str.append("namespace=").append(namespace)
          .append(", ");
     }
-    else if(table != null) {
+    if(table != null) {
        str.append("table=").append(table)
           .append(", family=")
           .append(family == null ? null : Bytes.toString(family))
           .append(", qualifier=")
           .append(qualifier == null ? null : Bytes.toString(qualifier))
           .append(", ");
-    } else {
-      str.append("actions=");
     }
     if (actions != null) {
+      str.append("actions=");
       for (int i=0; i<actions.length; i++) {
         if (i > 0)
           str.append(",");
@@ -385,7 +384,9 @@ public class TablePermission extends Permission {
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
     byte[] tableBytes = Bytes.readByteArray(in);
-    table = TableName.valueOf(tableBytes);
+    if(tableBytes.length > 0) {
+      table = TableName.valueOf(tableBytes);
+    }
     if (in.readBoolean()) {
       family = Bytes.readByteArray(in);
     }
@@ -400,7 +401,8 @@ public class TablePermission extends Permission {
   @Override
   public void write(DataOutput out) throws IOException {
     super.write(out);
-    Bytes.writeByteArray(out, table.getName());
+    // Explicitly writing null to maintain serialize/deserialize backward compatibility.
+    Bytes.writeByteArray(out, (table == null) ? null : table.getName());
     out.writeBoolean(family != null);
     if (family != null) {
       Bytes.writeByteArray(out, family);

http://git-wip-us.apache.org/repos/asf/hbase/blob/faa9f735/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
index 1e525e2..b23651f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestTablePermissions.java
@@ -349,6 +349,8 @@ public class TestTablePermissions {
         TablePermission.Action.READ));
     permissions.put("hubert", new TablePermission(TEST_TABLE2, null,
         TablePermission.Action.READ, TablePermission.Action.WRITE));
+    permissions.put("bruce",new TablePermission(TEST_NAMESPACE,
+        TablePermission.Action.READ));
     return permissions;
   }
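
The guard added in each of the implies/matchesFamily overloads above follows one pattern: check the scoping field for null before calling equals, so a namespace-scoped permission no longer throws a NullPointerException when probed with a table (and vice versa). A minimal sketch of that pattern, with an illustrative helper name that is not part of the patch:

import org.apache.hadoop.hbase.TableName;

public class TablePermissionGuardSketch {
  // Null-safe scope check mirroring the patch: a permission whose table scope was never
  // set cannot imply an action on any table, so return false instead of dereferencing null.
  static boolean scopeMatches(TableName scope, TableName requested) {
    return scope != null && scope.equals(requested);
  }
}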
 


[18/50] [abbrv] hbase git commit: HBASE-17480 Remove split region code from Region Server (Stephen Yuan Jiang)

Posted by el...@apache.org.
HBASE-17480 Remove split region code from Region Server (Stephen Yuan Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bff7c4f1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bff7c4f1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bff7c4f1

Branch: refs/heads/HBASE-16961
Commit: bff7c4f1fda5517c469db7863706140e3c97e9e0
Parents: cb9ce2c
Author: Stephen Yuan Jiang <sy...@gmail.com>
Authored: Thu Jan 19 09:05:19 2017 -0800
Committer: Stephen Yuan Jiang <sy...@gmail.com>
Committed: Thu Jan 19 09:05:19 2017 -0800

----------------------------------------------------------------------
 .../hbase/regionserver/SplitTransaction.java    | 265 ------
 .../regionserver/SplitTransactionFactory.java   |  74 --
 .../regionserver/SplitTransactionImpl.java      | 832 -------------------
 .../coprocessor/TestCoprocessorInterface.java   | 137 +--
 .../hadoop/hbase/regionserver/TestHRegion.java  | 340 --------
 .../TestHRegionWithInMemoryFlush.java           |  94 ---
 .../regionserver/TestSplitTransaction.java      | 402 ---------
 7 files changed, 30 insertions(+), 2114 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/bff7c4f1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
deleted file mode 100644
index d92bf07..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.PairOfSameType;
-
-/**
- * Executes region split as a "transaction".  Call {@link #prepare()} to setup
- * the transaction, {@link #execute(Server, RegionServerServices)} to run the
- * transaction and {@link #rollback(Server, RegionServerServices)} to cleanup if execute fails.
- *
- * <p>Here is an example of how you would use this interface:
- * <pre>
- *  SplitTransactionFactory factory = new SplitTransactionFactory(conf);
- *  SplitTransaction st = factory.create(parent, midKey)
- *    .registerTransactionListener(new TransactionListener() {
- *       public void transition(SplitTransaction transaction, SplitTransactionPhase from,
- *           SplitTransactionPhase to) throws IOException {
- *         // ...
- *       }
- *       public void rollback(SplitTransaction transaction, SplitTransactionPhase from,
- *           SplitTransactionPhase to) {
- *         // ...
- *       }
- *    });
- *  if (!st.prepare()) return;
- *  try {
- *    st.execute(server, services);
- *  } catch (IOException e) {
- *    try {
- *      st.rollback(server, services);
- *      return;
- *    } catch (RuntimeException e) {
- *      // abort the server
- *    }
- *  }
- * </Pre>
- * <p>A split transaction is not thread safe.  Callers must ensure a split is run by
- * one thread only.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-@InterfaceStability.Evolving
-public interface SplitTransaction {
-
-  /**
-   * Each enum is a step in the split transaction.
-   */
-  public enum SplitTransactionPhase {
-    /**
-     * Started
-     */
-    STARTED,
-    /**
-     * Prepared
-     */
-    PREPARED,
-    /**
-     * Before preSplit coprocessor hook
-     */
-    BEFORE_PRE_SPLIT_HOOK,
-    /**
-     * After preSplit coprocessor hook
-     */
-    AFTER_PRE_SPLIT_HOOK,
-    /**
-     * Set region as in transition, set it into SPLITTING state.
-     */
-    SET_SPLITTING,
-    /**
-     * We created the temporary split data directory.
-     */
-    CREATE_SPLIT_DIR,
-    /**
-     * Closed the parent region.
-     */
-    CLOSED_PARENT_REGION,
-    /**
-     * The parent has been taken out of the server's online regions list.
-     */
-    OFFLINED_PARENT,
-    /**
-     * Started in on creation of the first daughter region.
-     */
-    STARTED_REGION_A_CREATION,
-    /**
-     * Started in on the creation of the second daughter region.
-     */
-    STARTED_REGION_B_CREATION,
-    /**
-     * Opened the first daughter region
-     */
-    OPENED_REGION_A,
-    /**
-     * Opened the second daughter region
-     */
-    OPENED_REGION_B,
-    /**
-     * Point of no return.
-     * If we got here, then transaction is not recoverable other than by
-     * crashing out the regionserver.
-     */
-    PONR,
-    /**
-     * Before postSplit coprocessor hook
-     */
-    BEFORE_POST_SPLIT_HOOK,
-    /**
-     * After postSplit coprocessor hook
-     */
-    AFTER_POST_SPLIT_HOOK,
-    /**
-     * Completed
-     */
-    COMPLETED
-  }
-
-  /**
-   * Split transaction journal entry
-   */
-  public interface JournalEntry {
-
-    /** @return the completed phase marked by this journal entry */
-    SplitTransactionPhase getPhase();
-
-    /** @return the time of phase completion */
-    long getTimeStamp();
-  }
-
-  /**
-   * Split transaction listener
-   */
-  public interface TransactionListener {
-
-    /**
-     * Invoked when transitioning forward from one transaction phase to another
-     * @param transaction the transaction
-     * @param from the current phase
-     * @param to the next phase
-     * @throws IOException listener can throw this to abort
-     */
-    void transition(SplitTransaction transaction, SplitTransactionPhase from,
-        SplitTransactionPhase to) throws IOException;
-
-    /**
-     * Invoked when rolling back a transaction from one transaction phase to the
-     * previous
-     * @param transaction the transaction
-     * @param from the current phase
-     * @param to the previous phase
-     */
-    void rollback(SplitTransaction transaction, SplitTransactionPhase from,
-        SplitTransactionPhase to);
-  }
-
-  /**
-   * Check split inputs and prepare the transaction.
-   * @return <code>true</code> if the region is splittable else
-   * <code>false</code> if it is not (e.g. its already closed, etc.).
-   * @throws IOException 
-   */
-  boolean prepare() throws IOException;
-
-  /**
-   * Run the transaction.
-   * @param server Hosting server instance.  Can be null when testing.
-   * @param services Used to online/offline regions.
-   * @throws IOException If thrown, transaction failed.
-   *          Call {@link #rollback(Server, RegionServerServices)}
-   * @return Regions created
-   * @throws IOException
-   * @see #rollback(Server, RegionServerServices)
-   * @deprecated use #execute(Server, RegionServerServices, User);  as of 1.0.2, remove in 3.0
-   */
-  @Deprecated
-  PairOfSameType<Region> execute(Server server, RegionServerServices services) throws IOException;
-
-  /**
-   * Run the transaction.
-   * @param server Hosting server instance.  Can be null when testing.
-   * @param services Used to online/offline regions.
-   * @param user
-   * @throws IOException If thrown, transaction failed.
-   *          Call {@link #rollback(Server, RegionServerServices)}
-   * @return Regions created
-   * @throws IOException
-   * @see #rollback(Server, RegionServerServices)
-   */
-  PairOfSameType<Region> execute(Server server, RegionServerServices services, User user)
-      throws IOException;
-
-  /**
-   * Roll back a failed transaction
-   * @param server Hosting server instance (May be null when testing).
-   * @param services
-   * @throws IOException If thrown, rollback failed.  Take drastic action.
-   * @return True if we successfully rolled back, false if we got to the point
-   * of no return and so now need to abort the server to minimize damage.
-   * @deprecated use #rollback(Server, RegionServerServices, User); as of 1.0.2, remove in 3.0
-   */
-  @Deprecated
-  boolean rollback(Server server, RegionServerServices services) throws IOException;
-
-  /**
-   * Roll back a failed transaction
-   * @param server Hosting server instance (May be null when testing).
-   * @param services
-   * @param user
-   * @throws IOException If thrown, rollback failed.  Take drastic action.
-   * @return True if we successfully rolled back, false if we got to the point
-   * of no return and so now need to abort the server to minimize damage.
-   */
-  boolean rollback(Server server, RegionServerServices services, User user) throws IOException;
-
-  /**
-   * Register a listener for transaction preparation, execution, and possibly
-   * rollback phases.
-   * <p>A listener can abort a transaction by throwing an exception. 
-   * @param listener the listener
-   * @return 'this' for chaining
-   */
-  SplitTransaction registerTransactionListener(TransactionListener listener);
-
-  /**
-   * Get the journal for the transaction.
-   * <p>Journal entries are an opaque type represented as JournalEntry. They can
-   * also provide useful debugging information via their toString method.
-   * @return the transaction journal
-   */
-  List<JournalEntry> getJournal();
-
-  /**
-   * Get the Server running the transaction or rollback
-   * @return server instance
-   */
-  Server getServer();
-
-  /**
-   * Get the RegonServerServices of the server running the transaction or rollback
-   * @return region server services
-   */
-  RegionServerServices getRegionServerServices();
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/bff7c4f1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionFactory.java
deleted file mode 100644
index 7df8233..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionFactory.java
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.util.ReflectionUtils;
-
-/**
- * A factory for creating SplitTransactions, which execute region split as a "transaction".
- * See {@link SplitTransaction}
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-@InterfaceStability.Evolving
-public class SplitTransactionFactory implements Configurable {
-
-  public static final String SPLIT_TRANSACTION_IMPL_KEY =
-      "hbase.regionserver.split.transaction.impl";
-
-  private Configuration conf;
-
-  public SplitTransactionFactory(Configuration conf) {
-    this.conf = conf;
-  }
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-  }
-
-  /**
-   * Create a split transaction
-   * @param r the region to split
-   * @param splitrow the split point in the keyspace
-   * @return transaction instance
-   */
-  public SplitTransaction create(final Region r, final byte [] splitrow) {
-    return ReflectionUtils.instantiateWithCustomCtor(
-      // The implementation class must extend SplitTransactionImpl, not only
-      // implement the SplitTransaction interface like you might expect,
-      // because various places such as AssignmentManager use static methods
-      // from SplitTransactionImpl. Whatever we use for implementation must
-      // be compatible, so it's safest to require ? extends SplitTransactionImpl.
-      // If not compatible we will throw a runtime exception from here.
-      conf.getClass(SPLIT_TRANSACTION_IMPL_KEY, SplitTransactionImpl.class,
-        SplitTransactionImpl.class).getName(),
-      new Class[] { Region.class, byte[].class },
-      new Object[] { r, splitrow });
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/bff7c4f1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
deleted file mode 100644
index 96d7bc4..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransactionImpl.java
+++ /dev/null
@@ -1,832 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CancelableProgressable;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HasThread;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.PairOfSameType;
-import org.apache.zookeeper.KeeperException;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
-@InterfaceAudience.Private
-public class SplitTransactionImpl implements SplitTransaction {
-  private static final Log LOG = LogFactory.getLog(SplitTransactionImpl.class);
-
-  /*
-   * Region to split
-   */
-  private final HRegion parent;
-  private HRegionInfo hri_a;
-  private HRegionInfo hri_b;
-  private long fileSplitTimeout = 30000;
-
-  /*
-   * Row to split around
-   */
-  private final byte [] splitrow;
-
-  /*
-   * Transaction state for listener, only valid during execute and
-   * rollback
-   */
-  private SplitTransactionPhase currentPhase = SplitTransactionPhase.STARTED;
-  private Server server;
-  private RegionServerServices rsServices;
-
-  public static class JournalEntryImpl implements JournalEntry {
-    private SplitTransactionPhase type;
-    private long timestamp;
-
-    public JournalEntryImpl(SplitTransactionPhase type) {
-      this(type, EnvironmentEdgeManager.currentTime());
-    }
-
-    public JournalEntryImpl(SplitTransactionPhase type, long timestamp) {
-      this.type = type;
-      this.timestamp = timestamp;
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder();
-      sb.append(type);
-      sb.append(" at ");
-      sb.append(timestamp);
-      return sb.toString();
-    }
-
-    @Override
-    public SplitTransactionPhase getPhase() {
-      return type;
-    }
-
-    @Override
-    public long getTimeStamp() {
-      return timestamp;
-    }
-  }
-
-  /*
-   * Journal of how far the split transaction has progressed.
-   */
-  private final ArrayList<JournalEntry> journal = new ArrayList<JournalEntry>();
-
-  /**
-   * Listeners
-   */
-  private final ArrayList<TransactionListener> listeners = new ArrayList<TransactionListener>();
-
-  /**
-   * Constructor
-   * @param r Region to split
-   * @param splitrow Row to split around
-   */
-  public SplitTransactionImpl(final Region r, final byte [] splitrow) {
-    this.parent = (HRegion)r;
-    this.splitrow = splitrow;
-    this.journal.add(new JournalEntryImpl(SplitTransactionPhase.STARTED));
-  }
-
-  private void transition(SplitTransactionPhase nextPhase) throws IOException {
-    transition(nextPhase, false);
-  }
-
-  private void transition(SplitTransactionPhase nextPhase, boolean isRollback)
-      throws IOException {
-    if (!isRollback) {
-      // Add to the journal first, because if the listener throws an exception
-      // we need to roll back starting at 'nextPhase'
-      this.journal.add(new JournalEntryImpl(nextPhase));
-    }
-    for (int i = 0; i < listeners.size(); i++) {
-      TransactionListener listener = listeners.get(i);
-      if (!isRollback) {
-        listener.transition(this, currentPhase, nextPhase);
-      } else {
-        listener.rollback(this, currentPhase, nextPhase);
-      }
-    }
-    currentPhase = nextPhase;
-  }
-
-  @Override
-  public boolean prepare() throws IOException {
-    if (!this.parent.isSplittable()) return false;
-    // Split key can be null if this region is unsplittable; i.e. has refs.
-    if (this.splitrow == null) return false;
-    HRegionInfo hri = this.parent.getRegionInfo();
-    parent.prepareToSplit();
-    // Check splitrow.
-    byte [] startKey = hri.getStartKey();
-    byte [] endKey = hri.getEndKey();
-    if (Bytes.equals(startKey, splitrow) ||
-        !this.parent.getRegionInfo().containsRow(splitrow)) {
-      LOG.info("Split row is not inside region key range or is equal to " +
-          "startkey: " + Bytes.toStringBinary(this.splitrow));
-      return false;
-    }
-    long rid = getDaughterRegionIdTimestamp(hri);
-    this.hri_a = new HRegionInfo(hri.getTable(), startKey, this.splitrow, false, rid);
-    this.hri_b = new HRegionInfo(hri.getTable(), this.splitrow, endKey, false, rid);
-
-    transition(SplitTransactionPhase.PREPARED);
-
-    return true;
-  }
-
-  /**
-   * Calculate daughter regionid to use.
-   * @param hri Parent {@link HRegionInfo}
-   * @return Daughter region id (timestamp) to use.
-   */
-  private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) {
-    long rid = EnvironmentEdgeManager.currentTime();
-    // The region id is a timestamp. It can't be less than that of the parent or the
-    // daughters will sort to the wrong location in hbase:meta (see HBASE-710).
-    if (rid < hri.getRegionId()) {
-      LOG.warn("Clock skew; parent regions id is " + hri.getRegionId() +
-        " but current time here is " + rid);
-      rid = hri.getRegionId() + 1;
-    }
-    return rid;
-  }
-
-  private static IOException closedByOtherException = new IOException(
-      "Failed to close region: already closed by another thread");
-
-  /**
-   * Prepare the regions and region files.
-   * @param server Hosting server instance.  Can be null when testing (won't try
-   * and update in zk if a null server)
-   * @param services Used to online/offline regions.
-   * @param user
-   * @throws IOException If thrown, transaction failed.
-   *    Call {@link #rollback(Server, RegionServerServices)}
-   * @return Regions created
-   */
-  @VisibleForTesting
-  PairOfSameType<Region> createDaughters(final Server server,
-      final RegionServerServices services, User user) throws IOException {
-    LOG.info("Starting split of region " + this.parent);
-    if ((server != null && server.isStopped()) ||
-        (services != null && services.isStopping())) {
-      throw new IOException("Server is stopped or stopping");
-    }
-    assert !this.parent.lock.writeLock().isHeldByCurrentThread():
-      "Unsafe to hold write lock while performing RPCs";
-
-    transition(SplitTransactionPhase.BEFORE_PRE_SPLIT_HOOK);
-
-    // Coprocessor callback
-    if (this.parent.getCoprocessorHost() != null) {
-      // TODO: Remove one of these
-      parent.getCoprocessorHost().preSplit(user);
-      parent.getCoprocessorHost().preSplit(splitrow, user);
-    }
-
-    transition(SplitTransactionPhase.AFTER_PRE_SPLIT_HOOK);
-
-    // If true, no cluster to write meta edits to or to update znodes in.
-    boolean testing = server == null? true:
-        server.getConfiguration().getBoolean("hbase.testing.nocluster", false);
-    this.fileSplitTimeout = testing ? this.fileSplitTimeout :
-        server.getConfiguration().getLong("hbase.regionserver.fileSplitTimeout",
-          this.fileSplitTimeout);
-
-    PairOfSameType<Region> daughterRegions = stepsBeforePONR(server, services, testing);
-
-    final List<Mutation> metaEntries = new ArrayList<Mutation>();
-    boolean ret = false;
-    if (this.parent.getCoprocessorHost() != null) {
-      ret = parent.getCoprocessorHost().preSplitBeforePONR(splitrow, metaEntries, user);
-      if (ret) {
-          throw new IOException("Coprocessor bypassing region "
-            + parent.getRegionInfo().getRegionNameAsString() + " split.");
-      }
-      try {
-        for (Mutation p : metaEntries) {
-          HRegionInfo.parseRegionName(p.getRow());
-        }
-      } catch (IOException e) {
-        LOG.error("Row key of mutation from coprocessor is not parsable as region name."
-            + "Mutations from coprocessor should only for hbase:meta table.");
-        throw e;
-      }
-    }
-
-    // This is the point of no return. The subsequent edits to .META. that we make
-    // below while opening the daughters can fail in various interesting ways, the
-    // most interesting of which is a timeout after which the edits nonetheless all
-    // go through (see HBASE-3872). Once we reach the PONR, any subsequent failure
-    // must crash out this regionserver; the server shutdown processing should be
-    // able to fix up the incomplete split.
-    // The offlined parent will have the daughters as extra columns. If we leave
-    // the daughter regions in place and do not remove them when we crash out,
-    // they will still hold their references to the parent, and the server
-    // shutdown fixup of .META. will point at these regions.
-    // We add the PONR JournalEntry before offlineParentInMeta, so even if
-    // offlineParentInMeta times out, the regionserver will exit and the master's
-    // ServerShutdownHandler will fix the daughters and avoid data loss (see
-    // HBASE-4562).
-
-    transition(SplitTransactionPhase.PONR);
-
-    // Edit parent in meta.  Offlines the parent region and adds splita and splitb
-    // as an atomic update. See HBASE-7721. This update to META determines whether
-    // the region is considered split or not in case of failure.
-    // If it succeeds, the master will roll forward; if not, the master will roll
-    // back and assign the parent region.
-    if (services != null && !services.reportRegionStateTransition(TransitionCode.SPLIT_PONR,
-        parent.getRegionInfo(), hri_a, hri_b)) {
-      // Passed PONR, let SSH clean it up
-      throw new IOException("Failed to notify master that split passed PONR: "
-        + parent.getRegionInfo().getRegionNameAsString());
-    }
-    return daughterRegions;
-  }
-
-  @VisibleForTesting
-  Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
-    p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
-        Bytes.toBytes(sn.getHostAndPort()));
-    p.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER,
-        Bytes.toBytes(sn.getStartcode()));
-    p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum));
-    return p;
-  }
-
-  @VisibleForTesting
-  public PairOfSameType<Region> stepsBeforePONR(final Server server,
-      final RegionServerServices services, boolean testing) throws IOException {
-    if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT,
-        parent.getRegionInfo(), hri_a, hri_b)) {
-      throw new IOException("Failed to get ok from master to split "
-        + parent.getRegionInfo().getRegionNameAsString());
-    }
-
-    transition(SplitTransactionPhase.SET_SPLITTING);
-
-    this.parent.getRegionFileSystem().createSplitsDir();
-
-    transition(SplitTransactionPhase.CREATE_SPLIT_DIR);
-
-    Map<byte[], List<StoreFile>> hstoreFilesToSplit = null;
-    Exception exceptionToThrow = null;
-    try{
-      hstoreFilesToSplit = this.parent.close(false);
-    } catch (Exception e) {
-      exceptionToThrow = e;
-    }
-    if (exceptionToThrow == null && hstoreFilesToSplit == null) {
-      // The region was closed by a concurrent thread.  We can't continue
-      // with the split; instead we must just abandon the split.  If we
-      // reopen or split this could cause problems because the region has
-      // probably already been moved to a different server, or is in the
-      // process of moving to a different server.
-      exceptionToThrow = closedByOtherException;
-    }
-    if (exceptionToThrow != closedByOtherException) {
-      transition(SplitTransactionPhase.CLOSED_PARENT_REGION);
-    }
-    if (exceptionToThrow != null) {
-      if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow;
-      throw new IOException(exceptionToThrow);
-    }
-    if (!testing) {
-      services.removeFromOnlineRegions(this.parent, null);
-    }
-
-    transition(SplitTransactionPhase.OFFLINED_PARENT);
-
-    // TODO: If splitStoreFiles were multithreaded would we complete steps in
-    // less elapsed time?  St.Ack 20100920
-    //
-    // splitStoreFiles creates daughter region dirs under the parent splits dir
-    // Nothing to unroll here if failure -- clean up of CREATE_SPLIT_DIR will
-    // clean this up.
-    Pair<Integer, Integer> expectedReferences = splitStoreFiles(hstoreFilesToSplit);
-
-    // Log to the journal that we are creating region A, the first daughter
-    // region.  We could fail halfway through.  If we do, we could have left
-    // stuff in fs that needs cleanup -- a storefile or two.  That's why we
-    // add the entry to the journal BEFORE rather than AFTER the change.
-
-    transition(SplitTransactionPhase.STARTED_REGION_A_CREATION);
-
-    assertReferenceFileCount(expectedReferences.getFirst(),
-        this.parent.getRegionFileSystem().getSplitsDir(this.hri_a));
-    HRegion a = this.parent.createDaughterRegionFromSplits(this.hri_a);
-    assertReferenceFileCount(expectedReferences.getFirst(),
-        new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_a.getEncodedName()));
-
-    // Ditto
-
-    transition(SplitTransactionPhase.STARTED_REGION_B_CREATION);
-
-    assertReferenceFileCount(expectedReferences.getSecond(),
-        this.parent.getRegionFileSystem().getSplitsDir(this.hri_b));
-    HRegion b = this.parent.createDaughterRegionFromSplits(this.hri_b);
-    assertReferenceFileCount(expectedReferences.getSecond(),
-        new Path(this.parent.getRegionFileSystem().getTableDir(), this.hri_b.getEncodedName()));
-
-    return new PairOfSameType<Region>(a, b);
-  }
-
-  @VisibleForTesting
-  void assertReferenceFileCount(int expectedReferenceFileCount, Path dir)
-      throws IOException {
-    if (expectedReferenceFileCount != 0 &&
-        expectedReferenceFileCount != FSUtils.getRegionReferenceFileCount(parent.getFilesystem(),
-          dir)) {
-      throw new IOException("Failing split. Expected reference file count isn't equal.");
-    }
-  }
-
-  /**
-   * Perform the time-consuming opening of the daughter regions.
-   * @param server Hosting server instance.  Can be null when testing
-   * @param services Used to online/offline regions.
-   * @param a first daughter region
-   * @param b second daughter region
-   * @throws IOException If thrown, transaction failed.
-   *          Call {@link #rollback(Server, RegionServerServices)}
-   */
-  @VisibleForTesting
-  void openDaughters(final Server server, final RegionServerServices services, Region a,
-      Region b) throws IOException {
-    boolean stopped = server != null && server.isStopped();
-    boolean stopping = services != null && services.isStopping();
-    // TODO: Is this check needed here?
-    if (stopped || stopping) {
-      LOG.info("Not opening daughters " +
-          b.getRegionInfo().getRegionNameAsString() +
-          " and " +
-          a.getRegionInfo().getRegionNameAsString() +
-          " because stopping=" + stopping + ", stopped=" + stopped);
-    } else {
-      // Open daughters in parallel.
-      DaughterOpener aOpener = new DaughterOpener(server, a);
-      DaughterOpener bOpener = new DaughterOpener(server, b);
-      aOpener.start();
-      bOpener.start();
-      try {
-        aOpener.join();
-        if (aOpener.getException() == null) {
-          transition(SplitTransactionPhase.OPENED_REGION_A);
-        }
-        bOpener.join();
-        if (bOpener.getException() == null) {
-          transition(SplitTransactionPhase.OPENED_REGION_B);
-        }
-      } catch (InterruptedException e) {
-        throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-      }
-      if (aOpener.getException() != null) {
-        throw new IOException("Failed " +
-          aOpener.getName(), aOpener.getException());
-      }
-      if (bOpener.getException() != null) {
-        throw new IOException("Failed " +
-          bOpener.getName(), bOpener.getException());
-      }
-      if (services != null) {
-        if (!services.reportRegionStateTransition(TransitionCode.SPLIT,
-            parent.getRegionInfo(), hri_a, hri_b)) {
-          throw new IOException("Failed to report split region to master: "
-            + parent.getRegionInfo().getShortNameToLog());
-        }
-        // Should add it to OnlineRegions
-        services.addToOnlineRegions(b);
-        services.addToOnlineRegions(a);
-      }
-    }
-  }
-
-  @Override
-  public PairOfSameType<Region> execute(final Server server,
-    final RegionServerServices services)
-        throws IOException {
-    if (User.isHBaseSecurityEnabled(parent.getBaseConf())) {
-      LOG.warn("Should use execute(Server, RegionServerServices, User)");
-    }
-    return execute(server, services, null);
-  }
-
-  @Override
-  public PairOfSameType<Region> execute(final Server server, final RegionServerServices services,
-    User user) throws IOException {
-    this.server = server;
-    this.rsServices = services;
-    PairOfSameType<Region> regions = createDaughters(server, services, user);
-    stepsAfterPONR(server, services, regions, user);
-    transition(SplitTransactionPhase.COMPLETED);
-    return regions;
-  }
-
-  @VisibleForTesting
-  void stepsAfterPONR(final Server server,
-      final RegionServerServices services, final PairOfSameType<Region> regions, User user)
-      throws IOException {
-    if (this.parent.getCoprocessorHost() != null) {
-      parent.getCoprocessorHost().preSplitAfterPONR(user);
-    }
-
-    openDaughters(server, services, regions.getFirst(), regions.getSecond());
-
-    transition(SplitTransactionPhase.BEFORE_POST_SPLIT_HOOK);
-
-    // Coprocessor callback
-    if (parent.getCoprocessorHost() != null) {
-      this.parent.getCoprocessorHost().postSplit(regions.getFirst(), regions.getSecond(), user);
-    }
-
-    transition(SplitTransactionPhase.AFTER_POST_SPLIT_HOOK);
-  }
-
-  /*
-   * Open daughter region in its own thread.
-   * If we fail, abort this hosting server.
-   */
-  private class DaughterOpener extends HasThread {
-    private final Server server;
-    private final Region r;
-    private Throwable t = null;
-
-    DaughterOpener(final Server s, final Region r) {
-      super((s == null? "null-services": s.getServerName()) +
-        "-daughterOpener=" + r.getRegionInfo().getEncodedName());
-      setDaemon(true);
-      this.server = s;
-      this.r = r;
-    }
-
-    /**
-     * @return Null if the open succeeded, else the exception that caused the open to fail.
-     * Call this after the thread exits or you may get a stale view of the result.
-     */
-    Throwable getException() {
-      return this.t;
-    }
-
-    @Override
-    public void run() {
-      try {
-        openDaughterRegion(this.server, r);
-      } catch (Throwable t) {
-        this.t = t;
-      }
-    }
-  }
-
-  /**
-   * Open daughter regions, add them to online list and update meta.
-   * @param server
-   * @param daughter
-   * @throws IOException
-   * @throws KeeperException
-   */
-  @VisibleForTesting
-  void openDaughterRegion(final Server server, final Region daughter)
-      throws IOException, KeeperException {
-    HRegionInfo hri = daughter.getRegionInfo();
-    LoggingProgressable reporter = server == null ? null
-        : new LoggingProgressable(hri, server.getConfiguration().getLong(
-            "hbase.regionserver.split.daughter.open.log.interval", 10000));
-    ((HRegion)daughter).openHRegion(reporter);
-  }
-
-  static class LoggingProgressable implements CancelableProgressable {
-    private final HRegionInfo hri;
-    private long lastLog = -1;
-    private final long interval;
-
-    LoggingProgressable(final HRegionInfo hri, final long interval) {
-      this.hri = hri;
-      this.interval = interval;
-    }
-
-    @Override
-    public boolean progress() {
-      long now = EnvironmentEdgeManager.currentTime();
-      if (now - lastLog > this.interval) {
-        LOG.info("Opening " + this.hri.getRegionNameAsString());
-        this.lastLog = now;
-      }
-      return true;
-    }
-  }
-
-  /**
-   * Creates reference files for the top and bottom halves of the split.
-   * @param hstoreFilesToSplit map of store files to create half file references for.
-   * @return the number of reference files that were created.
-   * @throws IOException
-   */
-  private Pair<Integer, Integer> splitStoreFiles(
-      final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
-      throws IOException {
-    if (hstoreFilesToSplit == null) {
-      // Could be null because close didn't succeed -- for now consider it fatal
-      throw new IOException("Close returned empty list of StoreFiles");
-    }
-    // The following code sets up a thread pool executor with as many slots as
-    // there are files to split. It then fires everything up, waits for
-    // completion and finally checks for any exceptions.
-    int nbFiles = 0;
-    for (Map.Entry<byte[], List<StoreFile>> entry: hstoreFilesToSplit.entrySet()) {
-        nbFiles += entry.getValue().size();
-    }
-    if (nbFiles == 0) {
-      // no files need to be split.
-      return new Pair<Integer, Integer>(0,0);
-    }
-    // Default max #threads to use is the smaller of table's configured number of blocking store
-    // files or the available number of logical cores.
-    int defMaxThreads = Math.min(parent.conf.getInt(HStore.BLOCKING_STOREFILES_KEY,
-                HStore.DEFAULT_BLOCKING_STOREFILE_COUNT),
-            Runtime.getRuntime().availableProcessors());
-    // Max #threads is the smaller of the number of storefiles or the default max determined above.
-    int maxThreads = Math.min(parent.conf.getInt(HConstants.REGION_SPLIT_THREADS_MAX,
-                defMaxThreads), nbFiles);
-    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + this.parent +
-            " using " + maxThreads + " threads");
-    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
-    builder.setNameFormat("StoreFileSplitter-%1$d");
-    ThreadFactory factory = builder.build();
-    ThreadPoolExecutor threadPool =
-      (ThreadPoolExecutor) Executors.newFixedThreadPool(maxThreads, factory);
-    List<Future<Pair<Path,Path>>> futures = new ArrayList<Future<Pair<Path,Path>>> (nbFiles);
-
-    // Split each store file.
-    for (Map.Entry<byte[], List<StoreFile>> entry: hstoreFilesToSplit.entrySet()) {
-      for (StoreFile sf: entry.getValue()) {
-        StoreFileSplitter sfs = new StoreFileSplitter(entry.getKey(), sf);
-        futures.add(threadPool.submit(sfs));
-      }
-    }
-    // Shutdown the pool
-    threadPool.shutdown();
-
-    // Wait for all the tasks to finish
-    try {
-      boolean stillRunning = !threadPool.awaitTermination(
-          this.fileSplitTimeout, TimeUnit.MILLISECONDS);
-      if (stillRunning) {
-        threadPool.shutdownNow();
-        // wait for the thread to shutdown completely.
-        while (!threadPool.isTerminated()) {
-          Thread.sleep(50);
-        }
-        throw new IOException("Took too long to split the" +
-            " files and create the references, aborting split");
-      }
-    } catch (InterruptedException e) {
-      throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-    }
-
-    int created_a = 0;
-    int created_b = 0;
-    // Look for any exception
-    for (Future<Pair<Path, Path>> future : futures) {
-      try {
-        Pair<Path, Path> p = future.get();
-        created_a += p.getFirst() != null ? 1 : 0;
-        created_b += p.getSecond() != null ? 1 : 0;
-      } catch (InterruptedException e) {
-        throw (InterruptedIOException) new InterruptedIOException().initCause(e);
-      } catch (ExecutionException e) {
-        throw new IOException(e);
-      }
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Split storefiles for region " + this.parent + " Daughter A: " + created_a
-          + " storefiles, Daughter B: " + created_b + " storefiles.");
-    }
-    return new Pair<Integer, Integer>(created_a, created_b);
-  }
-
-  private Pair<Path, Path> splitStoreFile(final byte[] family, final StoreFile sf)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-        LOG.debug("Splitting started for store file: " + sf.getPath() + " for region: " +
-                  this.parent);
-    }
-    HRegionFileSystem fs = this.parent.getRegionFileSystem();
-    String familyName = Bytes.toString(family);
-    Path path_a =
-        fs.splitStoreFile(this.hri_a, familyName, sf, this.splitrow, false,
-          this.parent.getSplitPolicy());
-    Path path_b =
-        fs.splitStoreFile(this.hri_b, familyName, sf, this.splitrow, true,
-          this.parent.getSplitPolicy());
-    if (LOG.isDebugEnabled()) {
-        LOG.debug("Splitting complete for store file: " + sf.getPath() + " for region: " +
-                  this.parent);
-    }
-    return new Pair<Path,Path>(path_a, path_b);
-  }
-
-  /**
-   * Utility class used to do the file splitting / reference writing
-   * in parallel instead of sequentially.
-   */
-  private class StoreFileSplitter implements Callable<Pair<Path,Path>> {
-    private final byte[] family;
-    private final StoreFile sf;
-
-    /**
-     * Constructor that takes what it needs to split
-     * @param family Family that contains the store file
-     * @param sf which file
-     */
-    public StoreFileSplitter(final byte[] family, final StoreFile sf) {
-      this.sf = sf;
-      this.family = family;
-    }
-
-    public Pair<Path,Path> call() throws IOException {
-      return splitStoreFile(family, sf);
-    }
-  }
-
-  @Override
-  public boolean rollback(final Server server, final RegionServerServices services)
-      throws IOException {
-    if (User.isHBaseSecurityEnabled(parent.getBaseConf())) {
-      LOG.warn("Should use rollback(Server, RegionServerServices, User)");
-    }
-    return rollback(server, services, null);
-  }
-
-  @Override
-  public boolean rollback(final Server server, final RegionServerServices services, User user)
-      throws IOException {
-    this.server = server;
-    this.rsServices = services;
-    // Coprocessor callback
-    if (this.parent.getCoprocessorHost() != null) {
-      this.parent.getCoprocessorHost().preRollBackSplit(user);
-    }
-
-    boolean result = true;
-    ListIterator<JournalEntry> iterator =
-      this.journal.listIterator(this.journal.size());
-    // Iterate in reverse.
-    while (iterator.hasPrevious()) {
-      JournalEntry je = iterator.previous();
-
-      transition(je.getPhase(), true);
-
-      switch (je.getPhase()) {
-
-      case SET_SPLITTING:
-        if (services != null
-            && !services.reportRegionStateTransition(TransitionCode.SPLIT_REVERTED,
-                parent.getRegionInfo(), hri_a, hri_b)) {
-          return false;
-        }
-        break;
-
-      case CREATE_SPLIT_DIR:
-        this.parent.writestate.writesEnabled = true;
-        this.parent.getRegionFileSystem().cleanupSplitsDir();
-        break;
-
-      case CLOSED_PARENT_REGION:
-        try {
-          // So, this returns a seqid but if we just closed and then reopened, we
-          // should be ok. On close, we flushed using sequenceid obtained from
-          // hosting regionserver so no need to propagate the sequenceid returned
-          // out of initialize below up into regionserver as we normally do.
-          // TODO: Verify.
-          this.parent.initialize();
-        } catch (IOException e) {
-          LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region " +
-            parent.getRegionInfo().getRegionNameAsString(), e);
-          throw new RuntimeException(e);
-        }
-        break;
-
-      case STARTED_REGION_A_CREATION:
-        this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_a);
-        break;
-
-      case STARTED_REGION_B_CREATION:
-        this.parent.getRegionFileSystem().cleanupDaughterRegion(this.hri_b);
-        break;
-
-      case OFFLINED_PARENT:
-        if (services != null) services.addToOnlineRegions(this.parent);
-        break;
-
-      case PONR:
-        // We got to the point-of-no-return so we need to just abort. Return
-        // immediately.  Do not clean up created daughter regions.  They need
-        // to be in place so we don't delete the parent region mistakenly.
-        // See HBASE-3872.
-        return false;
-
-      // Informational only cases
-      case STARTED:
-      case PREPARED:
-      case BEFORE_PRE_SPLIT_HOOK:
-      case AFTER_PRE_SPLIT_HOOK:
-      case BEFORE_POST_SPLIT_HOOK:
-      case AFTER_POST_SPLIT_HOOK:
-      case OPENED_REGION_A:
-      case OPENED_REGION_B:
-      case COMPLETED:
-        break;
-
-      default:
-        throw new RuntimeException("Unhandled journal entry: " + je);
-      }
-    }
-    // Coprocessor callback
-    if (this.parent.getCoprocessorHost() != null) {
-      this.parent.getCoprocessorHost().postRollBackSplit(user);
-    }
-    return result;
-  }
-
-  /* package */ HRegionInfo getFirstDaughter() {
-    return hri_a;
-  }
-
-  /* package */ HRegionInfo getSecondDaughter() {
-    return hri_b;
-  }
-
-  @Override
-  public List<JournalEntry> getJournal() {
-    return journal;
-  }
-
-  @Override
-  public SplitTransaction registerTransactionListener(TransactionListener listener) {
-    listeners.add(listener);
-    return this;
-  }
-
-  @Override
-  public Server getServer() {
-    return server;
-  }
-
-  @Override
-  public RegionServerServices getRegionServerServices() {
-    return rsServices;
-  }
-
-}

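The heart of the SplitTransactionImpl removed above is its journal of SplitTransactionPhase entries: each phase is journalled before its work runs, and on failure the journal is replayed in reverse to undo completed phases, stopping once the point of no return (PONR) has been passed. The sketch below is a compact, self-contained illustration of that pattern; the phase names and undo actions are stand-ins, not the HBase implementation:

import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;

public class JournalledTransactionSketch {

  enum Phase { STARTED, PREPARED, CLOSED_PARENT, CREATED_DAUGHTERS, PONR, COMPLETED }

  private final List<Phase> journal = new ArrayList<>();

  private void transition(Phase next) {
    // Journal first, so a failure while doing the work for 'next' still gets rolled back.
    journal.add(next);
    System.out.println("entered " + next);
  }

  void execute(boolean failBeforePonr) {
    transition(Phase.PREPARED);
    transition(Phase.CLOSED_PARENT);
    transition(Phase.CREATED_DAUGHTERS);
    if (failBeforePonr) {
      throw new IllegalStateException("simulated failure before the point of no return");
    }
    transition(Phase.PONR);
    transition(Phase.COMPLETED);
  }

  /** Walk the journal in reverse; return false if the PONR was already journalled. */
  boolean rollback() {
    ListIterator<Phase> it = journal.listIterator(journal.size());
    while (it.hasPrevious()) {
      switch (it.previous()) {
        case PONR:
          return false;                      // past the point of no return, abort instead
        case CREATED_DAUGHTERS:
          System.out.println("undo: remove daughter region dirs");
          break;
        case CLOSED_PARENT:
          System.out.println("undo: reopen the parent region");
          break;
        default:
          break;                             // informational phases need no undo
      }
    }
    return true;
  }

  public static void main(String[] args) {
    JournalledTransactionSketch tx = new JournalledTransactionSketch();
    tx.journal.add(Phase.STARTED);
    try {
      tx.execute(true);
    } catch (IllegalStateException e) {
      System.out.println("rollback succeeded: " + tx.rollback());
    }
  }
}
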
http://git-wip-us.apache.org/repos/asf/hbase/blob/bff7c4f1/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
index 465853a..a479497 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorInterface.java
@@ -27,8 +27,6 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Mockito.when;
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
@@ -48,7 +46,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Scan;
@@ -59,18 +56,14 @@ import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.regionserver.ScanType;
 import org.apache.hadoop.hbase.regionserver.ScannerContext;
-import org.apache.hadoop.hbase.regionserver.SplitTransaction;
-import org.apache.hadoop.hbase.regionserver.SplitTransactionFactory;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
-import org.mockito.Mockito;
 
 @Category({CoprocessorTests.class, SmallTests.class})
 public class TestCoprocessorInterface {
@@ -163,7 +156,6 @@ public class TestCoprocessorInterface {
     private boolean postCompactCalled;
     private boolean preFlushCalled;
     private boolean postFlushCalled;
-    private boolean postSplitCalled;
     private ConcurrentMap<String, Object> sharedData;
 
     @Override
@@ -215,10 +207,6 @@ public class TestCoprocessorInterface {
     public void postFlush(ObserverContext<RegionCoprocessorEnvironment> e) {
       postFlushCalled = true;
     }
-    @Override
-    public void postSplit(ObserverContext<RegionCoprocessorEnvironment> e, Region l, Region r) {
-      postSplitCalled = true;
-    }
 
     @Override
     public RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> e,
@@ -244,9 +232,6 @@ public class TestCoprocessorInterface {
     boolean wasCompacted() {
       return (preCompactCalled && postCompactCalled);
     }
-    boolean wasSplit() {
-      return postSplitCalled;
-    }
     Map<String, Object> getSharedData() {
       return sharedData;
     }
@@ -281,7 +266,7 @@ public class TestCoprocessorInterface {
     TableName tableName = TableName.valueOf(name.getMethodName());
     byte [][] families = { fam1, fam2, fam3 };
 
-    Configuration hc = initSplit();
+    Configuration hc = initConfig();
     Region region = initHRegion(tableName, name.getMethodName(), hc,
       new Class<?>[]{}, families);
 
@@ -292,15 +277,11 @@ public class TestCoprocessorInterface {
 
     region.compact(false);
 
-    byte [] splitRow = ((HRegion)region).checkSplit();
-    assertNotNull(splitRow);
-    Region [] regions = split(region, splitRow);
-    for (int i = 0; i < regions.length; i++) {
-      regions[i] = reopenRegion(regions[i], CoprocessorImpl.class, CoprocessorII.class);
-    }
-    Coprocessor c = regions[0].getCoprocessorHost().
+    region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
+
+    Coprocessor c = region.getCoprocessorHost().
         findCoprocessor(CoprocessorImpl.class.getName());
-    Coprocessor c2 = regions[0].getCoprocessorHost().
+    Coprocessor c2 = region.getCoprocessorHost().
         findCoprocessor(CoprocessorII.class.getName());
     Object o = ((CoprocessorImpl)c).getSharedData().get("test1");
     Object o2 = ((CoprocessorII)c2).getSharedData().get("test2");
@@ -308,39 +289,33 @@ public class TestCoprocessorInterface {
     assertNotNull(o2);
     // two coprocessors get different sharedDatas
     assertFalse(((CoprocessorImpl)c).getSharedData() == ((CoprocessorII)c2).getSharedData());
-    for (int i = 1; i < regions.length; i++) {
-      c = regions[i].getCoprocessorHost().
-          findCoprocessor(CoprocessorImpl.class.getName());
-      c2 = regions[i].getCoprocessorHost().
-          findCoprocessor(CoprocessorII.class.getName());
-      // make sure that all coprocessor of a class have identical sharedDatas
-      assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
-      assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);
-    }
+    c = region.getCoprocessorHost().findCoprocessor(CoprocessorImpl.class.getName());
+    c2 = region.getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName());
+    // make sure that all coprocessors of a class have identical sharedDatas
+    assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
+    assertTrue(((CoprocessorII)c2).getSharedData().get("test2") == o2);
+
     // now have all Environments fail
-    for (int i = 0; i < regions.length; i++) {
-      try {
-        byte [] r = regions[i].getRegionInfo().getStartKey();
-        if (r == null || r.length <= 0) {
-          // Its the start row.  Can't ask for null.  Ask for minimal key instead.
-          r = new byte [] {0};
-        }
-        Get g = new Get(r);
-        regions[i].get(g);
-        fail();
-      } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
+    try {
+      byte [] r = region.getRegionInfo().getStartKey();
+      if (r == null || r.length <= 0) {
+        // Its the start row.  Can't ask for null.  Ask for minimal key instead.
+        r = new byte [] {0};
       }
-      assertNull(regions[i].getCoprocessorHost().
-          findCoprocessor(CoprocessorII.class.getName()));
+      Get g = new Get(r);
+      region.get(g);
+      fail();
+    } catch (org.apache.hadoop.hbase.DoNotRetryIOException xc) {
     }
-    c = regions[0].getCoprocessorHost().
+    assertNull(region.getCoprocessorHost().findCoprocessor(CoprocessorII.class.getName()));
+    c = region.getCoprocessorHost().
         findCoprocessor(CoprocessorImpl.class.getName());
     assertTrue(((CoprocessorImpl)c).getSharedData().get("test1") == o);
     c = c2 = null;
     // perform a GC
     System.gc();
     // reopen the region
-    region = reopenRegion(regions[0], CoprocessorImpl.class, CoprocessorII.class);
+    region = reopenRegion(region, CoprocessorImpl.class, CoprocessorII.class);
     c = region.getCoprocessorHost().
         findCoprocessor(CoprocessorImpl.class.getName());
     // CPimpl is unaffected, still the same reference
@@ -359,7 +334,7 @@ public class TestCoprocessorInterface {
     TableName tableName = TableName.valueOf(name.getMethodName());
     byte [][] families = { fam1, fam2, fam3 };
 
-    Configuration hc = initSplit();
+    Configuration hc = initConfig();
     Region region = initHRegion(tableName, name.getMethodName(), hc,
       new Class<?>[]{CoprocessorImpl.class}, families);
     for (int i = 0; i < 3; i++) {
@@ -369,42 +344,23 @@ public class TestCoprocessorInterface {
 
     region.compact(false);
 
-    byte [] splitRow = ((HRegion)region).checkSplit();
-
-    assertNotNull(splitRow);
-    Region [] regions = split(region, splitRow);
-    for (int i = 0; i < regions.length; i++) {
-      regions[i] = reopenRegion(regions[i], CoprocessorImpl.class);
-    }
-    HBaseTestingUtility.closeRegionAndWAL(region);
-    Coprocessor c = region.getCoprocessorHost().
-      findCoprocessor(CoprocessorImpl.class.getName());
-
     // HBASE-4197
     Scan s = new Scan();
-    RegionScanner scanner = regions[0].getCoprocessorHost().postScannerOpen(s, regions[0].getScanner(s));
+    RegionScanner scanner = region.getCoprocessorHost().postScannerOpen(s, region.getScanner(s));
     assertTrue(scanner instanceof CustomScanner);
     // this would throw an exception before HBASE-4197
     scanner.next(new ArrayList<Cell>());
 
+    HBaseTestingUtility.closeRegionAndWAL(region);
+    Coprocessor c = region.getCoprocessorHost().
+      findCoprocessor(CoprocessorImpl.class.getName());
+
     assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
     assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
     assertTrue(((CoprocessorImpl)c).wasOpened());
     assertTrue(((CoprocessorImpl)c).wasClosed());
     assertTrue(((CoprocessorImpl)c).wasFlushed());
     assertTrue(((CoprocessorImpl)c).wasCompacted());
-    assertTrue(((CoprocessorImpl)c).wasSplit());
-
-    for (int i = 0; i < regions.length; i++) {
-      HBaseTestingUtility.closeRegionAndWAL(regions[i]);
-      c = region.getCoprocessorHost()
-            .findCoprocessor(CoprocessorImpl.class.getName());
-      assertTrue("Coprocessor not started", ((CoprocessorImpl)c).wasStarted());
-      assertTrue("Coprocessor not stopped", ((CoprocessorImpl)c).wasStopped());
-      assertTrue(((CoprocessorImpl)c).wasOpened());
-      assertTrue(((CoprocessorImpl)c).wasClosed());
-      assertTrue(((CoprocessorImpl)c).wasCompacted());
-    }
   }
 
   Region reopenRegion(final Region closedRegion, Class<?> ... implClasses)
@@ -461,7 +417,7 @@ public class TestCoprocessorInterface {
     return r;
   }
 
-  Configuration initSplit() {
+  private Configuration initConfig() {
     // Always compact if there is more than one store file.
     TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 2);
     // Make lease timeout longer, lease checks less frequent
@@ -480,37 +436,4 @@ public class TestCoprocessorInterface {
 
     return TEST_UTIL.getConfiguration();
   }
-
-  private Region [] split(final Region r, final byte [] splitRow) throws IOException {
-    Region[] regions = new Region[2];
-
-    SplitTransaction st = new SplitTransactionFactory(TEST_UTIL.getConfiguration())
-      .create(r, splitRow);
-    int i = 0;
-
-    if (!st.prepare()) {
-      // test fails.
-      assertTrue(false);
-    }
-    try {
-      Server mockServer = Mockito.mock(Server.class);
-      when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
-      PairOfSameType<Region> daughters = st.execute(mockServer, null);
-      for (Region each_daughter: daughters) {
-        regions[i] = each_daughter;
-        i++;
-      }
-    } catch (IOException ioe) {
-      LOG.info("Split transaction of " + r.getRegionInfo().getRegionNameAsString() +
-          " failed:" + ioe.getMessage());
-      assertTrue(false);
-    } catch (RuntimeException e) {
-      LOG.info("Failed rollback of failed split of " +
-          r.getRegionInfo().getRegionNameAsString() + e.getMessage());
-    }
-
-    assertTrue(i == 2);
-    return regions;
-  }
-
 }
\ No newline at end of file

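With the split assertions gone, the test above still verifies the shared-data contract: coprocessor instances of the same class share one map, while different coprocessor classes get different maps. Below is a self-contained sketch of that per-class shared-data idea; the registry is a hypothetical stand-in, not the RegionCoprocessorHost code:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class SharedDataSketch {

  private static final ConcurrentMap<String, ConcurrentMap<String, Object>> SHARED =
      new ConcurrentHashMap<>();

  /** Returns the shared map for a coprocessor class, creating it on first use. */
  static ConcurrentMap<String, Object> sharedDataFor(Class<?> coprocClass) {
    return SHARED.computeIfAbsent(coprocClass.getName(), k -> new ConcurrentHashMap<>());
  }

  public static void main(String[] args) {
    // String and Integer stand in for two distinct coprocessor classes.
    Map<String, Object> a1 = sharedDataFor(String.class);
    Map<String, Object> a2 = sharedDataFor(String.class);
    Map<String, Object> b = sharedDataFor(Integer.class);

    a1.put("test1", new Object());

    System.out.println("same class, same map instance: " + (a1 == a2));
    System.out.println("different classes, different maps: " + (a1 != b));
    System.out.println("value visible through the other reference: " + a2.containsKey("test1"));
  }
}
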
http://git-wip-us.apache.org/repos/asf/hbase/blob/bff7c4f1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index c973471..d40a684 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -19,9 +19,6 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import static org.apache.hadoop.hbase.HBaseTestingUtility.COLUMNS;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.FIRST_CHAR;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.LAST_CHAR;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
 import static org.apache.hadoop.hbase.HBaseTestingUtility.fam3;
@@ -43,7 +40,6 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
-import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 
@@ -85,7 +81,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
@@ -159,7 +154,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
 import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
-import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.wal.AbstractFSWALProvider;
 import org.apache.hadoop.hbase.wal.FaultyFSLog;
@@ -2646,45 +2640,6 @@ public class TestHRegion {
     }
   }
 
-  /**
-   * @param parent
-   *          Region to split.
-   * @param midkey
-   *          Key to split around.
-   * @return The Regions we created.
-   * @throws IOException
-   */
-  HRegion[] splitRegion(final HRegion parent, final byte[] midkey) throws IOException {
-    PairOfSameType<Region> result = null;
-    SplitTransactionImpl st = new SplitTransactionImpl(parent, midkey);
-    // If prepare does not return true, for some reason -- logged inside
-    // the prepare call -- we are not ready to split just now. Just return.
-    if (!st.prepare()) {
-      parent.clearSplit();
-      return null;
-    }
-    try {
-      result = st.execute(null, null);
-    } catch (IOException ioe) {
-      try {
-        LOG.info("Running rollback of failed split of " +
-          parent.getRegionInfo().getRegionNameAsString() + "; " + ioe.getMessage());
-        st.rollback(null, null);
-        LOG.info("Successful rollback of failed split of " +
-          parent.getRegionInfo().getRegionNameAsString());
-        return null;
-      } catch (RuntimeException e) {
-        // If failed rollback, kill this server to avoid having a hole in table.
-        LOG.info("Failed rollback of failed split of " +
-          parent.getRegionInfo().getRegionNameAsString() + " -- aborting server", e);
-      }
-    }
-    finally {
-      parent.clearSplit();
-    }
-    return new HRegion[] { (HRegion)result.getFirst(), (HRegion)result.getSecond() };
-  }
-
   // ////////////////////////////////////////////////////////////////////////////
   // Scanner tests
   // ////////////////////////////////////////////////////////////////////////////
@@ -3517,204 +3472,6 @@ public class TestHRegion {
     HBaseTestingUtility.closeRegionAndWAL(this.region);
     this.region = null;
   }
-  // ////////////////////////////////////////////////////////////////////////////
-  // Split test
-  // ////////////////////////////////////////////////////////////////////////////
-  /**
-   * Splits twice and verifies getting from each of the split regions.
-   *
-   * @throws Exception
-   */
-  @Test
-  public void testBasicSplit() throws Exception {
-    byte[][] families = { fam1, fam2, fam3 };
-
-    Configuration hc = initSplit();
-    // Setting up region
-    this.region = initHRegion(tableName, method, hc, families);
-
-    try {
-      LOG.info("" + HBaseTestCase.addContent(region, fam3));
-      region.flush(true);
-      region.compactStores();
-      byte[] splitRow = region.checkSplit();
-      assertNotNull(splitRow);
-      LOG.info("SplitRow: " + Bytes.toString(splitRow));
-      HRegion[] regions = splitRegion(region, splitRow);
-      try {
-        // Need to open the regions.
-        // TODO: Add an 'open' to HRegion... don't do open by constructing
-        // instance.
-        for (int i = 0; i < regions.length; i++) {
-          regions[i] = HRegion.openHRegion(regions[i], null);
-        }
-        // Assert can get rows out of new regions. Should be able to get first
-        // row from first region and the midkey from second region.
-        assertGet(regions[0], fam3, Bytes.toBytes(START_KEY));
-        assertGet(regions[1], fam3, splitRow);
-        // Test I can get scanner and that it starts at right place.
-        assertScan(regions[0], fam3, Bytes.toBytes(START_KEY));
-        assertScan(regions[1], fam3, splitRow);
-        // Now prove can't split regions that have references.
-        for (int i = 0; i < regions.length; i++) {
-          // Add so much data to this region, we create a store file that is >
-          // than one of our unsplitable references. it will.
-          for (int j = 0; j < 2; j++) {
-            HBaseTestCase.addContent(regions[i], fam3);
-          }
-          HBaseTestCase.addContent(regions[i], fam2);
-          HBaseTestCase.addContent(regions[i], fam1);
-          regions[i].flush(true);
-        }
-
-        byte[][] midkeys = new byte[regions.length][];
-        // To make the regions splittable, force compaction.
-        for (int i = 0; i < regions.length; i++) {
-          regions[i].compactStores();
-          midkeys[i] = regions[i].checkSplit();
-        }
-
-        TreeMap<String, HRegion> sortedMap = new TreeMap<String, HRegion>();
-        // Split these two daughter regions so then I'll have 4 regions. Will
-        // split because added data above.
-        for (int i = 0; i < regions.length; i++) {
-          HRegion[] rs = null;
-          if (midkeys[i] != null) {
-            rs = splitRegion(regions[i], midkeys[i]);
-            for (int j = 0; j < rs.length; j++) {
-              sortedMap.put(Bytes.toString(rs[j].getRegionInfo().getRegionName()),
-                HRegion.openHRegion(rs[j], null));
-            }
-          }
-        }
-        LOG.info("Made 4 regions");
-        // The splits should have been even. Test I can get some arbitrary row
-        // out of each.
-        int interval = (LAST_CHAR - FIRST_CHAR) / 3;
-        byte[] b = Bytes.toBytes(START_KEY);
-        for (HRegion r : sortedMap.values()) {
-          assertGet(r, fam3, b);
-          b[0] += interval;
-        }
-      } finally {
-        for (int i = 0; i < regions.length; i++) {
-          try {
-            regions[i].close();
-          } catch (IOException e) {
-            // Ignore.
-          }
-        }
-      }
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-    }
-  }
-
-  @Test
-  public void testSplitRegion() throws IOException {
-    byte[] qualifier = Bytes.toBytes("qualifier");
-    Configuration hc = initSplit();
-    int numRows = 10;
-    byte[][] families = { fam1, fam3 };
-
-    // Setting up region
-    this.region = initHRegion(tableName, method, hc, families);
-
-    // Put data in region
-    int startRow = 100;
-    putData(startRow, numRows, qualifier, families);
-    int splitRow = startRow + numRows;
-    putData(splitRow, numRows, qualifier, families);
-    region.flush(true);
-
-    HRegion[] regions = null;
-    try {
-      regions = splitRegion(region, Bytes.toBytes("" + splitRow));
-      // Opening the regions returned.
-      for (int i = 0; i < regions.length; i++) {
-        regions[i] = HRegion.openHRegion(regions[i], null);
-      }
-      // Verifying that the region has been split
-      assertEquals(2, regions.length);
-
-      // Verifying that all data is still there and that data is in the right
-      // place
-      verifyData(regions[0], startRow, numRows, qualifier, families);
-      verifyData(regions[1], splitRow, numRows, qualifier, families);
-
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-    }
-  }
-
-  @Test
-  public void testClearForceSplit() throws IOException {
-    byte[] qualifier = Bytes.toBytes("qualifier");
-    Configuration hc = initSplit();
-    int numRows = 10;
-    byte[][] families = { fam1, fam3 };
-
-    // Setting up region
-    this.region = initHRegion(tableName, method, hc, families);
-
-    // Put data in region
-    int startRow = 100;
-    putData(startRow, numRows, qualifier, families);
-    int splitRow = startRow + numRows;
-    byte[] splitRowBytes = Bytes.toBytes("" + splitRow);
-    putData(splitRow, numRows, qualifier, families);
-    region.flush(true);
-
-    HRegion[] regions = null;
-    try {
-      // Set force split
-      region.forceSplit(splitRowBytes);
-      assertTrue(region.shouldForceSplit());
-      // Split point should be the force split row
-      assertTrue(Bytes.equals(splitRowBytes, region.checkSplit()));
-
-      // Add a store that has references.
-      HStore storeMock = Mockito.mock(HStore.class);
-      when(storeMock.hasReferences()).thenReturn(true);
-      when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf"));
-      when(storeMock.close()).thenReturn(ImmutableList.<StoreFile>of());
-      when(storeMock.getColumnFamilyName()).thenReturn("cf");
-      region.stores.put(Bytes.toBytes(storeMock.getColumnFamilyName()), storeMock);
-      assertTrue(region.hasReferences());
-
-      // Will not split since the store has references.
-      regions = splitRegion(region, splitRowBytes);
-      assertNull(regions);
-
-      // Region force split should be cleared after the split try.
-      assertFalse(region.shouldForceSplit());
-
-      // Remove the store that has references.
-      region.stores.remove(Bytes.toBytes(storeMock.getColumnFamilyName()));
-      assertFalse(region.hasReferences());
-
-      // Now we can split.
-      regions = splitRegion(region, splitRowBytes);
-
-      // Opening the regions returned.
-      for (int i = 0; i < regions.length; i++) {
-        regions[i] = HRegion.openHRegion(regions[i], null);
-      }
-      // Verifying that the region has been split
-      assertEquals(2, regions.length);
-
-      // Verifying that all data is still there and that data is in the right
-      // place
-      verifyData(regions[0], startRow, numRows, qualifier, families);
-      verifyData(regions[1], splitRow, numRows, qualifier, families);
-
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-    }
-  }
 
   /**
    * Flushes the cache in a thread while scanning. The tests verify that the
@@ -5907,103 +5664,6 @@ public class TestHRegion {
   }
 
   @Test
-  public void testSplitRegionWithReverseScan() throws IOException {
-    TableName tableName = TableName.valueOf("testSplitRegionWithReverseScan");
-    byte [] qualifier = Bytes.toBytes("qualifier");
-    Configuration hc = initSplit();
-    int numRows = 3;
-    byte [][] families = {fam1};
-
-    //Setting up region
-    this.region = initHRegion(tableName, method, hc, families);
-
-    //Put data in region
-    int startRow = 100;
-    putData(startRow, numRows, qualifier, families);
-    int splitRow = startRow + numRows;
-    putData(splitRow, numRows, qualifier, families);
-    region.flush(true);
-
-    HRegion [] regions = null;
-    try {
-      regions = splitRegion(region, Bytes.toBytes("" + splitRow));
-      //Opening the regions returned.
-      for (int i = 0; i < regions.length; i++) {
-        regions[i] = HRegion.openHRegion(regions[i], null);
-      }
-      //Verifying that the region has been split
-      assertEquals(2, regions.length);
-
-      //Verifying that all data is still there and that data is in the right
-      //place
-      verifyData(regions[0], startRow, numRows, qualifier, families);
-      verifyData(regions[1], splitRow, numRows, qualifier, families);
-
-      //fire the reverse scan1:  top range, and larger than the last row
-      Scan scan = new Scan(Bytes.toBytes(String.valueOf(startRow + 10 * numRows)));
-      scan.setReversed(true);
-      InternalScanner scanner = regions[1].getScanner(scan);
-      List<Cell> currRow = new ArrayList<Cell>();
-      boolean more = false;
-      int verify = startRow + 2 * numRows - 1;
-      do {
-        more = scanner.next(currRow);
-        assertEquals(Bytes.toString(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
-          currRow.get(0).getRowLength()), verify + "");
-        verify--;
-        currRow.clear();
-      } while(more);
-      assertEquals(verify, startRow + numRows - 1);
-      scanner.close();
-      //fire the reverse scan2:  top range, and equals to the last row
-      scan = new Scan(Bytes.toBytes(String.valueOf(startRow + 2 * numRows - 1)));
-      scan.setReversed(true);
-      scanner = regions[1].getScanner(scan);
-      verify = startRow + 2 * numRows - 1;
-      do {
-        more = scanner.next(currRow);
-        assertEquals(Bytes.toString(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
-          currRow.get(0).getRowLength()), verify + "");
-        verify--;
-        currRow.clear();
-      } while(more);
-      assertEquals(verify, startRow + numRows - 1);
-      scanner.close();
-      //fire the reverse scan3:  bottom range, and larger than the last row
-      scan = new Scan(Bytes.toBytes(String.valueOf(startRow + numRows)));
-      scan.setReversed(true);
-      scanner = regions[0].getScanner(scan);
-      verify = startRow + numRows - 1;
-      do {
-        more = scanner.next(currRow);
-        assertEquals(Bytes.toString(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
-          currRow.get(0).getRowLength()), verify + "");
-        verify--;
-        currRow.clear();
-      } while(more);
-      assertEquals(verify, 99);
-      scanner.close();
-      //fire the reverse scan4:  bottom range, and equals to the last row
-      scan = new Scan(Bytes.toBytes(String.valueOf(startRow + numRows - 1)));
-      scan.setReversed(true);
-      scanner = regions[0].getScanner(scan);
-      verify = startRow + numRows - 1;
-      do {
-        more = scanner.next(currRow);
-        assertEquals(Bytes.toString(currRow.get(0).getRowArray(), currRow.get(0).getRowOffset(),
-          currRow.get(0).getRowLength()), verify + "");
-        verify--;
-        currRow.clear();
-      } while(more);
-      assertEquals(verify, startRow - 1);
-      scanner.close();
-    } finally {
-      this.region.close();
-      this.region = null;
-    }
-  }
-
-  @Test
   public void testWriteRequestsCounter() throws IOException {
     byte[] fam = Bytes.toBytes("info");
     byte[][] families = { fam };

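The removed testSplitRegionWithReverseScan drove reverse scans directly against the daughter regions' InternalScanner. For reference, the equivalent reverse-scan semantics at the client level look roughly like the sketch below; the table name is a hypothetical stand-in and a reachable cluster configuration is assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReverseScanSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("testtable"))) {
      // Start at (or above) the highest row of interest and walk backwards.
      Scan scan = new Scan(Bytes.toBytes("105"));
      scan.setReversed(true);
      try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result result : scanner) {
          System.out.println(Bytes.toString(result.getRow()));
        }
      }
    }
  }
}
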
http://git-wip-us.apache.org/repos/asf/hbase/blob/bff7c4f1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
index d31d395..5d432c9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.java
@@ -19,30 +19,19 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
-import java.util.TreeMap;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.VerySlowRegionServerTests;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.junit.ClassRule;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestRule;
 
-import static org.apache.hadoop.hbase.HBaseTestingUtility.START_KEY;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam1;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam2;
-import static org.apache.hadoop.hbase.HBaseTestingUtility.fam3;
-import static org.junit.Assert.assertNotNull;
-
 /**
  * A test similar to TestHRegion, but with in-memory flush families.
  * Also checks wal truncation after in-memory compaction.
@@ -71,88 +60,5 @@ public class TestHRegionWithInMemoryFlush extends TestHRegion{
     return TEST_UTIL.createLocalHRegionWithInMemoryFlags(tableName, startKey, stopKey,
         isReadOnly, durability, wal, inMemory, families);
   }
-
-  /**
-   * Splits twice and verifies getting from each of the split regions.
-   *
-   * @throws Exception
-   */
-  @Override
-  public void testBasicSplit() throws Exception {
-    byte[][] families = { fam1, fam2, fam3 };
-
-    Configuration hc = initSplit();
-    // Setting up region
-    this.region = initHRegion(tableName, method, hc, families);
-
-    try {
-      LOG.info("" + HBaseTestCase.addContent(region, fam3));
-      region.flush(true);
-      region.compactStores();
-      byte[] splitRow = region.checkSplit();
-      assertNotNull(splitRow);
-      LOG.info("SplitRow: " + Bytes.toString(splitRow));
-      HRegion[] regions = splitRegion(region, splitRow);
-      try {
-        // Need to open the regions.
-        // TODO: Add an 'open' to HRegion... don't do open by constructing
-        // instance.
-        for (int i = 0; i < regions.length; i++) {
-          regions[i] = HRegion.openHRegion(regions[i], null);
-        }
-        // Assert can get rows out of new regions. Should be able to get first
-        // row from first region and the midkey from second region.
-        assertGet(regions[0], fam3, Bytes.toBytes(START_KEY));
-        assertGet(regions[1], fam3, splitRow);
-        // Test I can get scanner and that it starts at right place.
-        assertScan(regions[0], fam3, Bytes.toBytes(START_KEY));
-        assertScan(regions[1], fam3, splitRow);
-        // Now prove can't split regions that have references.
-        for (int i = 0; i < regions.length; i++) {
-          // Add so much data to this region, we create a store file that is >
-          // than one of our unsplitable references. it will.
-          for (int j = 0; j < 2; j++) {
-            HBaseTestCase.addContent(regions[i], fam3);
-          }
-          HBaseTestCase.addContent(regions[i], fam2);
-          HBaseTestCase.addContent(regions[i], fam1);
-          regions[i].flush(true);
-        }
-
-        byte[][] midkeys = new byte[regions.length][];
-        // To make regions splitable force compaction.
-        for (int i = 0; i < regions.length; i++) {
-          regions[i].compactStores();
-          midkeys[i] = regions[i].checkSplit();
-        }
-
-        TreeMap<String, HRegion> sortedMap = new TreeMap<String, HRegion>();
-        // Split these two daughter regions so then I'll have 4 regions. Will
-        // split because added data above.
-        for (int i = 0; i < regions.length; i++) {
-          HRegion[] rs = null;
-          if (midkeys[i] != null) {
-            rs = splitRegion(regions[i], midkeys[i]);
-            for (int j = 0; j < rs.length; j++) {
-              sortedMap.put(Bytes.toString(rs[j].getRegionInfo().getRegionName()),
-                  HRegion.openHRegion(rs[j], null));
-            }
-          }
-        }
-        LOG.info("Made 4 regions");
-      } finally {
-        for (int i = 0; i < regions.length; i++) {
-          try {
-            regions[i].close();
-          } catch (IOException e) {
-            // Ignore.
-          }
-        }
-      }
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-    }
-  }
 }
 


[13/50] [abbrv] hbase git commit: HBASE-17483 Add equals/hashcode for OffheapKeyValue (Ram)

Posted by el...@apache.org.
HBASE-17483 Add equals/hashcode for OffheapKeyValue (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/406f66a4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/406f66a4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/406f66a4

Branch: refs/heads/HBASE-16961
Commit: 406f66a4e89f0d3a52225bce6a5a33cf54b9d75c
Parents: b779143
Author: Ramkrishna <ra...@intel.com>
Authored: Wed Jan 18 17:00:57 2017 +0530
Committer: Ramkrishna <ra...@intel.com>
Committed: Wed Jan 18 17:00:57 2017 +0530

----------------------------------------------------------------------
 .../apache/hadoop/hbase/OffheapKeyValue.java    | 34 ++++++++++++++++++++
 1 file changed, 34 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/406f66a4/hbase-common/src/main/java/org/apache/hadoop/hbase/OffheapKeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/OffheapKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/OffheapKeyValue.java
index ab1f6ef..c08927c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/OffheapKeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/OffheapKeyValue.java
@@ -310,4 +310,38 @@ public class OffheapKeyValue extends ByteBufferCell implements ExtendedCell {
     kv.setSequenceId(this.getSequenceId());
     return kv;
   }
+
+  /**
+   * Needed for doing 'contains' on a List. Only compares the key portion, not the value.
+   */
+  @Override
+  public boolean equals(Object other) {
+    if (!(other instanceof Cell)) {
+      return false;
+    }
+    return CellUtil.equals(this, (Cell) other);
+  }
+
+  /**
+   * In line with {@link #equals(Object)}, only uses the key portion, not the value.
+   */
+  @Override
+  public int hashCode() {
+    return calculateHashForKey(this);
+  }
+
+  private int calculateHashForKey(ByteBufferCell cell) {
+    int rowHash = ByteBufferUtils.hashCode(cell.getRowByteBuffer(), cell.getRowPosition(),
+      cell.getRowLength());
+    int familyHash = ByteBufferUtils.hashCode(cell.getFamilyByteBuffer(), cell.getFamilyPosition(),
+      cell.getFamilyLength());
+    int qualifierHash = ByteBufferUtils.hashCode(cell.getQualifierByteBuffer(),
+      cell.getQualifierPosition(), cell.getQualifierLength());
+
+    int hash = 31 * rowHash + familyHash;
+    hash = 31 * hash + qualifierHash;
+    hash = 31 * hash + (int) cell.getTimestamp();
+    hash = 31 * hash + cell.getTypeByte();
+    return hash;
+  }
 }
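
Editor's note: the key-only equals contract above is what makes List#contains work for
cells regardless of the value bytes or the backing implementation. A minimal,
self-contained sketch (not part of the commit; it uses the on-heap KeyValue, which
follows the same CellUtil.equals-based contract):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellEqualsSketch {
      public static void main(String[] args) {
        byte[] row = Bytes.toBytes("r1");
        byte[] fam = Bytes.toBytes("f");
        byte[] qual = Bytes.toBytes("q");
        long ts = 1L;
        // Two cells with the same key but different values.
        Cell a = new KeyValue(row, fam, qual, ts, Bytes.toBytes("v1"));
        Cell b = new KeyValue(row, fam, qual, ts, Bytes.toBytes("v2"));
        List<Cell> cells = new ArrayList<>();
        cells.add(a);
        // equals() delegates to CellUtil.equals, which compares only the key
        // portion, so contains() matches even though the values differ.
        System.out.println(cells.contains(b));     // true
        System.out.println(CellUtil.equals(a, b)); // true
      }
    }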


[14/50] [abbrv] hbase git commit: HBASE-17482 mvcc mechanism fails when using mvccPreAssign (Allan Yang)

Posted by el...@apache.org.
HBASE-17482 mvcc mechanism fails when using mvccPreAssign (Allan Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6cbc375a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6cbc375a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6cbc375a

Branch: refs/heads/HBASE-16961
Commit: 6cbc375aa493b159600996b86d3872e9db16f6c6
Parents: 406f66a
Author: tedyu <yu...@gmail.com>
Authored: Wed Jan 18 07:50:41 2017 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Wed Jan 18 07:50:41 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/HRegion.java      |  2 +-
 .../hbase/client/TestFromClientSide3.java       | 57 ++++++++++++++++++++
 2 files changed, 58 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/6cbc375a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b574c50..0b93cb1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3389,7 +3389,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         // 1) If the op is in replay mode, FSWALEntry#stampRegionSequenceId won't stamp sequence id.
         // 2) If no WAL, FSWALEntry won't be used
         // we use durability of the original mutation for the mutation passed by CP.
-        boolean updateSeqId = replay || batchOp.getMutation(i).getDurability() == Durability.SKIP_WAL;
+        boolean updateSeqId = replay || batchOp.getMutation(i).getDurability() == Durability.SKIP_WAL || mvccPreAssign;
         if (updateSeqId) {
           this.updateSequenceId(familyMaps[i].values(),
             replay? batchOp.getReplaySequenceId(): writeEntry.getWriteNumber());

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cbc375a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index 9fc20ec..b863b40 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -692,6 +692,63 @@ public class TestFromClientSide3 {
     }
   }
 
+  /**
+   * A test case for issue HBASE-17482
+   * After combining seqid with the mvcc readpoint, the seqid/mvcc is acquired and stamped
+   * onto cells in the append thread, and a countdown latch ensures that has happened
+   * before cells can be put into the memstore. But the MVCCPreAssign patch (HBASE-16698)
+   * moves the seqid/mvcc acquisition into the handler thread while the stamping stays in
+   * the append thread, with no countdown latch to guarantee memstore cells are stamped.
+   * If cells without an mvcc (i.e. mvcc=0) are put into the memstore, a scanner with a
+   * smaller readpoint can see that data, which violates the multi-version concurrency
+   * control rules.
+   * This test case reproduces that scenario.
+   * @throws IOException
+   */
+  @Test
+  public void testMVCCUsingMVCCPreAssign() throws IOException {
+    TableName tableName = TableName.valueOf("testMVCCUsingMVCCPreAssign");
+    HTableDescriptor htd = new HTableDescriptor(tableName);
+    HColumnDescriptor fam = new HColumnDescriptor(FAMILY);
+    htd.addFamily(fam);
+    Admin admin = TEST_UTIL.getHBaseAdmin();
+    admin.createTable(htd);
+    Table table = admin.getConnection().getTable(TableName.valueOf("testMVCCUsingMVCCPreAssign"));
+    // put two rows first to init the scanner
+    Put put = new Put(Bytes.toBytes("0"));
+    put.addColumn(FAMILY, Bytes.toBytes( ""), Bytes.toBytes("0"));
+    table.put(put);
+    put = new Put(Bytes.toBytes("00"));
+    put.addColumn(FAMILY, Bytes.toBytes( ""), Bytes.toBytes("0"));
+    table.put(put);
+    Scan scan = new Scan();
+    scan.setTimeRange(0, Long.MAX_VALUE);
+    scan.setCaching(1);
+    ResultScanner scanner = table.getScanner(scan);
+    //the started scanner shouldn't see the rows put below
+    for(int i = 1; i < 1000; i++) {
+      put = new Put(Bytes.toBytes(String.valueOf(i)));
+      put.setDurability(Durability.ASYNC_WAL);
+      put.addColumn(FAMILY, Bytes.toBytes( ""), Bytes.toBytes(i));
+      table.put(put);
+    }
+    int rowNum = 0;
+    for(Result result : scanner) {
+      rowNum++;
+    }
+    //scanner should only see two rows
+    assertEquals(2, rowNum);
+    scanner = table.getScanner(scan);
+    rowNum = 0;
+    for(Result result : scanner) {
+      rowNum++;
+    }
+    // the new scanner should see all rows
+    assertEquals(1001, rowNum);
+
+
+  }
+
   private static void assertNoLocks(final TableName tableName) throws IOException, InterruptedException {
     HRegion region = (HRegion) find(tableName);
     assertEquals(0, region.getLockedRows().size());
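
Editor's note: the visibility rule the test above exercises is that a cell is visible to a
scanner when its sequence id is at or below the scanner's read point; a cell that reaches
the memstore with an unassigned sequence id (0) passes that check for every scanner, which
is the leak the mvccPreAssign change closes by stamping before the memstore insert. A
minimal illustrative sketch (editor's illustration, not HBase code):

    public class MvccVisibilitySketch {
      public static void main(String[] args) {
        long scannerReadPoint = 5L;
        // Sequence ids stamped on memstore cells; 0 means "not yet assigned".
        long[] cellSeqIds = {3L, 7L, 0L};
        for (long seqId : cellSeqIds) {
          // A cell is visible when its sequence id is <= the read point, so a
          // cell left at 0 leaks into scanners opened before it was committed.
          boolean visible = seqId <= scannerReadPoint;
          System.out.println("seqId=" + seqId + " visible=" + visible);
        }
      }
    }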


[50/50] [abbrv] hbase git commit: HBASE-17000 Implement computation of online region sizes and report to the Master

Posted by el...@apache.org.
HBASE-17000 Implement computation of online region sizes and report to the Master

Includes a trivial implementation of the Master-side collection; only
enough to write a test to verify RS collection.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e48b7fa4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e48b7fa4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e48b7fa4

Branch: refs/heads/HBASE-16961
Commit: e48b7fa4a0b4a56900c91fb189ad5be2c15dea01
Parents: 7abe4c1
Author: Josh Elser <el...@apache.org>
Authored: Mon Nov 7 13:46:42 2016 -0500
Committer: Josh Elser <el...@apache.org>
Committed: Mon Jan 23 17:51:57 2017 -0500

----------------------------------------------------------------------
 .../generated/RegionServerStatusProtos.java     | 2071 +++++++++++++++++-
 .../src/main/protobuf/RegionServerStatus.proto  |   22 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   19 +
 .../quotas/FileSystemUtilizationChore.java      |  205 ++
 .../hadoop/hbase/quotas/MasterQuotaManager.java |   15 +
 .../hbase/regionserver/HRegionServer.java       |   72 +
 .../quotas/TestFileSystemUtilizationChore.java  |  357 +++
 .../hadoop/hbase/quotas/TestRegionSizeUse.java  |  194 ++
 .../TestRegionServerRegionSpaceUseReport.java   |   99 +
 9 files changed, 3032 insertions(+), 22 deletions(-)
----------------------------------------------------------------------
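
Editor's sketch of the idea behind the new FileSystemUtilizationChore: sum the store file
sizes of each online region and report the totals to the Master over the new
RegionServerStatus RPC. The helper below is illustrative only, not the committed
implementation; getStores()/getStorefilesSize() are existing Region/Store accessors, and
the reporting step is omitted.

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.regionserver.Region;
    import org.apache.hadoop.hbase.regionserver.Store;

    public class RegionSizeSketch {
      // Sum the on-disk size of every store of each online region, keyed by
      // encoded region name.
      static Map<String, Long> computeRegionSizes(Iterable<? extends Region> onlineRegions) {
        Map<String, Long> sizes = new HashMap<>();
        for (Region region : onlineRegions) {
          long regionSize = 0L;
          for (Store store : region.getStores()) {
            regionSize += store.getStorefilesSize();
          }
          sizes.put(region.getRegionInfo().getEncodedName(), regionSize);
        }
        return sizes;
      }
    }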



[15/50] [abbrv] hbase git commit: HBASE-17486 Tighten the contract for batch client methods (Michael Axiak)

Posted by el...@apache.org.
HBASE-17486 Tighten the contract for batch client methods (Michael Axiak)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8f1d0a2b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8f1d0a2b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8f1d0a2b

Branch: refs/heads/HBASE-16961
Commit: 8f1d0a2b84e4f4dc96406b4748998c7d6eeacbd3
Parents: 6cbc375
Author: Michael Stack <st...@apache.org>
Authored: Wed Jan 18 15:16:52 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Wed Jan 18 15:16:52 2017 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hbase/client/Table.java    | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/8f1d0a2b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
index 016894b..0f30cb4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Table.java
@@ -112,7 +112,8 @@ public interface Table extends Closeable {
    * @param actions list of Get, Put, Delete, Increment, Append objects
    * @param results Empty Object[], same size as actions. Provides access to partial
    *                results, in case an exception is thrown. A null in the result array means that
-   *                the call for that action failed, even after retries
+   *                the call for that action failed, even after retries. The order of the objects
+   *                in the results array corresponds to the order of actions in the request list.
    * @throws IOException
    * @since 0.90.0
    */
@@ -147,7 +148,8 @@ public interface Table extends Closeable {
    * exist, the {@link Result} instance returned won't contain any {@link
    * org.apache.hadoop.hbase.KeyValue}, as indicated by {@link Result#isEmpty()}. If there are any
    * failures even after retries, there will be a null in the results array for those Gets, AND an
-   * exception will be thrown.
+   * exception will be thrown. The ordering of the Result array corresponds to the order of the
+   * list of Get requests.
    * @throws IOException if a remote or network exception occurs.
    * @since 0.90.0
    */
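
Editor's note: a short, self-contained usage sketch of the contract being documented,
i.e. results[i] always lines up with actions.get(i), and a null entry means that action
failed even after retries. The table name and column used here are illustrative only.

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchOrderSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("t1"))) {
          List<Row> actions = new ArrayList<>();
          actions.add(new Put(Bytes.toBytes("r1"))
              .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
          actions.add(new Get(Bytes.toBytes("r1")));
          // results[i] corresponds to actions.get(i); a null entry means that
          // action failed even after retries.
          Object[] results = new Object[actions.size()];
          table.batch(actions, results);
        }
      }
    }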


[45/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)

Posted by el...@apache.org.
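
Editor's note: the generated code below adds a SpaceQuota message (soft_limit,
violation_policy), a SpaceViolationPolicy enum, and a new optional 'space' field (tag 3)
on Quotas. A small usage sketch of the generated builders; the values are illustrative.

    import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;

    public class SpaceQuotaSketch {
      public static void main(String[] args) {
        QuotaProtos.SpaceQuota space = QuotaProtos.SpaceQuota.newBuilder()
            .setSoftLimit(1024L * 1024L * 1024L)  // 1 GB limit, illustrative
            .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_WRITES)
            .build();
        QuotaProtos.Quotas quotas = QuotaProtos.Quotas.newBuilder()
            .setSpace(space)
            .build();
        // The new 'space' field rides alongside the existing throttle settings.
        System.out.println(quotas.hasSpace() + " " + quotas.getSpace().getViolationPolicy());
      }
    }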
http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
index d14336a..a715115 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/QuotaProtos.java
@@ -239,12 +239,20 @@ public final class QuotaProtos {
      * <code>THROTTLE = 1;</code>
      */
     THROTTLE(1),
+    /**
+     * <code>SPACE = 2;</code>
+     */
+    SPACE(2),
     ;
 
     /**
      * <code>THROTTLE = 1;</code>
      */
     public static final int THROTTLE_VALUE = 1;
+    /**
+     * <code>SPACE = 2;</code>
+     */
+    public static final int SPACE_VALUE = 2;
 
 
     public final int getNumber() {
@@ -262,6 +270,7 @@ public final class QuotaProtos {
     public static QuotaType forNumber(int value) {
       switch (value) {
         case 1: return THROTTLE;
+        case 2: return SPACE;
         default: return null;
       }
     }
@@ -311,6 +320,150 @@ public final class QuotaProtos {
     // @@protoc_insertion_point(enum_scope:hbase.pb.QuotaType)
   }
 
+  /**
+   * <pre>
+   * Defines what action should be taken when the SpaceQuota is violated
+   * </pre>
+   *
+   * Protobuf enum {@code hbase.pb.SpaceViolationPolicy}
+   */
+  public enum SpaceViolationPolicy
+      implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <pre>
+     * Disable the table(s)
+     * </pre>
+     *
+     * <code>DISABLE = 1;</code>
+     */
+    DISABLE(1),
+    /**
+     * <pre>
+     * No writes, bulk-loads, or compactions
+     * </pre>
+     *
+     * <code>NO_WRITES_COMPACTIONS = 2;</code>
+     */
+    NO_WRITES_COMPACTIONS(2),
+    /**
+     * <pre>
+     * No writes or bulk-loads
+     * </pre>
+     *
+     * <code>NO_WRITES = 3;</code>
+     */
+    NO_WRITES(3),
+    /**
+     * <pre>
+     * No puts or bulk-loads, but deletes are allowed
+     * </pre>
+     *
+     * <code>NO_INSERTS = 4;</code>
+     */
+    NO_INSERTS(4),
+    ;
+
+    /**
+     * <pre>
+     * Disable the table(s)
+     * </pre>
+     *
+     * <code>DISABLE = 1;</code>
+     */
+    public static final int DISABLE_VALUE = 1;
+    /**
+     * <pre>
+     * No writes, bulk-loads, or compactions
+     * </pre>
+     *
+     * <code>NO_WRITES_COMPACTIONS = 2;</code>
+     */
+    public static final int NO_WRITES_COMPACTIONS_VALUE = 2;
+    /**
+     * <pre>
+     * No writes or bulk-loads
+     * </pre>
+     *
+     * <code>NO_WRITES = 3;</code>
+     */
+    public static final int NO_WRITES_VALUE = 3;
+    /**
+     * <pre>
+     * No puts or bulk-loads, but deletes are allowed
+     * </pre>
+     *
+     * <code>NO_INSERTS = 4;</code>
+     */
+    public static final int NO_INSERTS_VALUE = 4;
+
+
+    public final int getNumber() {
+      return value;
+    }
+
+    /**
+     * @deprecated Use {@link #forNumber(int)} instead.
+     */
+    @java.lang.Deprecated
+    public static SpaceViolationPolicy valueOf(int value) {
+      return forNumber(value);
+    }
+
+    public static SpaceViolationPolicy forNumber(int value) {
+      switch (value) {
+        case 1: return DISABLE;
+        case 2: return NO_WRITES_COMPACTIONS;
+        case 3: return NO_WRITES;
+        case 4: return NO_INSERTS;
+        default: return null;
+      }
+    }
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<SpaceViolationPolicy>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+        SpaceViolationPolicy> internalValueMap =
+          new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<SpaceViolationPolicy>() {
+            public SpaceViolationPolicy findValueByNumber(int number) {
+              return SpaceViolationPolicy.forNumber(number);
+            }
+          };
+
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(ordinal());
+    }
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.getDescriptor().getEnumTypes().get(3);
+    }
+
+    private static final SpaceViolationPolicy[] VALUES = values();
+
+    public static SpaceViolationPolicy valueOf(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
+
+    private final int value;
+
+    private SpaceViolationPolicy(int value) {
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:hbase.pb.SpaceViolationPolicy)
+  }
+
   public interface TimedQuotaOrBuilder extends
       // @@protoc_insertion_point(interface_extends:hbase.pb.TimedQuota)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
@@ -3419,6 +3572,19 @@ public final class QuotaProtos {
      * <code>optional .hbase.pb.Throttle throttle = 2;</code>
      */
     org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleOrBuilder getThrottleOrBuilder();
+
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    boolean hasSpace();
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getSpace();
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder();
   }
   /**
    * Protobuf type {@code hbase.pb.Quotas}
@@ -3481,6 +3647,19 @@ public final class QuotaProtos {
               bitField0_ |= 0x00000002;
               break;
             }
+            case 26: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000004) == 0x00000004)) {
+                subBuilder = space_.toBuilder();
+              }
+              space_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(space_);
+                space_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000004;
+              break;
+            }
           }
         }
       } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -3542,6 +3721,27 @@ public final class QuotaProtos {
       return throttle_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle.getDefaultInstance() : throttle_;
     }
 
+    public static final int SPACE_FIELD_NUMBER = 3;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota space_;
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    public boolean hasSpace() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getSpace() {
+      return space_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : space_;
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder() {
+      return space_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : space_;
+    }
+
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
@@ -3566,6 +3766,9 @@ public final class QuotaProtos {
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeMessage(2, getThrottle());
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeMessage(3, getSpace());
+      }
       unknownFields.writeTo(output);
     }
 
@@ -3582,6 +3785,10 @@ public final class QuotaProtos {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
           .computeMessageSize(2, getThrottle());
       }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(3, getSpace());
+      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -3609,6 +3816,11 @@ public final class QuotaProtos {
         result = result && getThrottle()
             .equals(other.getThrottle());
       }
+      result = result && (hasSpace() == other.hasSpace());
+      if (hasSpace()) {
+        result = result && getSpace()
+            .equals(other.getSpace());
+      }
       result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
@@ -3629,6 +3841,10 @@ public final class QuotaProtos {
         hash = (37 * hash) + THROTTLE_FIELD_NUMBER;
         hash = (53 * hash) + getThrottle().hashCode();
       }
+      if (hasSpace()) {
+        hash = (37 * hash) + SPACE_FIELD_NUMBER;
+        hash = (53 * hash) + getSpace().hashCode();
+      }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -3744,6 +3960,7 @@ public final class QuotaProtos {
         if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
                 .alwaysUseFieldBuilders) {
           getThrottleFieldBuilder();
+          getSpaceFieldBuilder();
         }
       }
       public Builder clear() {
@@ -3756,6 +3973,12 @@ public final class QuotaProtos {
           throttleBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000002);
+        if (spaceBuilder_ == null) {
+          space_ = null;
+        } else {
+          spaceBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000004);
         return this;
       }
 
@@ -3792,6 +4015,14 @@ public final class QuotaProtos {
         } else {
           result.throttle_ = throttleBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        if (spaceBuilder_ == null) {
+          result.space_ = space_;
+        } else {
+          result.space_ = spaceBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -3840,6 +4071,9 @@ public final class QuotaProtos {
         if (other.hasThrottle()) {
           mergeThrottle(other.getThrottle());
         }
+        if (other.hasSpace()) {
+          mergeSpace(other.getSpace());
+        }
         this.mergeUnknownFields(other.unknownFields);
         onChanged();
         return this;
@@ -4022,6 +4256,124 @@ public final class QuotaProtos {
         }
         return throttleBuilder_;
       }
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota space_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> spaceBuilder_;
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public boolean hasSpace() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getSpace() {
+        if (spaceBuilder_ == null) {
+          return space_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : space_;
+        } else {
+          return spaceBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public Builder setSpace(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota value) {
+        if (spaceBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          space_ = value;
+          onChanged();
+        } else {
+          spaceBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public Builder setSpace(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder builderForValue) {
+        if (spaceBuilder_ == null) {
+          space_ = builderForValue.build();
+          onChanged();
+        } else {
+          spaceBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public Builder mergeSpace(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota value) {
+        if (spaceBuilder_ == null) {
+          if (((bitField0_ & 0x00000004) == 0x00000004) &&
+              space_ != null &&
+              space_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) {
+            space_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder(space_).mergeFrom(value).buildPartial();
+          } else {
+            space_ = value;
+          }
+          onChanged();
+        } else {
+          spaceBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public Builder clearSpace() {
+        if (spaceBuilder_ == null) {
+          space_ = null;
+          onChanged();
+        } else {
+          spaceBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder getSpaceBuilder() {
+        bitField0_ |= 0x00000004;
+        onChanged();
+        return getSpaceFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getSpaceOrBuilder() {
+        if (spaceBuilder_ != null) {
+          return spaceBuilder_.getMessageOrBuilder();
+        } else {
+          return space_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : space_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
+          getSpaceFieldBuilder() {
+        if (spaceBuilder_ == null) {
+          spaceBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>(
+                  getSpace(),
+                  getParentForChildren(),
+                  isClean());
+          space_ = null;
+        }
+        return spaceBuilder_;
+      }
       public final Builder setUnknownFields(
           final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
         return super.setUnknownFields(unknownFields);
@@ -4444,75 +4796,1314 @@ public final class QuotaProtos {
 
   }
 
-  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_TimedQuota_descriptor;
-  private static final 
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_hbase_pb_TimedQuota_fieldAccessorTable;
-  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_Throttle_descriptor;
-  private static final 
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_hbase_pb_Throttle_fieldAccessorTable;
-  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_ThrottleRequest_descriptor;
-  private static final 
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable;
-  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_Quotas_descriptor;
-  private static final 
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_hbase_pb_Quotas_fieldAccessorTable;
-  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_QuotaUsage_descriptor;
-  private static final 
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-      internal_static_hbase_pb_QuotaUsage_fieldAccessorTable;
+  public interface SpaceQuotaOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.SpaceQuota)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
 
-  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
-      getDescriptor() {
-    return descriptor;
+    /**
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+     *
+     * <code>optional uint64 soft_limit = 1;</code>
+     */
+    boolean hasSoftLimit();
+    /**
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+     *
+     * <code>optional uint64 soft_limit = 1;</code>
+     */
+    long getSoftLimit();
+
+    /**
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+     *
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+     */
+    boolean hasViolationPolicy();
+    /**
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+     *
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy();
   }
-  private static  org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
-      descriptor;
-  static {
-    java.lang.String[] descriptorData = {
-      "\n\013Quota.proto\022\010hbase.pb\032\013HBase.proto\"\204\001\n" +
-      "\nTimedQuota\022%\n\ttime_unit\030\001 \002(\0162\022.hbase.p" +
-      "b.TimeUnit\022\022\n\nsoft_limit\030\002 \001(\004\022\r\n\005share\030" +
-      "\003 \001(\002\022,\n\005scope\030\004 \001(\0162\024.hbase.pb.QuotaSco" +
-      "pe:\007MACHINE\"\375\001\n\010Throttle\022%\n\007req_num\030\001 \001(" +
-      "\0132\024.hbase.pb.TimedQuota\022&\n\010req_size\030\002 \001(" +
-      "\0132\024.hbase.pb.TimedQuota\022\'\n\twrite_num\030\003 \001" +
-      "(\0132\024.hbase.pb.TimedQuota\022(\n\nwrite_size\030\004" +
-      " \001(\0132\024.hbase.pb.TimedQuota\022&\n\010read_num\030\005" +
-      " \001(\0132\024.hbase.pb.TimedQuota\022\'\n\tread_size\030",
-      "\006 \001(\0132\024.hbase.pb.TimedQuota\"b\n\017ThrottleR" +
-      "equest\022$\n\004type\030\001 \001(\0162\026.hbase.pb.Throttle" +
-      "Type\022)\n\013timed_quota\030\002 \001(\0132\024.hbase.pb.Tim" +
-      "edQuota\"M\n\006Quotas\022\035\n\016bypass_globals\030\001 \001(" +
-      "\010:\005false\022$\n\010throttle\030\002 \001(\0132\022.hbase.pb.Th" +
-      "rottle\"\014\n\nQuotaUsage*&\n\nQuotaScope\022\013\n\007CL" +
-      "USTER\020\001\022\013\n\007MACHINE\020\002*v\n\014ThrottleType\022\022\n\016" +
-      "REQUEST_NUMBER\020\001\022\020\n\014REQUEST_SIZE\020\002\022\020\n\014WR" +
-      "ITE_NUMBER\020\003\022\016\n\nWRITE_SIZE\020\004\022\017\n\013READ_NUM" +
-      "BER\020\005\022\r\n\tREAD_SIZE\020\006*\031\n\tQuotaType\022\014\n\010THR",
-      "OTTLE\020\001BH\n1org.apache.hadoop.hbase.shade" +
-      "d.protobuf.generatedB\013QuotaProtosH\001\210\001\001\240\001" +
-      "\001"
-    };
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
-        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.    InternalDescriptorAssigner() {
-          public org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry assignDescriptors(
-              org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor root) {
-            descriptor = root;
-            return null;
-          }
-        };
-    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
-      .internalBuildGeneratedFileFrom(descriptorData,
-        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] {
+  /**
+   * <pre>
+   * Defines a limit on the amount of filesystem space used by a table/namespace
+   * </pre>
+   *
+   * Protobuf type {@code hbase.pb.SpaceQuota}
+   */
+  public  static final class SpaceQuota extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.SpaceQuota)
+      SpaceQuotaOrBuilder {
+    // Use SpaceQuota.newBuilder() to construct.
+    private SpaceQuota(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private SpaceQuota() {
+      softLimit_ = 0L;
+      violationPolicy_ = 1;
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SpaceQuota(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              softLimit_ = input.readUInt64();
+              break;
+            }
+            case 16: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy value = org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(2, rawValue);
+              } else {
+                bitField0_ |= 0x00000002;
+                violationPolicy_ = rawValue;
+              }
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.class, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int SOFT_LIMIT_FIELD_NUMBER = 1;
+    private long softLimit_;
+    /**
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+     *
+     * <code>optional uint64 soft_limit = 1;</code>
+     */
+    public boolean hasSoftLimit() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <pre>
+     * The limit of bytes for this quota
+     * </pre>
+     *
+     * <code>optional uint64 soft_limit = 1;</code>
+     */
+    public long getSoftLimit() {
+      return softLimit_;
+    }
+
+    public static final int VIOLATION_POLICY_FIELD_NUMBER = 2;
+    private int violationPolicy_;
+    /**
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+     *
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+     */
+    public boolean hasViolationPolicy() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <pre>
+     * The action to take when the quota is violated
+     * </pre>
+     *
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy() {
+      org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy result = org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy.valueOf(violationPolicy_);
+      return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE : result;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(1, softLimit_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeEnum(2, violationPolicy_);
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(1, softLimit_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeEnumSize(2, violationPolicy_);
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota other = (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota) obj;
+
+      boolean result = true;
+      result = result && (hasSoftLimit() == other.hasSoftLimit());
+      if (hasSoftLimit()) {
+        result = result && (getSoftLimit()
+            == other.getSoftLimit());
+      }
+      result = result && (hasViolationPolicy() == other.hasViolationPolicy());
+      if (hasViolationPolicy()) {
+        result = result && violationPolicy_ == other.violationPolicy_;
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasSoftLimit()) {
+        hash = (37 * hash) + SOFT_LIMIT_FIELD_NUMBER;
+        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+            getSoftLimit());
+      }
+      if (hasViolationPolicy()) {
+        hash = (37 * hash) + VIOLATION_POLICY_FIELD_NUMBER;
+        hash = (53 * hash) + violationPolicy_;
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * <pre>
+     * Defines a limit on the amount of filesystem space used by a table/namespace
+     * </pre>
+     *
+     * Protobuf type {@code hbase.pb.SpaceQuota}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.SpaceQuota)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.class, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        softLimit_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        violationPolicy_ = 1;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceQuota_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota result = new org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.softLimit_ = softLimit_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.violationPolicy_ = violationPolicy_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) return this;
+        if (other.hasSoftLimit()) {
+          setSoftLimit(other.getSoftLimit());
+        }
+        if (other.hasViolationPolicy()) {
+          setViolationPolicy(other.getViolationPolicy());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private long softLimit_ ;
+      /**
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+       *
+       * <code>optional uint64 soft_limit = 1;</code>
+       */
+      public boolean hasSoftLimit() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+       *
+       * <code>optional uint64 soft_limit = 1;</code>
+       */
+      public long getSoftLimit() {
+        return softLimit_;
+      }
+      /**
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+       *
+       * <code>optional uint64 soft_limit = 1;</code>
+       */
+      public Builder setSoftLimit(long value) {
+        bitField0_ |= 0x00000001;
+        softLimit_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <pre>
+       * The limit of bytes for this quota
+       * </pre>
+       *
+       * <code>optional uint64 soft_limit = 1;</code>
+       */
+      public Builder clearSoftLimit() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        softLimit_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      private int violationPolicy_ = 1;
+      /**
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       *
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       */
+      public boolean hasViolationPolicy() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       *
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy result = org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy.valueOf(violationPolicy_);
+        return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE : result;
+      }
+      /**
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       *
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       */
+      public Builder setViolationPolicy(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolationPolicy value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        violationPolicy_ = value.getNumber();
+        onChanged();
+        return this;
+      }
+      /**
+       * <pre>
+       * The action to take when the quota is violated
+       * </pre>
+       *
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 2;</code>
+       */
+      public Builder clearViolationPolicy() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        violationPolicy_ = 1;
+        onChanged();
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.SpaceQuota)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.SpaceQuota)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<SpaceQuota>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<SpaceQuota>() {
+      public SpaceQuota parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new SpaceQuota(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<SpaceQuota> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<SpaceQuota> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface SpaceLimitRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.SpaceLimitRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    boolean hasQuota();
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getQuota();
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder();
+  }
+  /**
+   * <pre>
+   * The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
+   * </pre>
+   *
+   * Protobuf type {@code hbase.pb.SpaceLimitRequest}
+   */
+  public  static final class SpaceLimitRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.SpaceLimitRequest)
+      SpaceLimitRequestOrBuilder {
+    // Use SpaceLimitRequest.newBuilder() to construct.
+    private SpaceLimitRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private SpaceLimitRequest() {
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private SpaceLimitRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000001) == 0x00000001)) {
+                subBuilder = quota_.toBuilder();
+              }
+              quota_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(quota_);
+                quota_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000001;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int QUOTA_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota quota_;
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    public boolean hasQuota() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getQuota() {
+      return quota_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : quota_;
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder() {
+      return quota_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : quota_;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeMessage(1, getQuota());
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, getQuota());
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest) obj;
+
+      boolean result = true;
+      result = result && (hasQuota() == other.hasQuota());
+      if (hasQuota()) {
+        result = result && getQuota()
+            .equals(other.getQuota());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasQuota()) {
+        hash = (37 * hash) + QUOTA_FIELD_NUMBER;
+        hash = (53 * hash) + getQuota().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * <pre>
+     * The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
+     * </pre>
+     *
+     * Protobuf type {@code hbase.pb.SpaceLimitRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.SpaceLimitRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getQuotaFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        if (quotaBuilder_ == null) {
+          quota_ = null;
+        } else {
+          quotaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.internal_static_hbase_pb_SpaceLimitRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        if (quotaBuilder_ == null) {
+          result.quota_ = quota_;
+        } else {
+          result.quota_ = quotaBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance()) return this;
+        if (other.hasQuota()) {
+          mergeQuota(other.getQuota());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota quota_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> quotaBuilder_;
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public boolean hasQuota() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota getQuota() {
+        if (quotaBuilder_ == null) {
+          return quota_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : quota_;
+        } else {
+          return quotaBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public Builder setQuota(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota value) {
+        if (quotaBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          quota_ = value;
+          onChanged();
+        } else {
+          quotaBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public Builder setQuota(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder builderForValue) {
+        if (quotaBuilder_ == null) {
+          quota_ = builderForValue.build();
+          onChanged();
+        } else {
+          quotaBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public Builder mergeQuota(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota value) {
+        if (quotaBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001) &&
+              quota_ != null &&
+              quota_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance()) {
+            quota_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.newBuilder(quota_).mergeFrom(value).buildPartial();
+          } else {
+            quota_ = value;
+          }
+          onChanged();
+        } else {
+          quotaBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000001;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public Builder clearQuota() {
+        if (quotaBuilder_ == null) {
+          quota_ = null;
+          onChanged();
+        } else {
+          quotaBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder getQuotaBuilder() {
+        bitField0_ |= 0x00000001;
+        onChanged();
+        return getQuotaFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder getQuotaOrBuilder() {
+        if (quotaBuilder_ != null) {
+          return quotaBuilder_.getMessageOrBuilder();
+        } else {
+          return quota_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.getDefaultInstance() : quota_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
+          getQuotaFieldBuilder() {
+        if (quotaBuilder_ == null) {
+          quotaBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>(
+                  getQuota(),
+                  getParentForChildren(),
+                  isClean());
+          quota_ = null;
+        }
+        return quotaBuilder_;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.SpaceLimitRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.SpaceLimitRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<SpaceLimitRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<SpaceLimitRequest>() {
+      public SpaceLimitRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new SpaceLimitRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<SpaceLimitRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<SpaceLimitRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_TimedQuota_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_TimedQuota_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_Throttle_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_Throttle_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_ThrottleRequest_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_ThrottleRequest_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_Quotas_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_Quotas_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_QuotaUsage_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_QuotaUsage_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_SpaceQuota_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_SpaceQuota_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_SpaceLimitRequest_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable;
+
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static  org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\013Quota.proto\022\010hbase.pb\032\013HBase.proto\"\204\001\n" +
+      "\nTimedQuota\022%\n\ttime_unit\030\001 \002(\0162\022.hbase.p" +
+      "b.TimeUnit\022\022\n\nsoft_limit\030\002 \001(\004\022\r\n\005share\030" +
+      "\003 \001(\002\022,\n\005scope\030\004 \001(\0162\024.hbase.pb.QuotaSco" +
+      "pe:\007MACHINE\"\375\001\n\010Throttle\022%\n\007req_num\030\001 \001(" +
+      "\0132\024.hbase.pb.TimedQuota\022&\n\010req_size\030\002 \001(" +
+      "\0132\024.hbase.pb.TimedQuota\022\'\n\twrite_num\030\003 \001" +
+      "(\0132\024.hbase.pb.TimedQuota\022(\n\nwrite_size\030\004" +
+      " \001(\0132\024.hbase.pb.TimedQuota\022&\n\010read_num\030\005" +
+      " \001(\0132\024.hbase.pb.TimedQuota\022\'\n\tread_size\030",
+      "\006 \001(\0132\024.hbase.pb.TimedQuota\"b\n\017ThrottleR" +
+      "equest\022$\n\004type\030\001 \001(\0162\026.hbase.pb.Throttle" +
+      "Type\022)\n\013timed_quota\030\002 \001(\0132\024.hbase.pb.Tim" +
+      "edQuota\"r\n\006Quotas\022\035\n\016bypass_globals\030\001 \001(" +
+      "\010:\005false\022$\n\010throttle\030\002 \001(\0132\022.hbase.pb.Th" +
+      "rottle\022#\n\005space\030\003 \001(\0132\024.hbase.pb.SpaceQu" +
+      "ota\"\014\n\nQuotaUsage\"Z\n\nSpaceQuota\022\022\n\nsoft_" +
+      "limit\030\001 \001(\004\0228\n\020violation_policy\030\002 \001(\0162\036." +
+      "hbase.pb.SpaceViolationPolicy\"8\n\021SpaceLi" +
+      "mitRequest\022#\n\005quota\030\001 \001(\0132\024.hbase.pb.Spa",
+      "ceQuota*&\n\nQuotaScope\022\013\n\007CLUSTER\020\001\022\013\n\007MA" +
+      "CHINE\020\002*v\n\014ThrottleType\022\022\n\016REQUEST_NUMBE" +
+      "R\020\001\022\020\n\014REQUEST_SIZE\020\002\022\020\n\014WRITE_NUMBER\020\003\022" +
+      "\016\n\nWRITE_SIZE\020\004\022\017\n\013READ_NUMBER\020\005\022\r\n\tREAD" +
+      "_SIZE\020\006*$\n\tQuotaType\022\014\n\010THROTTLE\020\001\022\t\n\005SP" +
+      "ACE\020\002*]\n\024SpaceViolationPolicy\022\013\n\007DISABLE" +
+      "\020\001\022\031\n\025NO_WRITES_COMPACTIONS\020\002\022\r\n\tNO_WRIT" +
+      "ES\020\003\022\016\n\nNO_INSERTS\020\004BH\n1org.apache.hadoo" +
+      "p.hbase.shaded.protobuf.generatedB\013Quota" +
+      "ProtosH\001\210\001\001\240\001\001"
+    };
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.    InternalDescriptorAssigner() {
+          public org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry assignDescriptors(
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor root) {
+            descriptor = root;
+            return null;
+          }
+        };
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] {
           org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor(),
         }, assigner);
     internal_static_hbase_pb_TimedQuota_descriptor =
@@ -4538,13 +6129,25 @@ public final class QuotaProtos {
     internal_static_hbase_pb_Quotas_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_Quotas_descriptor,
-        new java.lang.String[] { "BypassGlobals", "Throttle", });
+        new java.lang.String[] { "BypassGlobals", "Throttle", "Space", });
     internal_static_hbase_pb_QuotaUsage_descriptor =
       getDescriptor().getMessageTypes().get(4);
     internal_static_hbase_pb_QuotaUsage_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_QuotaUsage_descriptor,
         new java.lang.String[] { });
+    internal_static_hbase_pb_SpaceQuota_descriptor =
+      getDescriptor().getMessageTypes().get(5);
+    internal_static_hbase_pb_SpaceQuota_fieldAccessorTable = new
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+        internal_static_hbase_pb_SpaceQuota_descriptor,
+        new java.lang.String[] { "SoftLimit", "ViolationPolicy", });
+    internal_static_hbase_pb_SpaceLimitRequest_descriptor =
+      getDescriptor().getMessageTypes().get(6);
+    internal_static_hbase_pb_SpaceLimitRequest_fieldAccessorTable = new
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+        internal_static_hbase_pb_SpaceLimitRequest_descriptor,
+        new java.lang.String[] { "Quota", });
     org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.getDescriptor();
   }
 

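For orientation, here is a minimal usage sketch of the generated API added above. The
class, builder, and enum names come from the generated code in this patch; the quota
values and the wrapper class/main are made up purely for illustration.

  import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;

  public class SpaceQuotaExample {
    public static void main(String[] args) throws Exception {
      // Build a SpaceQuota: 10 GB soft limit, refuse new inserts once violated.
      QuotaProtos.SpaceQuota quota = QuotaProtos.SpaceQuota.newBuilder()
          .setSoftLimit(10L * 1024 * 1024 * 1024)
          .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_INSERTS)
          .build();

      // Wrap it in the new SpaceLimitRequest envelope.
      QuotaProtos.SpaceLimitRequest limit = QuotaProtos.SpaceLimitRequest.newBuilder()
          .setQuota(quota)
          .build();

      // Round-trip through the wire format with the generated parser.
      byte[] bytes = limit.toByteArray();
      QuotaProtos.SpaceLimitRequest copy = QuotaProtos.SpaceLimitRequest.parseFrom(bytes);
      System.out.println(copy.getQuota().getSoftLimit()); // 10737418240
    }
  }
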
http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-protocol-shaded/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index e62f52c..f4f5fc8 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -521,6 +521,8 @@ message SetQuotaRequest {
   optional bool remove_all = 5;
   optional bool bypass_globals = 6;
   optional ThrottleRequest throttle = 7;
+
+  optional SpaceLimitRequest space_limit = 8;
 }
 
 message SetQuotaResponse {

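A hedged sketch of how a client could attach the new field when building a
SetQuotaRequest. MasterProtos.SetQuotaRequest and its setSpaceLimit()/hasSpaceLimit()
accessors are assumed to follow standard protobuf codegen for the space_limit = 8
field above; they are not shown in this patch, and the limit value is an example only.

  import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;

  public class SetSpaceLimitExample {
    public static void main(String[] args) {
      QuotaProtos.SpaceLimitRequest spaceLimit = QuotaProtos.SpaceLimitRequest.newBuilder()
          .setQuota(QuotaProtos.SpaceQuota.newBuilder()
              .setSoftLimit(1024L * 1024 * 1024)                              // example: 1 GB
              .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_WRITES))
          .build();

      // setSpaceLimit(...) is assumed from the standard protobuf codegen for the
      // new `space_limit = 8` field; it is not part of the hunks shown here.
      MasterProtos.SetQuotaRequest request = MasterProtos.SetQuotaRequest.newBuilder()
          .setSpaceLimit(spaceLimit)
          .build();
      System.out.println(request.hasSpaceLimit());
    }
  }
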
http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-protocol-shaded/src/main/protobuf/Quota.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
index 240c535..b53219a 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
@@ -65,12 +65,33 @@ message ThrottleRequest {
 
 enum QuotaType {
   THROTTLE = 1;
+  SPACE = 2;
 }
 
 message Quotas {
   optional bool bypass_globals = 1 [default = false];
   optional Throttle throttle = 2;
+  optional SpaceQuota space = 3;
 }
 
 message QuotaUsage {
 }
+
+// Defines what action should be taken when the SpaceQuota is violated
+enum SpaceViolationPolicy {
+  DISABLE = 1; // Disable the table(s)
+  NO_WRITES_COMPACTIONS = 2; // No writes, bulk-loads, or compactions
+  NO_WRITES = 3; // No writes or bulk-loads
+  NO_INSERTS = 4; // No puts or bulk-loads, but deletes are allowed
+}
+
+// Defines a limit on the amount of filesystem space used by a table/namespace
+message SpaceQuota {
+  optional uint64 soft_limit = 1; // The limit of bytes for this quota
+  optional SpaceViolationPolicy violation_policy = 2; // The action to take when the quota is violated
+}
+
+// The Request to limit space usage (to allow for schema evolution not tied to SpaceQuota).
+message SpaceLimitRequest {
+  optional SpaceQuota quota = 1;
+}
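
To show how the new optional `space` field on Quotas might be consumed, a small hedged
sketch follows; hasSpace()/getSpace() are assumed standard generated accessors for
field 3 (not shown in this patch), and the usage figure is a hypothetical value
supplied by the caller.

  import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;

  public class SpaceQuotaCheck {
    // Hedged sketch: decide whether observed usage violates the space quota carried
    // on a Quotas message. Returns false when no space quota is configured.
    static boolean inViolation(QuotaProtos.Quotas quotas, long usageInBytes) {
      if (!quotas.hasSpace() || !quotas.getSpace().hasSoftLimit()) {
        return false; // no space quota configured, nothing to violate
      }
      return usageInBytes > quotas.getSpace().getSoftLimit();
    }
  }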


[06/50] [abbrv] hbase git commit: HBASE-17470 Remove merge region code from region server (Stephen Yuan Jiang)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
deleted file mode 100644
index 5c86429..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransactionImpl.java
+++ /dev/null
@@ -1,742 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.security.PrivilegedExceptionAction;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.ListIterator;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.MetaMutationAnnotation;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
-import org.apache.hadoop.hbase.regionserver.SplitTransactionImpl.LoggingProgressable;
-import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.Pair;
-
-import com.google.common.annotations.VisibleForTesting;
-
-@InterfaceAudience.Private
-public class RegionMergeTransactionImpl implements RegionMergeTransaction {
-  private static final Log LOG = LogFactory.getLog(RegionMergeTransactionImpl.class);
-
-  // Merged region info
-  private HRegionInfo mergedRegionInfo;
-  // region_a sorts before region_b
-  private final HRegion region_a;
-  private final HRegion region_b;
-  // merges dir is under region_a
-  private final Path mergesdir;
-  // We only merge adjacent regions if forcible is false
-  private final boolean forcible;
-  private final long masterSystemTime;
-
-  /*
-   * Transaction state for listener, only valid during execute and
-   * rollback
-   */
-  private RegionMergeTransactionPhase currentPhase = RegionMergeTransactionPhase.STARTED;
-  private Server server;
-  private RegionServerServices rsServices;
-
-  public static class JournalEntryImpl implements JournalEntry {
-    private RegionMergeTransactionPhase type;
-    private long timestamp;
-
-    public JournalEntryImpl(RegionMergeTransactionPhase type) {
-      this(type, EnvironmentEdgeManager.currentTime());
-    }
-
-    public JournalEntryImpl(RegionMergeTransactionPhase type, long timestamp) {
-      this.type = type;
-      this.timestamp = timestamp;
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder sb = new StringBuilder();
-      sb.append(type);
-      sb.append(" at ");
-      sb.append(timestamp);
-      return sb.toString();
-    }
-
-    @Override
-    public RegionMergeTransactionPhase getPhase() {
-      return type;
-    }
-
-    @Override
-    public long getTimeStamp() {
-      return timestamp;
-    }
-  }
-
-  /*
-   * Journal of how far the merge transaction has progressed.
-   */
-  private final List<JournalEntry> journal = new ArrayList<JournalEntry>();
-
-  /**
-   * Listeners
-   */
-  private final ArrayList<TransactionListener> listeners = new ArrayList<TransactionListener>();
-
-  private static IOException closedByOtherException = new IOException(
-      "Failed to close region: already closed by another thread");
-
-  private RegionServerCoprocessorHost rsCoprocessorHost = null;
-
-  /**
-   * Constructor
-   * @param a region a to merge
-   * @param b region b to merge
-   * @param forcible if false, we will only merge adjacent regions
-   */
-  public RegionMergeTransactionImpl(final Region a, final Region b,
-      final boolean forcible) {
-    this(a, b, forcible, EnvironmentEdgeManager.currentTime());
-  }
-  /**
-   * Constructor
-   * @param a region a to merge
-   * @param b region b to merge
-   * @param forcible if false, we will only merge adjacent regions
-   * @param masterSystemTime the time at the master side
-   */
-  public RegionMergeTransactionImpl(final Region a, final Region b,
-      final boolean forcible, long masterSystemTime) {
-    if (a.getRegionInfo().compareTo(b.getRegionInfo()) <= 0) {
-      this.region_a = (HRegion)a;
-      this.region_b = (HRegion)b;
-    } else {
-      this.region_a = (HRegion)b;
-      this.region_b = (HRegion)a;
-    }
-    this.forcible = forcible;
-    this.masterSystemTime = masterSystemTime;
-    this.mergesdir = region_a.getRegionFileSystem().getMergesDir();
-  }
-
-  private void transition(RegionMergeTransactionPhase nextPhase) throws IOException {
-    transition(nextPhase, false);
-  }
-
-  private void transition(RegionMergeTransactionPhase nextPhase, boolean isRollback)
-      throws IOException {
-    if (!isRollback) {
-      // Add to the journal first, because if the listener throws an exception
-      // we need to roll back starting at 'nextPhase'
-      this.journal.add(new JournalEntryImpl(nextPhase));
-    }
-    for (int i = 0; i < listeners.size(); i++) {
-      TransactionListener listener = listeners.get(i);
-      if (!isRollback) {
-        listener.transition(this, currentPhase, nextPhase);
-      } else {
-        listener.rollback(this, currentPhase, nextPhase);
-      }
-    }
-    currentPhase = nextPhase;
-  }
-
-  @Override
-  public boolean prepare(final RegionServerServices services) throws IOException {
-    if (!region_a.getTableDesc().getTableName()
-        .equals(region_b.getTableDesc().getTableName())) {
-      LOG.info("Can't merge regions " + region_a + "," + region_b
-          + " because they do not belong to the same table");
-      return false;
-    }
-    if (region_a.getRegionInfo().equals(region_b.getRegionInfo())) {
-      LOG.info("Can't merge the same region " + region_a);
-      return false;
-    }
-    if (!forcible && !HRegionInfo.areAdjacent(region_a.getRegionInfo(),
-            region_b.getRegionInfo())) {
-      String msg = "Skip merging " + region_a.getRegionInfo().getRegionNameAsString()
-          + " and " + region_b.getRegionInfo().getRegionNameAsString()
-          + ", because they are not adjacent.";
-      LOG.info(msg);
-      return false;
-    }
-    if (!this.region_a.isMergeable() || !this.region_b.isMergeable()) {
-      return false;
-    }
-    try {
-      boolean regionAHasMergeQualifier = hasMergeQualifierInMeta(services,
-          region_a.getRegionInfo().getRegionName());
-      if (regionAHasMergeQualifier ||
-          hasMergeQualifierInMeta(services, region_b.getRegionInfo().getRegionName())) {
-        LOG.debug("Region " + (regionAHasMergeQualifier ? 
-            region_a.getRegionInfo().getRegionNameAsString()
-                : region_b.getRegionInfo().getRegionNameAsString())
-            + " is not mergeable because it has merge qualifier in META");
-        return false;
-      }
-    } catch (IOException e) {
-      LOG.warn("Failed judging whether merge transaction is available for "
-              + region_a.getRegionInfo().getRegionNameAsString() + " and "
-              + region_b.getRegionInfo().getRegionNameAsString(), e);
-      return false;
-    }
-
-    // WARN: make sure there is no parent region of the two merging regions in
-    // hbase:meta If exists, fixing up daughters would cause daughter regions(we
-    // have merged one) online again when we restart master, so we should clear
-    // the parent region to prevent the above case
-    // Since HBASE-7721, we don't need fix up daughters any more. so here do
-    // nothing
-
-    this.mergedRegionInfo = getMergedRegionInfo(region_a.getRegionInfo(),
-        region_b.getRegionInfo());
-
-    transition(RegionMergeTransactionPhase.PREPARED);
-    return true;
-  }
-
-  @Override
-  public Region execute(final Server server, final RegionServerServices services)
-      throws IOException {
-    if (User.isHBaseSecurityEnabled(region_a.getBaseConf())) {
-      LOG.warn("Should use execute(Server, RegionServerServices, User)");
-    }
-    return execute(server, services, null);
-  }
-
-  @Override
-  public Region execute(final Server server, final RegionServerServices services, User user)
-      throws IOException {
-    this.server = server;
-    this.rsServices = services;
-    if (rsCoprocessorHost == null) {
-      rsCoprocessorHost = server != null ?
-        ((HRegionServer) server).getRegionServerCoprocessorHost() : null;
-    }
-    final HRegion mergedRegion = createMergedRegion(server, services, user);
-    if (rsCoprocessorHost != null) {
-      rsCoprocessorHost.postMergeCommit(this.region_a, this.region_b, mergedRegion, user);
-    }
-    stepsAfterPONR(server, services, mergedRegion, user);
-
-    transition(RegionMergeTransactionPhase.COMPLETED);
-
-    return mergedRegion;
-  }
-
-  @VisibleForTesting
-  public void stepsAfterPONR(final Server server, final RegionServerServices services,
-      final HRegion mergedRegion, User user) throws IOException {
-    openMergedRegion(server, services, mergedRegion);
-    if (rsCoprocessorHost != null) {
-      rsCoprocessorHost.postMerge(region_a, region_b, mergedRegion, user);
-    }
-  }
-
-  /**
-   * Prepare the merged region and region files.
-   * @param server Hosting server instance. Can be null when testing
-   * @param services Used to online/offline regions.
-   * @return merged region
-   * @throws IOException If thrown, transaction failed. Call
-   *           {@link #rollback(Server, RegionServerServices)}
-   */
-  private HRegion createMergedRegion(final Server server, final RegionServerServices services,
-      User user) throws IOException {
-    LOG.info("Starting merge of " + region_a + " and "
-        + region_b.getRegionInfo().getRegionNameAsString() + ", forcible=" + forcible);
-    if ((server != null && server.isStopped())
-        || (services != null && services.isStopping())) {
-      throw new IOException("Server is stopped or stopping");
-    }
-
-    if (rsCoprocessorHost != null) {
-      boolean ret = rsCoprocessorHost.preMerge(region_a, region_b, user);
-      if (ret) {
-        throw new IOException("Coprocessor bypassing regions " + this.region_a + " "
-            + this.region_b + " merge.");
-      }
-    }
-
-    // If true, no cluster to write meta edits to or to use coordination.
-    boolean testing = server == null ? true : server.getConfiguration()
-        .getBoolean("hbase.testing.nocluster", false);
-
-    HRegion mergedRegion = stepsBeforePONR(server, services, testing);
-
-    @MetaMutationAnnotation
-    final List<Mutation> metaEntries = new ArrayList<Mutation>();
-    if (rsCoprocessorHost != null) {
-      boolean ret = rsCoprocessorHost.preMergeCommit(region_a, region_b, metaEntries, user);
-
-      if (ret) {
-        throw new IOException("Coprocessor bypassing regions " + this.region_a + " "
-            + this.region_b + " merge.");
-      }
-      try {
-        for (Mutation p : metaEntries) {
-          HRegionInfo.parseRegionName(p.getRow());
-        }
-      } catch (IOException e) {
-        LOG.error("Row key of mutation from coprocessor is not parsable as region name."
-            + "Mutations from coprocessor should only be for hbase:meta table.", e);
-        throw e;
-      }
-    }
-
-    // This is the point of no return. Similar with SplitTransaction.
-    // IF we reach the PONR then subsequent failures need to crash out this
-    // regionserver
-    transition(RegionMergeTransactionPhase.PONR);
-
-    // Add merged region and delete region_a and region_b
-    // as an atomic update. See HBASE-7721. This update to hbase:meta determines
-    // whether the region is considered merged or not in case of failures.
-    // If it is successful, the master will roll forward; if not, the master
-    // will roll back.
-    if (services != null && !services.reportRegionStateTransition(TransitionCode.MERGE_PONR,
-        mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
-      // Passed PONR, let SSH clean it up
-      throw new IOException("Failed to notify master that merge passed PONR: "
-        + region_a.getRegionInfo().getRegionNameAsString() + " and "
-        + region_b.getRegionInfo().getRegionNameAsString());
-    }
-    return mergedRegion;
-  }
-
-  @VisibleForTesting
-  public void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA,
-      HRegionInfo regionB, ServerName serverName, List<Mutation> mutations) throws IOException {
-    HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
-
-    // use the maximum of what master passed us vs local time.
-    long time = Math.max(EnvironmentEdgeManager.currentTime(), masterSystemTime);
-
-    // Put for parent
-    Put putOfMerged = MetaTableAccessor.makePutFromRegionInfo(copyOfMerged, time);
-    putOfMerged.addColumn(HConstants.CATALOG_FAMILY, HConstants.MERGEA_QUALIFIER,
-        regionA.toByteArray());
-    putOfMerged.addColumn(HConstants.CATALOG_FAMILY, HConstants.MERGEB_QUALIFIER,
-        regionB.toByteArray());
-    mutations.add(putOfMerged);
-    // Deletes for merging regions
-    Delete deleteA = MetaTableAccessor.makeDeleteFromRegionInfo(regionA, time);
-    Delete deleteB = MetaTableAccessor.makeDeleteFromRegionInfo(regionB, time);
-    mutations.add(deleteA);
-    mutations.add(deleteB);
-    // The merged is a new region, openSeqNum = 1 is fine.
-    addLocation(putOfMerged, serverName, 1);
-  }
-
-  @VisibleForTesting
-  Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
-    p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER, Bytes
-            .toBytes(sn.getHostAndPort()));
-    p.addColumn(HConstants.CATALOG_FAMILY, HConstants.STARTCODE_QUALIFIER, Bytes.toBytes(sn
-            .getStartcode()));
-    p.addColumn(HConstants.CATALOG_FAMILY, HConstants.SEQNUM_QUALIFIER, Bytes.toBytes(openSeqNum));
-    return p;
-  }
-
-  @VisibleForTesting
-  public HRegion stepsBeforePONR(final Server server, final RegionServerServices services,
-      boolean testing) throws IOException {
-    if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_MERGE,
-        mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
-      throw new IOException("Failed to get ok from master to merge "
-        + region_a.getRegionInfo().getRegionNameAsString() + " and "
-        + region_b.getRegionInfo().getRegionNameAsString());
-    }
-
-    transition(RegionMergeTransactionPhase.SET_MERGING);
-
-    this.region_a.getRegionFileSystem().createMergesDir();
-
-    transition(RegionMergeTransactionPhase.CREATED_MERGE_DIR);
-
-    Map<byte[], List<StoreFile>> hstoreFilesOfRegionA = closeAndOfflineRegion(
-        services, this.region_a, true, testing);
-    Map<byte[], List<StoreFile>> hstoreFilesOfRegionB = closeAndOfflineRegion(
-        services, this.region_b, false, testing);
-
-    assert hstoreFilesOfRegionA != null && hstoreFilesOfRegionB != null;
-
-    // mergeStoreFiles creates merged region dirs under the region_a merges dir
-    // Nothing to unroll here if failure -- clean up of CREATE_MERGE_DIR will
-    // clean this up.
-    mergeStoreFiles(hstoreFilesOfRegionA, hstoreFilesOfRegionB);
-
-    // Log to the journal that we are creating merged region. We could fail
-    // halfway through. If we do, we could have left
-    // stuff in fs that needs cleanup -- a storefile or two. That's why we
-    // add entry to journal BEFORE rather than AFTER the change.
-
-    transition(RegionMergeTransactionPhase.STARTED_MERGED_REGION_CREATION);
-
-    HRegion mergedRegion = createMergedRegionFromMerges(this.region_a,
-        this.region_b, this.mergedRegionInfo);
-    return mergedRegion;
-  }
-
-  /**
-   * Create a merged region from the merges directory under region a. This is
-   * kept in a separate method so it can be mocked in tests.
-   * @param a hri of region a
-   * @param b hri of region b
-   * @param mergedRegion hri of merged region
-   * @return merged HRegion.
-   * @throws IOException
-   */
-  @VisibleForTesting
-  HRegion createMergedRegionFromMerges(final HRegion a, final HRegion b,
-      final HRegionInfo mergedRegion) throws IOException {
-    return a.createMergedRegionFromMerges(mergedRegion, b);
-  }
-
-  /**
-   * Close the merging region and offline it in regionserver
-   * @param services
-   * @param region
-   * @param isRegionA true if it is merging region a, false if it is region b
-   * @param testing true if it is testing
-   * @return a map of family name to list of store files
-   * @throws IOException
-   */
-  private Map<byte[], List<StoreFile>> closeAndOfflineRegion(
-      final RegionServerServices services, final HRegion region,
-      final boolean isRegionA, final boolean testing) throws IOException {
-    Map<byte[], List<StoreFile>> hstoreFilesToMerge = null;
-    Exception exceptionToThrow = null;
-    try {
-      hstoreFilesToMerge = region.close(false);
-    } catch (Exception e) {
-      exceptionToThrow = e;
-    }
-    if (exceptionToThrow == null && hstoreFilesToMerge == null) {
-      // The region was closed by a concurrent thread. We can't continue
-      // with the merge, instead we must just abandon the merge. If we
-      // reopen or merge this could cause problems because the region has
-      // probably already been moved to a different server, or is in the
-      // process of moving to a different server.
-      exceptionToThrow = closedByOtherException;
-    }
-    if (exceptionToThrow != closedByOtherException) {
-      transition(isRegionA ? RegionMergeTransactionPhase.CLOSED_REGION_A
-          : RegionMergeTransactionPhase.CLOSED_REGION_B);
-    }
-    if (exceptionToThrow != null) {
-      if (exceptionToThrow instanceof IOException)
-        throw (IOException) exceptionToThrow;
-      throw new IOException(exceptionToThrow);
-    }
-    if (!testing) {
-      services.removeFromOnlineRegions(region, null);
-    }
-
-    transition(isRegionA ? RegionMergeTransactionPhase.OFFLINED_REGION_A
-        : RegionMergeTransactionPhase.OFFLINED_REGION_B);
-
-    return hstoreFilesToMerge;
-  }
-
-  /**
-   * Get merged region info for the specified two regions
-   * @param a merging region A
-   * @param b merging region B
-   * @return the merged region info
-   */
-  @VisibleForTesting
-  static HRegionInfo getMergedRegionInfo(final HRegionInfo a, final HRegionInfo b) {
-    long rid = EnvironmentEdgeManager.currentTime();
-    // Region id is a timestamp. The merged region's id can't be less than that of the
-    // merging regions, else it will be inserted at the wrong location in hbase:meta
-    if (rid < a.getRegionId() || rid < b.getRegionId()) {
-      LOG.warn("Clock skew; merging regions id are " + a.getRegionId()
-          + " and " + b.getRegionId() + ", but current time here is " + rid);
-      rid = Math.max(a.getRegionId(), b.getRegionId()) + 1;
-    }
-
-    byte[] startKey = null;
-    byte[] endKey = null;
-    // Choose the smaller as start key
-    if (a.compareTo(b) <= 0) {
-      startKey = a.getStartKey();
-    } else {
-      startKey = b.getStartKey();
-    }
-    // Choose the bigger as end key
-    if (Bytes.equals(a.getEndKey(), HConstants.EMPTY_BYTE_ARRAY)
-        || (!Bytes.equals(b.getEndKey(), HConstants.EMPTY_BYTE_ARRAY)
-            && Bytes.compareTo(a.getEndKey(), b.getEndKey()) > 0)) {
-      endKey = a.getEndKey();
-    } else {
-      endKey = b.getEndKey();
-    }
-
-    // Merged region is sorted between two merging regions in META
-    HRegionInfo mergedRegionInfo = new HRegionInfo(a.getTable(), startKey,
-        endKey, false, rid);
-    return mergedRegionInfo;
-  }
-
-  /**
-   * Perform time consuming opening of the merged region.
-   * @param server Hosting server instance. Can be null when testing
-   * @param services Used to online/offline regions.
-   * @param merged the merged region
-   * @throws IOException If thrown, transaction failed. Call
-   *           {@link #rollback(Server, RegionServerServices)}
-   */
-  @VisibleForTesting
-  void openMergedRegion(final Server server,  final RegionServerServices services,
-      HRegion merged) throws IOException {
-    boolean stopped = server != null && server.isStopped();
-    boolean stopping = services != null && services.isStopping();
-    if (stopped || stopping) {
-      LOG.info("Not opening merged region  " + merged.getRegionInfo().getRegionNameAsString()
-          + " because stopping=" + stopping + ", stopped=" + stopped);
-      return;
-    }
-    HRegionInfo hri = merged.getRegionInfo();
-    LoggingProgressable reporter = server == null ? null
-        : new LoggingProgressable(hri, server.getConfiguration().getLong(
-            "hbase.regionserver.regionmerge.open.log.interval", 10000));
-    merged.openHRegion(reporter);
-
-    if (services != null) {
-      if (!services.reportRegionStateTransition(TransitionCode.MERGED,
-          mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
-        throw new IOException("Failed to report merged region to master: "
-          + mergedRegionInfo.getShortNameToLog());
-      }
-      services.addToOnlineRegions(merged);
-    }
-  }
-
-  /**
-   * Create reference file(s) of merging regions under the region_a merges dir
-   * @param hstoreFilesOfRegionA
-   * @param hstoreFilesOfRegionB
-   * @throws IOException
-   */
-  private void mergeStoreFiles(
-      Map<byte[], List<StoreFile>> hstoreFilesOfRegionA,
-      Map<byte[], List<StoreFile>> hstoreFilesOfRegionB)
-      throws IOException {
-    // Create reference file(s) of region A in mergedir
-    HRegionFileSystem fs_a = this.region_a.getRegionFileSystem();
-    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionA
-        .entrySet()) {
-      String familyName = Bytes.toString(entry.getKey());
-      for (StoreFile storeFile : entry.getValue()) {
-        fs_a.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile,
-            this.mergesdir);
-      }
-    }
-    // Create reference file(s) of region B in mergedir
-    HRegionFileSystem fs_b = this.region_b.getRegionFileSystem();
-    for (Map.Entry<byte[], List<StoreFile>> entry : hstoreFilesOfRegionB
-        .entrySet()) {
-      String familyName = Bytes.toString(entry.getKey());
-      for (StoreFile storeFile : entry.getValue()) {
-        fs_b.mergeStoreFile(this.mergedRegionInfo, familyName, storeFile,
-            this.mergesdir);
-      }
-    }
-  }
-
-  @Override
-  public boolean rollback(final Server server,
-      final RegionServerServices services) throws IOException {
-    if (User.isHBaseSecurityEnabled(region_a.getBaseConf())) {
-      LOG.warn("Should use execute(Server, RegionServerServices, User)");
-    }
-    return rollback(server, services, null);
-  }
-
-  @Override
-  public boolean rollback(final Server server,
-      final RegionServerServices services, User user) throws IOException {
-    assert this.mergedRegionInfo != null;
-    this.server = server;
-    this.rsServices = services;
-    // Coprocessor callback
-    if (rsCoprocessorHost != null) {
-      rsCoprocessorHost.preRollBackMerge(region_a, region_b, user);
-    }
-
-    boolean result = true;
-    ListIterator<JournalEntry> iterator = this.journal
-        .listIterator(this.journal.size());
-    // Iterate in reverse.
-    while (iterator.hasPrevious()) {
-      JournalEntry je = iterator.previous();
-
-      transition(je.getPhase(), true);
-
-      switch (je.getPhase()) {
-
-        case SET_MERGING:
-          if (services != null
-              && !services.reportRegionStateTransition(TransitionCode.MERGE_REVERTED,
-                  mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
-            return false;
-        }
-          break;
-
-        case CREATED_MERGE_DIR:
-          this.region_a.writestate.writesEnabled = true;
-          this.region_b.writestate.writesEnabled = true;
-          this.region_a.getRegionFileSystem().cleanupMergesDir();
-          break;
-
-        case CLOSED_REGION_A:
-          try {
-            // This returns a seqid, but since we just closed and then reopened
-            // we should be ok. On close we flushed using a sequenceid obtained
-            // from the hosting regionserver, so there is no need to propagate
-            // the sequenceid returned by initialize() below up into the
-            // regionserver as we normally would.
-            this.region_a.initialize();
-          } catch (IOException e) {
-            LOG.error("Failed rollbacking CLOSED_REGION_A of region "
-                + region_a.getRegionInfo().getRegionNameAsString(), e);
-            throw new RuntimeException(e);
-          }
-          break;
-
-        case OFFLINED_REGION_A:
-          if (services != null)
-            services.addToOnlineRegions(this.region_a);
-          break;
-
-        case CLOSED_REGION_B:
-          try {
-            this.region_b.initialize();
-          } catch (IOException e) {
-            LOG.error("Failed rollbacking CLOSED_REGION_A of region "
-                + region_b.getRegionInfo().getRegionNameAsString(), e);
-            throw new RuntimeException(e);
-          }
-          break;
-
-        case OFFLINED_REGION_B:
-          if (services != null)
-            services.addToOnlineRegions(this.region_b);
-          break;
-
-        case STARTED_MERGED_REGION_CREATION:
-          this.region_a.getRegionFileSystem().cleanupMergedRegion(
-              this.mergedRegionInfo);
-          break;
-
-        case PONR:
-          // We got to the point-of-no-return so we need to just abort. Return
-          // immediately. Do not clean up created merged regions.
-          return false;
-
-         // Informational states only
-        case STARTED:
-        case PREPARED:
-        case COMPLETED:
-          break;
-
-        default:
-          throw new RuntimeException("Unhandled journal entry: " + je);
-      }
-    }
-    // Coprocessor callback
-    if (rsCoprocessorHost != null) {
-      rsCoprocessorHost.postRollBackMerge(region_a, region_b, user);
-    }
-
-    return result;
-  }
-
-  @Override
-  public HRegionInfo getMergedRegionInfo() {
-    return this.mergedRegionInfo;
-  }
-
-  @VisibleForTesting
-  Path getMergesDir() {
-    return this.mergesdir;
-  }
-
-  /**
-   * Checks if the given region has merge qualifier in hbase:meta
-   * @param services
-   * @param regionName name of specified region
-   * @return true if the given region has a merge qualifier in META (it will be
-   *         cleaned up by the CatalogJanitor)
-   * @throws IOException
-   */
-  @VisibleForTesting
-  boolean hasMergeQualifierInMeta(final RegionServerServices services, final byte[] regionName)
-      throws IOException {
-    if (services == null) return false;
-    // Get merge regions if it is a merged region and already has merge
-    // qualifier
-    Pair<HRegionInfo, HRegionInfo> mergeRegions = MetaTableAccessor
-        .getRegionsFromMergeQualifier(services.getConnection(), regionName);
-    if (mergeRegions != null &&
-        (mergeRegions.getFirst() != null || mergeRegions.getSecond() != null)) {
-      // It has merge qualifier
-      return true;
-    }
-    return false;
-  }
-
-  @Override
-  public List<JournalEntry> getJournal() {
-    return journal;
-  }
-
-  @Override
-  public RegionMergeTransaction registerTransactionListener(TransactionListener listener) {
-    listeners.add(listener);
-    return this;
-  }
-
-  @Override
-  public Server getServer() {
-    return server;
-  }
-
-  @Override
-  public RegionServerServices getRegionServerServices() {
-    return rsServices;
-  }
-}

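The region-server-side merge transaction above is removed by this commit; merges are now requested through the master. A minimal client-side sketch, assuming a live cluster, an open Connection, and a table that already has at least two regions (the class and method names of the sketch itself are illustrative, not part of this patch), mirroring the Admin calls exercised by the tests further down:

import java.util.List;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class MergeRequestSketch {
  /** Requests a merge of the table's first two regions and polls until it takes effect. */
  static void mergeFirstTwoRegions(Connection conn, TableName tableName) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      List<HRegionInfo> regions = admin.getTableRegions(tableName);
      int before = regions.size();
      // forcible = false: only adjacent regions may be merged
      admin.mergeRegionsAsync(
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes(),
          false);
      // Poll until the master-side merge is reflected in the region count.
      while (admin.getTableRegions(tableName).size() >= before) {
        Thread.sleep(1000);
      }
    }
  }
}
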
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
deleted file mode 100644
index 307568c..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HMerge.java
+++ /dev/null
@@ -1,348 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.Random;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.hadoop.ipc.RemoteException;
-
-/**
- * A non-instantiable class that has a static method capable of compacting
- * a table by merging adjacent regions.
- */
-@InterfaceAudience.Private
-class HMerge {
-  // TODO: Where is this class used?  How does it relate to Merge in same package?
-  private static final Log LOG = LogFactory.getLog(HMerge.class);
-  static final Random rand = new Random();
-
-  /*
-   * Not instantiable
-   */
-  private HMerge() {
-    super();
-  }
-
-  /**
-   * Scans the table and merges two adjacent regions if they are small. This
-   * only happens when a lot of rows are deleted.
-   *
-   * When merging the hbase:meta region, the HBase instance must be offline.
-   * When merging a normal table, the HBase instance must be online, but the
-   * table must be disabled.
-   *
-   * @param conf        - configuration object for HBase
-   * @param fs          - FileSystem where regions reside
-   * @param tableName   - Table to be compacted
-   * @throws IOException
-   */
-  public static void merge(Configuration conf, FileSystem fs,
-    final TableName tableName)
-  throws IOException {
-    merge(conf, fs, tableName, true);
-  }
-
-  /**
-   * Scans the table and merges two adjacent regions if they are small. This
-   * only happens when a lot of rows are deleted.
-   *
-   * When merging the hbase:meta region, the HBase instance must be offline.
-   * When merging a normal table, the HBase instance must be online, but the
-   * table must be disabled.
-   *
-   * @param conf        - configuration object for HBase
-   * @param fs          - FileSystem where regions reside
-   * @param tableName   - Table to be compacted
-   * @param testMasterRunning True if we are to verify master is down before
-   * running merge
-   * @throws IOException
-   */
-  public static void merge(Configuration conf, FileSystem fs,
-    final TableName tableName, final boolean testMasterRunning)
-  throws IOException {
-    boolean masterIsRunning = false;
-    ClusterConnection hConnection = null;
-    if (testMasterRunning) {
-      try {
-        hConnection = (ClusterConnection) ConnectionFactory.createConnection(conf);
-        masterIsRunning = hConnection.isMasterRunning();
-      } finally {
-        if (hConnection != null) {
-          hConnection.close();
-        }
-      }
-    }
-    if (tableName.equals(TableName.META_TABLE_NAME)) {
-      if (masterIsRunning) {
-        throw new IllegalStateException(
-            "Can not compact hbase:meta table if instance is on-line");
-      }
-      // TODO reenable new OfflineMerger(conf, fs).process();
-    } else {
-      if(!masterIsRunning) {
-        throw new IllegalStateException(
-            "HBase instance must be running to merge a normal table");
-      }
-      try (Connection conn = ConnectionFactory.createConnection(conf);
-          Admin admin = conn.getAdmin()) {
-        if (!admin.isTableDisabled(tableName)) {
-          throw new TableNotDisabledException(tableName);
-        }
-      }
-      new OnlineMerger(conf, fs, tableName).process();
-    }
-  }
-
-  private static abstract class Merger {
-    protected final Configuration conf;
-    protected final FileSystem fs;
-    protected final Path rootDir;
-    protected final HTableDescriptor htd;
-    protected final WALFactory walFactory;
-    private final long maxFilesize;
-
-
-    protected Merger(Configuration conf, FileSystem fs, final TableName tableName)
-    throws IOException {
-      this.conf = conf;
-      this.fs = fs;
-      this.maxFilesize = conf.getLong(HConstants.HREGION_MAX_FILESIZE,
-          HConstants.DEFAULT_MAX_FILE_SIZE);
-
-      this.rootDir = FSUtils.getRootDir(conf);
-      Path tabledir = FSUtils.getTableDir(this.rootDir, tableName);
-      this.htd = FSTableDescriptors.getTableDescriptorFromFs(this.fs, tabledir);
-      String logname = "merge_" + System.currentTimeMillis() + HConstants.HREGION_LOGDIR_NAME;
-
-      final Configuration walConf = new Configuration(conf);
-      FSUtils.setRootDir(walConf, tabledir);
-      this.walFactory = new WALFactory(walConf, null, logname);
-    }
-
-    void process() throws IOException {
-      try {
-        for (HRegionInfo[] regionsToMerge = next();
-            regionsToMerge != null;
-            regionsToMerge = next()) {
-          if (!merge(regionsToMerge)) {
-            return;
-          }
-        }
-      } finally {
-        try {
-          walFactory.close();
-        } catch(IOException e) {
-          LOG.error(e);
-        }
-      }
-    }
-
-    protected boolean merge(final HRegionInfo[] info) throws IOException {
-      if (info.length < 2) {
-        LOG.info("only one region - nothing to merge");
-        return false;
-      }
-
-      HRegion currentRegion = null;
-      long currentSize = 0;
-      HRegion nextRegion = null;
-      long nextSize = 0;
-      for (int i = 0; i < info.length - 1; i++) {
-        if (currentRegion == null) {
-          currentRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i], this.htd,
-              walFactory.getWAL(info[i].getEncodedNameAsBytes(),
-                info[i].getTable().getNamespace()));
-          currentSize = currentRegion.getLargestHStoreSize();
-        }
-        nextRegion = HRegion.openHRegion(conf, fs, this.rootDir, info[i + 1], this.htd,
-            walFactory.getWAL(info[i + 1].getEncodedNameAsBytes(),
-              info[i + 1].getTable().getNamespace()));
-        nextSize = nextRegion.getLargestHStoreSize();
-
-        if ((currentSize + nextSize) <= (maxFilesize / 2)) {
-          // We merge two adjacent regions if their total size is less than
-          // one half of the desired maximum size
-          LOG.info("Merging regions " + currentRegion.getRegionInfo().getRegionNameAsString() +
-            " and " + nextRegion.getRegionInfo().getRegionNameAsString());
-          HRegion mergedRegion =
-            HRegion.mergeAdjacent(currentRegion, nextRegion);
-          updateMeta(currentRegion.getRegionInfo().getRegionName(),
-            nextRegion.getRegionInfo().getRegionName(), mergedRegion);
-          break;
-        }
-        LOG.info("not merging regions " +
-          Bytes.toStringBinary(currentRegion.getRegionInfo().getRegionName()) +
-            " and " + Bytes.toStringBinary(nextRegion.getRegionInfo().getRegionName()));
-        currentRegion.close();
-        currentRegion = nextRegion;
-        currentSize = nextSize;
-      }
-      if(currentRegion != null) {
-        currentRegion.close();
-      }
-      return true;
-    }
-
-    protected abstract HRegionInfo[] next() throws IOException;
-
-    protected abstract void updateMeta(final byte [] oldRegion1,
-      final byte [] oldRegion2, HRegion newRegion)
-    throws IOException;
-
-  }
-
-  /** Instantiated to compact a normal user table */
-  private static class OnlineMerger extends Merger {
-    private final TableName tableName;
-    private final Table table;
-    private final ResultScanner metaScanner;
-    private HRegionInfo latestRegion;
-
-    OnlineMerger(Configuration conf, FileSystem fs,
-      final TableName tableName)
-    throws IOException {
-      super(conf, fs, tableName);
-      this.tableName = tableName;
-      Connection connection = ConnectionFactory.createConnection(conf);
-      this.table = connection.getTable(TableName.META_TABLE_NAME);
-      this.metaScanner = table.getScanner(HConstants.CATALOG_FAMILY,
-          HConstants.REGIONINFO_QUALIFIER);
-      this.latestRegion = null;
-    }
-
-    private HRegionInfo nextRegion() throws IOException {
-      try {
-        Result results = getMetaRow();
-        if (results == null) {
-          return null;
-        }
-        HRegionInfo region = MetaTableAccessor.getHRegionInfo(results);
-        if (region == null) {
-          throw new NoSuchElementException("meta region entry missing " +
-              Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
-              Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
-        }
-        if (!region.getTable().equals(this.tableName)) {
-          return null;
-        }
-        return region;
-      } catch (IOException e) {
-        e = e instanceof RemoteException ? ((RemoteException) e).unwrapRemoteException() : e;
-        LOG.error("meta scanner error", e);
-        metaScanner.close();
-        throw e;
-      }
-    }
-
-    /*
-     * Check that the current row has an HRegionInfo. Skip to the next row if the HRI is empty.
-     * @return the Result for the row, else null if we are off the end.
-     * @throws IOException
-     */
-    private Result getMetaRow() throws IOException {
-      Result currentRow = metaScanner.next();
-      boolean foundResult = false;
-      while (currentRow != null) {
-        LOG.info("Row: <" + Bytes.toStringBinary(currentRow.getRow()) + ">");
-        byte[] regionInfoValue = currentRow.getValue(HConstants.CATALOG_FAMILY,
-            HConstants.REGIONINFO_QUALIFIER);
-        if (regionInfoValue == null || regionInfoValue.length == 0) {
-          currentRow = metaScanner.next();
-          continue;
-        }
-        HRegionInfo region = MetaTableAccessor.getHRegionInfo(currentRow);
-        if (!region.getTable().equals(this.tableName)) {
-          currentRow = metaScanner.next();
-          continue;
-        }
-        foundResult = true;
-        break;
-      }
-      return foundResult ? currentRow : null;
-    }
-
-    @Override
-    protected HRegionInfo[] next() throws IOException {
-      List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
-      if(latestRegion == null) {
-        latestRegion = nextRegion();
-      }
-      if(latestRegion != null) {
-        regions.add(latestRegion);
-      }
-      latestRegion = nextRegion();
-      if(latestRegion != null) {
-        regions.add(latestRegion);
-      }
-      return regions.toArray(new HRegionInfo[regions.size()]);
-    }
-
-    @Override
-    protected void updateMeta(final byte [] oldRegion1,
-        final byte [] oldRegion2,
-      HRegion newRegion)
-    throws IOException {
-      byte[][] regionsToDelete = {oldRegion1, oldRegion2};
-      for (int r = 0; r < regionsToDelete.length; r++) {
-        if(Bytes.equals(regionsToDelete[r], latestRegion.getRegionName())) {
-          latestRegion = null;
-        }
-        Delete delete = new Delete(regionsToDelete[r]);
-        table.delete(delete);
-        if(LOG.isDebugEnabled()) {
-          LOG.debug("updated columns in row: " + Bytes.toStringBinary(regionsToDelete[r]));
-        }
-      }
-      newRegion.getRegionInfo().setOffline(true);
-
-      MetaTableAccessor.addRegionToMeta(table, newRegion.getRegionInfo());
-
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("updated columns in row: "
-            + Bytes.toStringBinary(newRegion.getRegionInfo().getRegionName()));
-      }
-    }
-  }
-}

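HMerge (removed above) decided whether two adjacent regions should be merged purely from their sizes: it merged them only when the combined largest-store size stayed at or below half of hbase.hregion.max.filesize. A standalone sketch of just that predicate, with the sizes passed in directly so no HBase API is involved (the class name is illustrative):

public final class MergeSizeCheck {
  private MergeSizeCheck() {
  }

  /**
   * Mirrors the removed HMerge criterion: two adjacent regions are merged only
   * if the sum of their largest store sizes is at most half of the configured
   * maximum region file size.
   */
  static boolean shouldMerge(long currentSize, long nextSize, long maxFilesize) {
    return (currentSize + nextSize) <= (maxFilesize / 2);
  }
}

For example, with the default 10 GB hbase.hregion.max.filesize, regions of 2 GB and 2.5 GB would qualify (4.5 GB is under the 5 GB threshold), while 3 GB and 2.5 GB would not.
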
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
deleted file mode 100644
index 7b96660..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/Merge.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.util;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.io.WritableComparator;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Utility that can merge any two regions in the same table: adjacent,
- * overlapping or disjoint.
- */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-public class Merge extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(Merge.class);
-  private Path rootdir;
-  private volatile MetaUtils utils;
-  private TableName tableName;               // Name of table
-  private volatile byte [] region1;        // Name of region 1
-  private volatile byte [] region2;        // Name of region 2
-  private volatile HRegionInfo mergeInfo = null;
-
-  @Override
-  public int run(String[] args) throws Exception {
-    if (parseArgs(args) != 0) {
-      return -1;
-    }
-
-    // Verify file system is up.
-    FileSystem fs = FileSystem.get(getConf());              // get DFS handle
-    LOG.info("Verifying that file system is available...");
-    try {
-      FSUtils.checkFileSystemAvailable(fs);
-    } catch (IOException e) {
-      LOG.fatal("File system is not available", e);
-      return -1;
-    }
-
-    // Verify HBase is down
-    LOG.info("Verifying that HBase is not running...");
-    try {
-      HBaseAdmin.available(getConf());
-      LOG.fatal("HBase cluster must be off-line, and is not. Aborting.");
-      return -1;
-    } catch (ZooKeeperConnectionException zkce) {
-      // If no zk, presume no master.
-    }
-
-    // Initialize MetaUtils and and get the root of the HBase installation
-
-    this.utils = new MetaUtils(getConf());
-    this.rootdir = FSUtils.getRootDir(getConf());
-    try {
-      mergeTwoRegions();
-      return 0;
-    } catch (IOException e) {
-      LOG.fatal("Merge failed", e);
-      return -1;
-
-    } finally {
-      if (this.utils != null) {
-        this.utils.shutdown();
-      }
-    }
-  }
-
-  /** @return HRegionInfo for merge result */
-  HRegionInfo getMergedHRegionInfo() {
-    return this.mergeInfo;
-  }
-
-  /*
-   * Merges two regions from a user table.
-   */
-  private void mergeTwoRegions() throws IOException {
-    LOG.info("Merging regions " + Bytes.toStringBinary(this.region1) + " and " +
-        Bytes.toStringBinary(this.region2) + " in table " + this.tableName);
-    HRegion meta = this.utils.getMetaRegion();
-    Get get = new Get(region1);
-    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    Result result1 =  meta.get(get);
-    Preconditions.checkState(!result1.isEmpty(),
-        "First region cells can not be null");
-    HRegionInfo info1 = MetaTableAccessor.getHRegionInfo(result1);
-    if (info1 == null) {
-      throw new NullPointerException("info1 is null using key " +
-          Bytes.toStringBinary(region1) + " in " + meta);
-    }
-    get = new Get(region2);
-    get.addColumn(HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER);
-    Result result2 =  meta.get(get);
-    Preconditions.checkState(!result2.isEmpty(),
-        "Second region cells can not be null");
-    HRegionInfo info2 = MetaTableAccessor.getHRegionInfo(result2);
-    if (info2 == null) {
-      throw new NullPointerException("info2 is null using key " + meta);
-    }
-    HTableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(FileSystem.get(getConf()),
-      this.rootdir, this.tableName);
-    HRegion merged = merge(htd, meta, info1, info2);
-
-    LOG.info("Adding " + merged.getRegionInfo() + " to " +
-        meta.getRegionInfo());
-
-    HRegion.addRegionToMETA(meta, merged);
-    merged.close();
-  }
-
-  /*
-   * Actually merge two regions and update their info in the meta region(s)
-   * Returns HRegion object for newly merged region
-   */
-  private HRegion merge(final HTableDescriptor htd, HRegion meta,
-                        HRegionInfo info1, HRegionInfo info2)
-  throws IOException {
-    if (info1 == null) {
-      throw new IOException("Could not find " + Bytes.toStringBinary(region1) + " in " +
-          Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
-    }
-    if (info2 == null) {
-      throw new IOException("Could not find " + Bytes.toStringBinary(region2) + " in " +
-          Bytes.toStringBinary(meta.getRegionInfo().getRegionName()));
-    }
-    HRegion merged = null;
-    HRegion r1 = HRegion.openHRegion(info1, htd, utils.getLog(info1), getConf());
-    try {
-      HRegion r2 = HRegion.openHRegion(info2, htd, utils.getLog(info2), getConf());
-      try {
-        merged = HRegion.merge(r1, r2);
-      } finally {
-        if (!r2.isClosed()) {
-          r2.close();
-        }
-      }
-    } finally {
-      if (!r1.isClosed()) {
-        r1.close();
-      }
-    }
-
-    // Remove the old regions from meta.
-    // HRegion.merge has already deleted their files
-
-    removeRegionFromMeta(meta, info1);
-    removeRegionFromMeta(meta, info2);
-
-    this.mergeInfo = merged.getRegionInfo();
-    return merged;
-  }
-
-  /*
-   * Removes a region's meta information from the passed <code>meta</code>
-   * region.
-   *
-   * @param meta hbase:meta HRegion to be updated
-   * @param regioninfo HRegionInfo of region to remove from <code>meta</code>
-   *
-   * @throws IOException
-   */
-  private void removeRegionFromMeta(HRegion meta, HRegionInfo regioninfo)
-  throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing region: " + regioninfo + " from " + meta);
-    }
-
-    Delete delete  = new Delete(regioninfo.getRegionName(),
-        System.currentTimeMillis());
-    meta.delete(delete);
-  }
-
-  /**
-   * Parse given arguments and assign table name and regions names.
-   * (generic args are handled by ToolRunner.)
-   *
-   * @param args the arguments to parse
-   *
-   * @throws IOException
-   */
-  private int parseArgs(String[] args) throws IOException {
-    if (args.length != 3) {
-      usage();
-      return -1;
-    }
-    tableName = TableName.valueOf(args[0]);
-
-    region1 = Bytes.toBytesBinary(args[1]);
-    region2 = Bytes.toBytesBinary(args[2]);
-    int status = 0;
-    if (notInTable(tableName, region1) || notInTable(tableName, region2)) {
-      status = -1;
-    } else if (Bytes.equals(region1, region2)) {
-      LOG.error("Can't merge a region with itself");
-      status = -1;
-    }
-    return status;
-  }
-
-  private boolean notInTable(final TableName tn, final byte [] rn) {
-    if (WritableComparator.compareBytes(tn.getName(), 0, tn.getName().length,
-        rn, 0, tn.getName().length) != 0) {
-      LOG.error("Region " + Bytes.toStringBinary(rn) + " does not belong to table " +
-        tn);
-      return true;
-    }
-    return false;
-  }
-
-  private void usage() {
-    System.err
-        .println("For hadoop 0.21+, Usage: hbase org.apache.hadoop.hbase.util.Merge "
-            + "[-Dfs.defaultFS=hdfs://nn:port] <table-name> <region-1> <region-2>\n");
-  }
-
-  public static void main(String[] args) {
-    int status;
-    try {
-      status = ToolRunner.run(HBaseConfiguration.create(), new Merge(), args);
-    } catch (Exception e) {
-      LOG.error("exiting due to error", e);
-      status = -1;
-    }
-    System.exit(status);
-  }
-}

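The Merge tool (also removed above) only ran against an offline cluster: it probed for a running master and aborted if one answered, treating a ZooKeeperConnectionException as "no cluster". A small sketch of that guard, reusing the same HBaseAdmin.available() check the tool performed (the wrapper class is illustrative, and this is not a complete liveness test):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.client.HBaseAdmin;

public final class OfflineClusterGuard {
  private OfflineClusterGuard() {
  }

  /** Returns true only when no running HBase cluster appears to be reachable. */
  static boolean clusterLooksOffline(Configuration conf) throws IOException {
    try {
      HBaseAdmin.available(conf);
      return false; // a master answered, so the cluster is up
    } catch (ZooKeeperConnectionException zkce) {
      return true;  // no ZooKeeper quorum reachable, presume no master
    }
  }
}
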
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index 65cedda..b138c7d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -55,7 +55,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.regionserver.StoreFile;
@@ -1189,15 +1189,17 @@ public class TestAdmin1 {
     assertTrue(gotException);
     // Try going to the master directly (that will skip the check in admin)
     try {
-      DispatchMergingRegionsRequest request = RequestConverter
-          .buildDispatchMergingRegionsRequest(
-            regions.get(1).getFirst().getEncodedNameAsBytes(),
-            regions.get(2).getFirst().getEncodedNameAsBytes(),
+      byte[][] nameofRegionsToMerge = new byte[2][];
+      nameofRegionsToMerge[0] = regions.get(1).getFirst().getEncodedNameAsBytes();
+      nameofRegionsToMerge[1] = regions.get(2).getFirst().getEncodedNameAsBytes();
+      MergeTableRegionsRequest request = RequestConverter
+          .buildMergeTableRegionsRequest(
+            nameofRegionsToMerge,
             true,
             HConstants.NO_NONCE,
-            HConstants.NO_NONCE);
+            HConstants.NO_NONCE);
       ((ClusterConnection) TEST_UTIL.getAdmin().getConnection()).getMaster()
-        .dispatchMergingRegions(null, request);
+        .mergeTableRegions(null, request);
     } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException m) {
       Throwable t = m.getCause();
       do {
@@ -1209,40 +1211,6 @@ public class TestAdmin1 {
       } while (t != null);
     }
     assertTrue(gotException);
-    gotException = false;
-    // Try going to the regionservers directly
-    // first move the region to the same regionserver
-    if (!regions.get(2).getSecond().equals(regions.get(1).getSecond())) {
-      moveRegionAndWait(regions.get(2).getFirst(), regions.get(1).getSecond());
-    }
-    try {
-      AdminService.BlockingInterface admin = ((ClusterConnection) TEST_UTIL.getAdmin()
-        .getConnection()).getAdmin(regions.get(1).getSecond());
-      ProtobufUtil.mergeRegions(null, admin, regions.get(1).getFirst(), regions.get(2).getFirst(),
-        true, null);
-    } catch (MergeRegionException mm) {
-      gotException = true;
-    }
-    assertTrue(gotException);
-  }
-
-  private void moveRegionAndWait(HRegionInfo destRegion, ServerName destServer)
-      throws InterruptedException, MasterNotRunningException,
-      ZooKeeperConnectionException, IOException {
-    HMaster master = TEST_UTIL.getMiniHBaseCluster().getMaster();
-    TEST_UTIL.getAdmin().move(
-        destRegion.getEncodedNameAsBytes(),
-        Bytes.toBytes(destServer.getServerName()));
-    while (true) {
-      ServerName serverName = master.getAssignmentManager()
-          .getRegionStates().getRegionServerOfRegion(destRegion);
-      if (serverName != null && serverName.equals(destServer)) {
-        TEST_UTIL.assertRegionOnServer(
-            destRegion, serverName, 200);
-        break;
-      }
-      Thread.sleep(10);
-    }
   }
 
   @Test (expected=IllegalArgumentException.class, timeout=300000)

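With the DispatchMergingRegions RPC gone, the updated test above drives the master directly through the new MergeTableRegionsRequest. A condensed sketch of that call path, assuming a ClusterConnection is at hand and the two encoded region names are already known (the wrapper class is illustrative; error handling is omitted for brevity):

import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;

public class DirectMasterMergeSketch {
  /** Asks the master to merge the two regions identified by their encoded names. */
  static void mergeViaMaster(ClusterConnection conn, byte[] encodedNameA, byte[] encodedNameB)
      throws Exception {
    byte[][] namesOfRegionsToMerge = new byte[][] { encodedNameA, encodedNameB };
    MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest(
        namesOfRegionsToMerge,
        true,                 // forcible
        HConstants.NO_NONCE,  // nonce group
        HConstants.NO_NONCE); // nonce
    conn.getMaster().mergeTableRegions(null, request);
  }
}
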
http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
deleted file mode 100644
index 4a62bff..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerObserver.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.coprocessor;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.MiniHBaseCluster;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.hadoop.hbase.regionserver.Region;
-import org.apache.hadoop.hbase.regionserver.RegionMergeTransactionFactory;
-import org.apache.hadoop.hbase.regionserver.RegionMergeTransactionImpl;
-import org.apache.hadoop.hbase.regionserver.RegionServerCoprocessorHost;
-import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Ignore;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests invocation of the {@link org.apache.hadoop.hbase.coprocessor.RegionServerObserver}
- * interface hooks at all appropriate times during normal HMaster operations.
- */
-@Category({CoprocessorTests.class, MediumTests.class})
-public class TestRegionServerObserver {
-  private static final Log LOG = LogFactory.getLog(TestRegionServerObserver.class);
-
-  /**
-   * Test verifies the hooks in regions merge.
-   * @throws Exception
-   */
-  @Ignore
-  @Test
-  public void testCoprocessorHooksInRegionsMerge() throws Exception {
-    final int NUM_MASTERS = 1;
-    final int NUM_RS = 1;
-    final String TABLENAME = "testRegionServerObserver";
-    final String TABLENAME2 = "testRegionServerObserver_2";
-    final byte[] FAM = Bytes.toBytes("fam");
-
-    // Create config to use for this cluster
-    Configuration conf = HBaseConfiguration.create();
-    conf.setClass("hbase.coprocessor.regionserver.classes", CPRegionServerObserver.class,
-      RegionServerObserver.class);
-
-    // Start the cluster
-    HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
-    TEST_UTIL.startMiniCluster(NUM_MASTERS, NUM_RS);
-    Admin admin = TEST_UTIL.getHBaseAdmin();
-    try {
-      MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
-      HRegionServer regionServer = cluster.getRegionServer(0);
-      RegionServerCoprocessorHost cpHost = regionServer.getRegionServerCoprocessorHost();
-      Coprocessor coprocessor = cpHost.findCoprocessor(CPRegionServerObserver.class.getName());
-      CPRegionServerObserver regionServerObserver = (CPRegionServerObserver) coprocessor;
-      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLENAME));
-      desc.addFamily(new HColumnDescriptor(FAM));
-      admin.createTable(desc, new byte[][] { Bytes.toBytes("row") });
-      desc = new HTableDescriptor(TableName.valueOf(TABLENAME2));
-      desc.addFamily(new HColumnDescriptor(FAM));
-      admin.createTable(desc, new byte[][] { Bytes.toBytes("row") });
-      assertFalse(regionServerObserver.wasRegionMergeCalled());
-      List<Region> regions = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME));
-      admin.mergeRegionsAsync(
-        regions.get(0).getRegionInfo().getEncodedNameAsBytes(),
-        regions.get(1).getRegionInfo().getEncodedNameAsBytes(),
-        true);
-      int regionsCount = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)).size();
-      while (regionsCount != 1) {
-        regionsCount = regionServer.getOnlineRegions(TableName.valueOf(TABLENAME)).size();
-        Thread.sleep(1000);
-      }
-      assertTrue(regionServerObserver.wasRegionMergeCalled());
-      assertTrue(regionServerObserver.wasPreMergeCommit());
-      assertTrue(regionServerObserver.wasPostMergeCommit());
-      assertEquals(regionsCount, 1);
-      assertEquals(regionServer.getOnlineRegions(TableName.valueOf(TABLENAME2)).size(), 1);
-    } finally {
-      if (admin != null) admin.close();
-      TEST_UTIL.shutdownMiniCluster();
-    }
-  }
-
-  public static class CPRegionServerObserver extends BaseRegionServerObserver {
-    private RegionMergeTransactionImpl rmt = null;
-    private HRegion mergedRegion = null;
-
-    private boolean preMergeCalled;
-    private boolean preMergeBeforePONRCalled;
-    private boolean preMergeAfterPONRCalled;
-    private boolean preRollBackMergeCalled;
-    private boolean postRollBackMergeCalled;
-    private boolean postMergeCalled;
-
-    public void resetStates() {
-      preMergeCalled = false;
-      preMergeBeforePONRCalled = false;
-      preMergeAfterPONRCalled = false;
-      preRollBackMergeCalled = false;
-      postRollBackMergeCalled = false;
-      postMergeCalled = false;
-    }
-
-    @Override
-    public void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
-        Region regionB) throws IOException {
-      preMergeCalled = true;
-    }
-
-    @Override
-    public void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-        Region regionA, Region regionB, List<Mutation> metaEntries) throws IOException {
-      preMergeBeforePONRCalled = true;
-      RegionServerCoprocessorEnvironment environment = ctx.getEnvironment();
-      HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
-      List<Region> onlineRegions =
-          rs.getOnlineRegions(TableName.valueOf("testRegionServerObserver_2"));
-      rmt = (RegionMergeTransactionImpl) new RegionMergeTransactionFactory(rs.getConfiguration())
-        .create(onlineRegions.get(0), onlineRegions.get(1), true);
-      if (!rmt.prepare(rs)) {
-        LOG.error("Prepare for the region merge of table "
-            + onlineRegions.get(0).getTableDesc().getNameAsString()
-            + " failed. So returning null. ");
-        ctx.bypass();
-        return;
-      }
-      mergedRegion = rmt.stepsBeforePONR(rs, rs, false);
-      rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(), regionA.getRegionInfo(),
-        regionB.getRegionInfo(), rs.getServerName(), metaEntries);
-      MetaTableAccessor.mutateMetaTable(rs.getConnection(), metaEntries);
-    }
-
-    @Override
-    public void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-        Region regionA, Region regionB, Region mr) throws IOException {
-      preMergeAfterPONRCalled = true;
-      RegionServerCoprocessorEnvironment environment = ctx.getEnvironment();
-      HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
-      rmt.stepsAfterPONR(rs, rs, this.mergedRegion, null);
-    }
-
-    @Override
-    public void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-        Region regionA, Region regionB) throws IOException {
-      preRollBackMergeCalled = true;
-    }
-
-    @Override
-    public void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
-        Region regionA, Region regionB) throws IOException {
-      postRollBackMergeCalled = true;
-    }
-
-    @Override
-    public void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
-        Region regionB, Region mergedRegion) throws IOException {
-      postMergeCalled = true;
-    }
-
-    public boolean wasPreMergeCalled() {
-      return this.preMergeCalled;
-    }
-
-    public boolean wasPostMergeCalled() {
-      return this.postMergeCalled;
-    }
-
-    public boolean wasPreMergeCommit() {
-      return this.preMergeBeforePONRCalled;
-    }
-
-    public boolean wasPostMergeCommit() {
-      return this.preMergeAfterPONRCalled;
-    }
-
-    public boolean wasPreRollBackMerge() {
-      return this.preRollBackMergeCalled;
-    }
-
-    public boolean wasPostRollBackMerge() {
-      return this.postRollBackMergeCalled;
-    }
-
-    public boolean wasRegionMergeCalled() {
-      return this.preMergeCalled && this.postMergeCalled;
-    }
-
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 28bf14a..48cf8a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -320,16 +320,6 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
-  public long dispatchMergingRegions(
-      final HRegionInfo region_a,
-      final HRegionInfo region_b,
-      final boolean forcible,
-      final long nonceGroup,
-      final long nonce) throws IOException {
-    return -1;
-  }
-
-  @Override
   public boolean isActiveMaster() {
     return true;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 950ec92..c5f294a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -68,8 +68,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerIn
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
@@ -531,13 +529,6 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
-  public MergeRegionsResponse mergeRegions(RpcController controller,
-      MergeRegionsRequest request) throws ServiceException {
-    // TODO Auto-generated method stub
-    return null;
-  }
-
-  @Override
   public CompactRegionResponse compactRegion(RpcController controller,
       CompactRegionRequest request) throws ServiceException {
     // TODO Auto-generated method stub

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java
deleted file mode 100644
index c011321..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestDispatchMergingRegionsProcedure.java
+++ /dev/null
@@ -1,264 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.procedure;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.DispatchMergingRegionsState;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-@Category({MasterTests.class, MediumTests.class})
-public class TestDispatchMergingRegionsProcedure {
-  private static final Log LOG = LogFactory.getLog(TestDispatchMergingRegionsProcedure.class);
-
-  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
-  final static Configuration conf = UTIL.getConfiguration();
-  private static Admin admin;
-
-  private static void setupConf(Configuration conf) {
-    // Reduce the maximum attempts to speed up the test
-    conf.setInt("hbase.assignment.maximum.attempts", 3);
-    conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
-    conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);
-
-    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    setupConf(conf);
-    UTIL.startMiniCluster(1);
-    admin = UTIL.getHBaseAdmin();
-  }
-
-  @AfterClass
-  public static void cleanupTest() throws Exception {
-    try {
-      UTIL.shutdownMiniCluster();
-    } catch (Exception e) {
-      LOG.warn("failure shutting down cluster", e);
-    }
-  }
-
-  @Before
-  public void setup() throws Exception {
-    resetProcExecutorTestingKillFlag();
-
-    // Turn off balancer so it doesn't cut in and mess up our placements.
-    UTIL.getHBaseAdmin().setBalancerRunning(false, true);
-    // Turn off the meta scanner so it don't remove parent on us.
-    UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false);
-    resetProcExecutorTestingKillFlag();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    resetProcExecutorTestingKillFlag();
-    for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
-      LOG.info("Tear down, remove table=" + htd.getTableName());
-      UTIL.deleteTable(htd.getTableName());
-    }
-  }
-
-  private void resetProcExecutorTestingKillFlag() {
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
-    assertTrue("expected executor to be running", procExec.isRunning());
-  }
-
-  /**
-   * This tests two region merges
-   */
-  @Test(timeout=60000)
-  public void testMergeTwoRegions() throws Exception {
-    final TableName tableName = TableName.valueOf("testMergeTwoRegions");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName, 3);
-
-    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge[0] = tableRegions.get(0);
-    regionsToMerge[1] = tableRegions.get(1);
-
-    final int initCompletedTaskCount = countOfCompletedMergeTaskCount();
-    long procId = procExec.submitProcedure(new DispatchMergingRegionsProcedure(
-      procExec.getEnvironment(), tableName, regionsToMerge, true));
-    ProcedureTestingUtility.waitProcedure(procExec, procId);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
-
-    assertRegionCount(tableName, 2, 1, initCompletedTaskCount);
-  }
-
-  /**
-   * This tests two concurrent region merges
-   */
-  @Test(timeout=60000)
-  public void testMergeRegionsConcurrently() throws Exception {
-    final TableName tableName = TableName.valueOf("testMergeRegionsConcurrently");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName, 4);
-
-    HRegionInfo[] regionsToMerge1 = new HRegionInfo[2];
-    HRegionInfo[] regionsToMerge2 = new HRegionInfo[2];
-    regionsToMerge1[0] = tableRegions.get(0);
-    regionsToMerge1[1] = tableRegions.get(1);
-    regionsToMerge2[0] = tableRegions.get(2);
-    regionsToMerge2[1] = tableRegions.get(3);
-
-    final int initCompletedTaskCount = countOfCompletedMergeTaskCount();
-    long procId1 = procExec.submitProcedure(new DispatchMergingRegionsProcedure(
-      procExec.getEnvironment(), tableName, regionsToMerge1, true));
-    long procId2 = procExec.submitProcedure(new DispatchMergingRegionsProcedure(
-      procExec.getEnvironment(), tableName, regionsToMerge2, true));
-    ProcedureTestingUtility.waitProcedure(procExec, procId1);
-    ProcedureTestingUtility.waitProcedure(procExec, procId2);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
-    assertRegionCount(tableName, 2, 2, initCompletedTaskCount);
-  }
-
-  private void waitForCompletedMergeTask(int expectedTaskCount, int initCompletedTaskCount)
-      throws IOException, InterruptedException {
-    while (true) {
-      long currentCompletedTaskCount = countOfCompletedMergeTaskCount() - initCompletedTaskCount;
-      if (currentCompletedTaskCount == expectedTaskCount) {
-        return;
-      }
-      LOG.info("There are " + (expectedTaskCount - currentCompletedTaskCount) +
-        " merge requests are not completed, wait 100 ms");
-      TimeUnit.MILLISECONDS.sleep(100);
-    }
-  }
-
-  private static int countOfCompletedMergeTaskCount() {
-    int completedTaskCount = 0;
-    for (RegionServerThread server : UTIL.getMiniHBaseCluster().getRegionServerThreads()) {
-      completedTaskCount += server.getRegionServer().getCompactSplitThread().getCompletedMergeTaskCount();
-    }
-    return completedTaskCount;
-  }
-
-  @Test(timeout=60000)
-  public void testRecoveryAndDoubleExecution() throws Exception {
-    final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName, 3);
-
-    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
-
-    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge[0] = tableRegions.get(0);
-    regionsToMerge[1] = tableRegions.get(1);
-
-    final int initCompletedTaskCount = countOfCompletedMergeTaskCount();
-    long procId = procExec.submitProcedure(
-      new DispatchMergingRegionsProcedure(
-        procExec.getEnvironment(), tableName, regionsToMerge, true));
-
-    // Restart the executor and execute the step twice
-    int numberOfSteps = DispatchMergingRegionsState.values().length;
-    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
-
-    assertRegionCount(tableName, 2, 1, initCompletedTaskCount);
-  }
-
-  @Test(timeout = 60000)
-  public void testRollbackAndDoubleExecution() throws Exception {
-    final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName, 3);
-
-    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
-
-    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge[0] = tableRegions.get(0);
-    regionsToMerge[1] = tableRegions.get(1);
-
-    long procId = procExec.submitProcedure(
-      new DispatchMergingRegionsProcedure(
-        procExec.getEnvironment(), tableName, regionsToMerge, true));
-
-    int numberOfSteps = DispatchMergingRegionsState.values().length - 3;
-    MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps);
-  }
-
-  private List<HRegionInfo> createTable(final TableName tableName, final int nregions)
-      throws Exception {
-    HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.addFamily(new HColumnDescriptor(FAMILY));
-    byte[][] splitRows = new byte[nregions - 1][];
-    for (int i = 0; i < splitRows.length; ++i) {
-      splitRows[i] = Bytes.toBytes(String.format("%d", i));
-    }
-    admin.createTable(desc, splitRows);
-    return assertRegionCount(tableName, nregions);
-  }
-
-  public List<HRegionInfo> assertRegionCount(final TableName tableName, final int nregions)
-      throws Exception {
-    UTIL.waitUntilNoRegionsInTransition();
-    List<HRegionInfo> tableRegions = admin.getTableRegions(tableName);
-    assertEquals(nregions, tableRegions.size());
-    return tableRegions;
-  }
-
-  public List<HRegionInfo> assertRegionCount(final TableName tableName, final int nregions,
-      int expectedTaskCount, int initCompletedTaskCount) throws Exception {
-    waitForCompletedMergeTask(expectedTaskCount, initCompletedTaskCount);
-    return assertRegionCount(tableName, nregions);
-  }
-
-  private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
-    return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index a63fec6..c973471 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -2646,55 +2646,6 @@ public class TestHRegion {
     }
   }
 
-  // ////////////////////////////////////////////////////////////////////////////
-  // Merge test
-  // ////////////////////////////////////////////////////////////////////////////
-  @Test
-  public void testMerge() throws IOException {
-    byte[][] families = { fam1, fam2, fam3 };
-    Configuration hc = initSplit();
-    // Setting up region
-    this.region = initHRegion(tableName, method, hc, families);
-    try {
-      LOG.info("" + HBaseTestCase.addContent(region, fam3));
-      region.flush(true);
-      region.compactStores();
-      byte[] splitRow = region.checkSplit();
-      assertNotNull(splitRow);
-      LOG.info("SplitRow: " + Bytes.toString(splitRow));
-      HRegion[] subregions = splitRegion(region, splitRow);
-      try {
-        // Need to open the regions.
-        for (int i = 0; i < subregions.length; i++) {
-          HRegion.openHRegion(subregions[i], null);
-          subregions[i].compactStores();
-        }
-        Path oldRegionPath = region.getRegionFileSystem().getRegionDir();
-        Path oldRegion1 = subregions[0].getRegionFileSystem().getRegionDir();
-        Path oldRegion2 = subregions[1].getRegionFileSystem().getRegionDir();
-        long startTime = System.currentTimeMillis();
-        region = HRegion.mergeAdjacent(subregions[0], subregions[1]);
-        LOG.info("Merge regions elapsed time: "
-            + ((System.currentTimeMillis() - startTime) / 1000.0));
-        FILESYSTEM.delete(oldRegion1, true);
-        FILESYSTEM.delete(oldRegion2, true);
-        FILESYSTEM.delete(oldRegionPath, true);
-        LOG.info("splitAndMerge completed.");
-      } finally {
-        for (int i = 0; i < subregions.length; i++) {
-          try {
-            HBaseTestingUtility.closeRegionAndWAL(subregions[i]);
-          } catch (IOException e) {
-            // Ignore.
-          }
-        }
-      }
-    } finally {
-      HBaseTestingUtility.closeRegionAndWAL(this.region);
-      this.region = null;
-    }
-  }
-
   /**
    * @param parent
    *          Region to split.


[46/50] [abbrv] hbase git commit: HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)

Posted by el...@apache.org.
HBASE-16995 Build client Java API and client protobuf messages (Josh Elser)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/df2916fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/df2916fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/df2916fc

Branch: refs/heads/HBASE-16961
Commit: df2916fc2bc4cd55401d15ee4b069b7a17cefd64
Parents: 980c8c2
Author: tedyu <yu...@gmail.com>
Authored: Thu Nov 17 10:19:52 2016 -0800
Committer: Josh Elser <el...@apache.org>
Committed: Mon Jan 23 17:48:59 2017 -0500

----------------------------------------------------------------------
 .../hbase/quotas/QuotaSettingsFactory.java      |   47 +
 .../apache/hadoop/hbase/quotas/QuotaType.java   |    1 +
 .../hadoop/hbase/quotas/SpaceLimitSettings.java |  166 ++
 .../hbase/quotas/SpaceViolationPolicy.java      |   44 +
 .../hbase/shaded/protobuf/ProtobufUtil.java     |   51 +
 .../hbase/quotas/TestQuotaSettingsFactory.java  |  148 ++
 .../hbase/quotas/TestSpaceLimitSettings.java    |  119 ++
 .../shaded/protobuf/generated/MasterProtos.java |  570 ++++--
 .../shaded/protobuf/generated/QuotaProtos.java  | 1739 +++++++++++++++++-
 .../src/main/protobuf/Master.proto              |    2 +
 .../src/main/protobuf/Quota.proto               |   21 +
 .../hbase/protobuf/generated/QuotaProtos.java   | 1682 ++++++++++++++++-
 hbase-protocol/src/main/protobuf/Quota.proto    |   21 +
 13 files changed, 4284 insertions(+), 327 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
index a7c49b3..b8e99b8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaSettingsFactory.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRe
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
 
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
@@ -91,6 +92,9 @@ public class QuotaSettingsFactory {
     if (quotas.getBypassGlobals() == true) {
       settings.add(new QuotaGlobalsSettingsBypass(userName, tableName, namespace, true));
     }
+    if (quotas.hasSpace()) {
+      settings.add(fromSpace(tableName, namespace, quotas.getSpace()));
+    }
     return settings;
   }
 
@@ -124,6 +128,18 @@ public class QuotaSettingsFactory {
     return settings;
   }
 
+  static QuotaSettings fromSpace(TableName table, String namespace, SpaceQuota protoQuota) {
+    if ((null == table && null == namespace) || (null != table && null != namespace)) {
+      throw new IllegalArgumentException("Can only construct SpaceLimitSettings for a table or namespace.");
+    }
+    if (null != table) {
+      return SpaceLimitSettings.fromSpaceQuota(table, protoQuota);
+    } else {
+      // namespace must be non-null
+      return SpaceLimitSettings.fromSpaceQuota(namespace, protoQuota);
+    }
+  }
+
   /* ==========================================================================
    *  RPC Throttle
    */
@@ -280,4 +296,35 @@ public class QuotaSettingsFactory {
   public static QuotaSettings bypassGlobals(final String userName, final boolean bypassGlobals) {
     return new QuotaGlobalsSettingsBypass(userName, null, null, bypassGlobals);
   }
+
+  /* ==========================================================================
+   *  FileSystem Space Settings
+   */
+
+  /**
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given table to the given size in bytes.
+   * When the space usage is exceeded by the table, the provided {@link SpaceViolationPolicy} is enacted on the table.
+   *
+   * @param tableName The name of the table on which the quota should be applied.
+   * @param sizeLimit The limit of a table's size in bytes.
+   * @param violationPolicy The action to take when the quota is exceeded.
+   * @return A {@link QuotaSettings} object.
+   */
+  public static QuotaSettings limitTableSpace(final TableName tableName, long sizeLimit, final SpaceViolationPolicy violationPolicy) {
+    return new SpaceLimitSettings(tableName, sizeLimit, violationPolicy);
+  }
+
+  /**
+   * Creates a {@link QuotaSettings} object to limit the FileSystem space usage for the given namespace to the given size in bytes.
+   * When the space usage is exceeded by all tables in the namespace, the provided {@link SpaceViolationPolicy} is enacted on
+   * all tables in the namespace.
+   *
+   * @param namespace The namespace on which the quota should be applied.
+   * @param sizeLimit The limit of the namespace's size in bytes.
+   * @param violationPolicy The action to take when the quota is exceeded.
+   * @return A {@link QuotaSettings} object.
+   */
+  public static QuotaSettings limitNamespaceSpace(final String namespace, long sizeLimit, final SpaceViolationPolicy violationPolicy) {
+    return new SpaceLimitSettings(namespace, sizeLimit, violationPolicy);
+  }
 }
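
For orientation, a minimal client-side sketch of how these new factory methods are meant to be used, assuming the existing Admin#setQuota(QuotaSettings) entry point and an already-opened Admin handle; the table name, namespace, and sizes are illustrative only:

  // Cap table 'tbl1' at 50 GB of FileSystem usage; disable the table once the cap is exceeded.
  QuotaSettings tableQuota = QuotaSettingsFactory.limitTableSpace(
      TableName.valueOf("tbl1"), 50L * 1024 * 1024 * 1024, SpaceViolationPolicy.DISABLE);
  admin.setQuota(tableQuota);

  // Cap all tables in namespace 'ns1' at a combined 100 GB; reject further writes past the cap.
  QuotaSettings nsQuota = QuotaSettingsFactory.limitNamespaceSpace(
      "ns1", 100L * 1024 * 1024 * 1024, SpaceViolationPolicy.NO_WRITES);
  admin.setQuota(nsQuota);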

http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaType.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaType.java
index 40a8b66..2c44201 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaType.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaType.java
@@ -28,4 +28,5 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 public enum QuotaType {
   THROTTLE,
   GLOBAL_BYPASS,
+  SPACE,
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
new file mode 100644
index 0000000..dded9b5
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceLimitSettings.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import java.util.Objects;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest.Builder;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
+
+/**
+ * A {@link QuotaSettings} implementation for implementing filesystem-use quotas.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+class SpaceLimitSettings extends QuotaSettings {
+
+  private final SpaceLimitRequest proto;
+
+  SpaceLimitSettings(TableName tableName, long sizeLimit, SpaceViolationPolicy violationPolicy) {
+    super(null, Objects.requireNonNull(tableName), null);
+    if (0L > sizeLimit) {
+      throw new IllegalArgumentException("Size limit must be a non-negative value.");
+    }
+    proto = buildProtoQuota(sizeLimit, Objects.requireNonNull(violationPolicy));
+  }
+
+  SpaceLimitSettings(String namespace, long sizeLimit, SpaceViolationPolicy violationPolicy) {
+    super(null, null, Objects.requireNonNull(namespace));
+    if (0L > sizeLimit) {
+      throw new IllegalArgumentException("Size limit must be a non-negative value.");
+    }
+    proto = buildProtoQuota(sizeLimit, Objects.requireNonNull(violationPolicy));
+  }
+
+  /**
+   * Builds a {@link SpaceLimitRequest} protobuf object wrapping a {@link SpaceQuota} built from the given arguments.
+   *
+   * @param sizeLimit The size limit of the quota.
+   * @param violationPolicy The action to take when the quota is exceeded.
+   * @return The protobuf SpaceLimitRequest representation.
+   */
+  private SpaceLimitRequest buildProtoQuota(long sizeLimit, SpaceViolationPolicy violationPolicy) {
+    return SpaceLimitRequest.newBuilder().setQuota(
+        SpaceQuota.newBuilder()
+            .setSoftLimit(sizeLimit)
+            .setViolationPolicy(ProtobufUtil.toProtoViolationPolicy(violationPolicy))
+            .build())
+        .build();
+  }
+
+  /**
+   * Returns a copy of the internal state of <code>this</code>.
+   */
+  SpaceLimitRequest getProto() {
+    return proto.toBuilder().build();
+  }
+
+  @Override
+  public QuotaType getQuotaType() {
+    return QuotaType.SPACE;
+  }
+
+  @Override
+  protected void setupSetQuotaRequest(Builder builder) {
+    // TableName/Namespace are serialized in QuotaSettings
+    builder.setSpaceLimit(proto);
+  }
+
+  /**
+   * Constructs a {@link SpaceLimitSettings} from the provided protobuf message and table name.
+   *
+   * @param tableName The target tablename for the limit.
+   * @param proto The protobuf representation.
+   * @return A QuotaSettings.
+   */
+  static SpaceLimitSettings fromSpaceQuota(final TableName tableName,
+      final QuotaProtos.SpaceQuota proto) {
+    validateProtoArguments(proto);
+    return new SpaceLimitSettings(tableName, proto.getSoftLimit(),
+        ProtobufUtil.toViolationPolicy(proto.getViolationPolicy()));
+  }
+
+  /**
+   * Constructs a {@link SpaceLimitSettings} from the provided protobuf message and namespace.
+   *
+   * @param namespace The target namespace for the limit.
+   * @param proto The protobuf representation.
+   * @return A QuotaSettings.
+   */
+  static SpaceLimitSettings fromSpaceQuota(final String namespace,
+      final QuotaProtos.SpaceQuota proto) {
+    validateProtoArguments(proto);
+    return new SpaceLimitSettings(namespace, proto.getSoftLimit(),
+        ProtobufUtil.toViolationPolicy(proto.getViolationPolicy()));
+  }
+
+  /**
+   * Validates that the provided protobuf SpaceQuota has the necessary information to construct
+   * a {@link SpaceLimitSettings}.
+   *
+   * @param proto The protobuf message to validate.
+   */
+  static void validateProtoArguments(final QuotaProtos.SpaceQuota proto) {
+    if (!Objects.requireNonNull(proto).hasSoftLimit()) {
+      throw new IllegalArgumentException("Cannot handle SpaceQuota without a soft limit");
+    }
+    if (!proto.hasViolationPolicy()) {
+      throw new IllegalArgumentException("Cannot handle SpaceQuota without a violation policy");
+    }
+  }
+
+  @Override
+  public int hashCode() {
+    return Objects.hash(getTableName(), getNamespace(), proto);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) {
+      return true;
+    }
+    if (!(o instanceof SpaceLimitSettings)) {
+      return false;
+    }
+    // o is non-null and an instance of SpaceLimitSettings
+    SpaceLimitSettings other = (SpaceLimitSettings) o;
+    return Objects.equals(getTableName(), other.getTableName()) &&
+        Objects.equals(getNamespace(), other.getNamespace()) &&
+        Objects.equals(proto, other.proto);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder();
+    sb.append("TYPE => SPACE");
+    if (null != getTableName()) {
+      sb.append(", TABLE => ").append(getTableName());
+    }
+    if (null != getNamespace()) {
+      sb.append(", NAMESPACE => ").append(getNamespace());
+    }
+    sb.append(", LIMIT => ").append(proto.getQuota().getSoftLimit());
+    sb.append(", VIOLATION_POLICY => ").append(proto.getQuota().getViolationPolicy());
+    return sb.toString();
+  }
+}
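
A rough sketch of the proto round trip the settings class above supports, using the generated SpaceQuota builder added elsewhere in this commit and assuming same-package access, as the tests later in this commit have; the table name and limit are placeholders:

  QuotaProtos.SpaceQuota quota = QuotaProtos.SpaceQuota.newBuilder()
      .setSoftLimit(1024L * 1024L * 1024L) // 1 GB soft limit
      .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.NO_INSERTS)
      .build();
  SpaceLimitSettings settings =
      SpaceLimitSettings.fromSpaceQuota(TableName.valueOf("tbl1"), quota);
  // getProto() hands back a copy, so callers cannot mutate the stored request.
  long softLimit = settings.getProto().getQuota().getSoftLimit();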

http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
new file mode 100644
index 0000000..c63acb0
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Enumeration that represents the action HBase will take when a space quota is violated.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public enum SpaceViolationPolicy {
+  /**
+   * Disables the table(s).
+   */
+  DISABLE,
+  /**
+   * Disallows any mutations or compactions on the table(s).
+   */
+  NO_WRITES_COMPACTIONS,
+  /**
+   * Disallows any mutations (but allows compactions) on the table(s).
+   */
+  NO_WRITES,
+  /**
+   * Disallows any updates (but allows deletes and compactions) on the table(s).
+   */
+  NO_INSERTS,
+}
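
The four policies trade off availability against protection; a hypothetical selection, only to illustrate the distinction the javadoc above draws:

  // NO_INSERTS still permits deletes (useful for cleanup), while NO_WRITES blocks all mutations.
  boolean allowCleanupDeletes = true; // illustrative flag, not part of the API
  SpaceViolationPolicy policy = allowCleanupDeletes
      ? SpaceViolationPolicy.NO_INSERTS
      : SpaceViolationPolicy.NO_WRITES;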

http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 7764f65..4f18138 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -88,6 +88,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.quotas.QuotaScope;
 import org.apache.hadoop.hbase.quotas.QuotaType;
+import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
 import org.apache.hadoop.hbase.quotas.ThrottleType;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSink;
 import org.apache.hadoop.hbase.replication.ReplicationLoadSource;
@@ -2497,6 +2498,7 @@ public final class ProtobufUtil {
   public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) {
     switch (proto) {
       case THROTTLE: return QuotaType.THROTTLE;
+      case SPACE: return QuotaType.SPACE;
     }
     throw new RuntimeException("Invalid QuotaType " + proto);
   }
@@ -2510,11 +2512,45 @@ public final class ProtobufUtil {
   public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) {
     switch (type) {
       case THROTTLE: return QuotaProtos.QuotaType.THROTTLE;
+      case SPACE: return QuotaProtos.QuotaType.SPACE;
     }
     throw new RuntimeException("Invalid QuotaType " + type);
   }
 
   /**
+   * Converts a protocol buffer SpaceViolationPolicy to a client SpaceViolationPolicy.
+   *
+   * @param proto The protocol buffer space violation policy.
+   * @return The corresponding client SpaceViolationPolicy.
+   */
+  public static SpaceViolationPolicy toViolationPolicy(final QuotaProtos.SpaceViolationPolicy proto) {
+    switch (proto) {
+      case DISABLE: return SpaceViolationPolicy.DISABLE;
+      case NO_WRITES_COMPACTIONS: return SpaceViolationPolicy.NO_WRITES_COMPACTIONS;
+      case NO_WRITES: return SpaceViolationPolicy.NO_WRITES;
+      case NO_INSERTS: return SpaceViolationPolicy.NO_INSERTS;
+    }
+    throw new RuntimeException("Invalid SpaceViolationPolicy " + proto);
+  }
+
+  /**
+   * Converts a client SpaceViolationPolicy to a protocol buffer SpaceViolationPolicy.
+   *
+   * @param policy The client SpaceViolationPolicy object.
+   * @return The corresponding protocol buffer SpaceViolationPolicy.
+   */
+  public static QuotaProtos.SpaceViolationPolicy toProtoViolationPolicy(
+      final SpaceViolationPolicy policy) {
+    switch (policy) {
+      case DISABLE: return QuotaProtos.SpaceViolationPolicy.DISABLE;
+      case NO_WRITES_COMPACTIONS: return QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS;
+      case NO_WRITES: return QuotaProtos.SpaceViolationPolicy.NO_WRITES;
+      case NO_INSERTS: return QuotaProtos.SpaceViolationPolicy.NO_INSERTS;
+    }
+    throw new RuntimeException("Invalid SpaceViolationPolicy " + policy);
+  }
+
+  /**
    * Build a protocol buffer TimedQuota
    *
    * @param limit the allowed number of request/data per timeUnit
@@ -2532,6 +2568,21 @@ public final class ProtobufUtil {
   }
 
   /**
+   * Builds a protocol buffer SpaceQuota.
+   *
+   * @param limit The maximum space usage for the quota in bytes.
+   * @param violationPolicy The policy to apply when the quota is violated.
+   * @return The protocol buffer SpaceQuota.
+   */
+  public static QuotaProtos.SpaceQuota toProtoSpaceQuota(final long limit,
+      final SpaceViolationPolicy violationPolicy) {
+    return QuotaProtos.SpaceQuota.newBuilder()
+        .setSoftLimit(limit)
+        .setViolationPolicy(toProtoViolationPolicy(violationPolicy))
+        .build();
+  }
+
+  /**
    * Generates a marker for the WAL so that we propagate the notion of a bulk region load
    * throughout the WAL.
    *
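
A small sketch of the new conversion helpers above; the limit is arbitrary and the assertion simply restates that the client enum survives a round trip through its protobuf counterpart:

  QuotaProtos.SpaceQuota proto =
      ProtobufUtil.toProtoSpaceQuota(1024L * 1024L, SpaceViolationPolicy.NO_WRITES);
  SpaceViolationPolicy roundTripped = ProtobufUtil.toViolationPolicy(proto.getViolationPolicy());
  assert roundTripped == SpaceViolationPolicy.NO_WRITES;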

http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
new file mode 100644
index 0000000..17015d6
--- /dev/null
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Throttle;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test class for {@link QuotaSettingsFactory}.
+ */
+@Category(SmallTests.class)
+public class TestQuotaSettingsFactory {
+  
+  @Test
+  public void testAllQuotasAddedToList() {
+    final SpaceQuota spaceQuota = SpaceQuota.newBuilder()
+        .setSoftLimit(1024L * 1024L * 1024L * 50L) // 50G
+        .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) // Disable the table
+        .build();
+    final long readLimit = 1000;
+    final long writeLimit = 500;
+    final Throttle throttle = Throttle.newBuilder()
+        // 1000 read reqs/min
+        .setReadNum(TimedQuota.newBuilder().setSoftLimit(readLimit).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build())
+        // 500 write reqs/min
+        .setWriteNum(TimedQuota.newBuilder().setSoftLimit(writeLimit).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build())
+        .build();
+    final Quotas quotas = Quotas.newBuilder()
+        .setSpace(spaceQuota) // Set the FS quotas
+        .setThrottle(throttle) // Set some RPC limits
+        .build();
+    final TableName tn = TableName.valueOf("my_table");
+    List<QuotaSettings> settings = QuotaSettingsFactory.fromTableQuotas(tn, quotas);
+    assertEquals(3, settings.size());
+    boolean seenRead = false;
+    boolean seenWrite = false;
+    boolean seenSpace = false;
+    for (QuotaSettings setting : settings) {
+      if (setting instanceof ThrottleSettings) {
+        ThrottleSettings throttleSettings = (ThrottleSettings) setting;
+        switch (throttleSettings.getThrottleType()) {
+          case READ_NUMBER:
+            assertFalse("Should not have multiple read quotas", seenRead);
+            assertEquals(readLimit, throttleSettings.getSoftLimit());
+            assertEquals(TimeUnit.MINUTES, throttleSettings.getTimeUnit());
+            assertEquals(tn, throttleSettings.getTableName());
+            assertNull("Username should be null", throttleSettings.getUserName());
+            assertNull("Namespace should be null", throttleSettings.getNamespace());
+            seenRead = true;
+            break;
+          case WRITE_NUMBER:
+            assertFalse("Should not have multiple write quotas", seenWrite);
+            assertEquals(writeLimit, throttleSettings.getSoftLimit());
+            assertEquals(TimeUnit.MINUTES, throttleSettings.getTimeUnit());
+            assertEquals(tn, throttleSettings.getTableName());
+            assertNull("Username should be null", throttleSettings.getUserName());
+            assertNull("Namespace should be null", throttleSettings.getNamespace());
+            seenWrite = true;
+            break;
+          default:
+            fail("Unexpected throttle type: " + throttleSettings.getThrottleType());
+        }
+      } else if (setting instanceof SpaceLimitSettings) {
+        assertFalse("Should not have multiple space quotas", seenSpace);
+        SpaceLimitSettings spaceLimit = (SpaceLimitSettings) setting;
+        assertEquals(tn, spaceLimit.getTableName());
+        assertNull("Username should be null", spaceLimit.getUserName());
+        assertNull("Namespace should be null", spaceLimit.getNamespace());
+        assertTrue("SpaceLimitSettings should have a SpaceQuota", spaceLimit.getProto().hasQuota());
+        assertEquals(spaceQuota, spaceLimit.getProto().getQuota());
+        seenSpace = true;
+      } else {
+        fail("Unexpected QuotaSettings implementation: " + setting.getClass());
+      }
+    }
+    assertTrue("Should have seen a read quota", seenRead);
+    assertTrue("Should have seen a write quota", seenWrite);
+    assertTrue("Should have seen a space quota", seenSpace);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testNeitherTableNorNamespace() {
+    final SpaceQuota spaceQuota = SpaceQuota.newBuilder()
+        .setSoftLimit(1L)
+        .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE)
+        .build();
+    QuotaSettingsFactory.fromSpace(null, null, spaceQuota);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testBothTableAndNamespace() {
+    final SpaceQuota spaceQuota = SpaceQuota.newBuilder()
+        .setSoftLimit(1L)
+        .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE)
+        .build();
+    QuotaSettingsFactory.fromSpace(TableName.valueOf("foo"), "bar", spaceQuota);
+  }
+
+  @Test
+  public void testSpaceLimitSettings() {
+    final TableName tableName = TableName.valueOf("foo");
+    final long sizeLimit = 1024L * 1024L * 1024L * 75; // 75GB
+    final SpaceViolationPolicy violationPolicy = SpaceViolationPolicy.NO_INSERTS;
+    QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(tableName, sizeLimit, violationPolicy);
+    assertNotNull("QuotaSettings should not be null", settings);
+    assertTrue("Should be an instance of SpaceLimitSettings", settings instanceof SpaceLimitSettings);
+    SpaceLimitSettings spaceLimitSettings = (SpaceLimitSettings) settings;
+    SpaceLimitRequest protoRequest = spaceLimitSettings.getProto();
+    assertTrue("Request should have a SpaceQuota", protoRequest.hasQuota());
+    SpaceQuota quota = protoRequest.getQuota();
+    assertEquals(sizeLimit, quota.getSoftLimit());
+    assertEquals(violationPolicy, ProtobufUtil.toViolationPolicy(quota.getViolationPolicy()));
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java
new file mode 100644
index 0000000..77a00da
--- /dev/null
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.quotas;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test class for {@link SpaceLimitSettings}.
+ */
+@Category({SmallTests.class})
+public class TestSpaceLimitSettings {
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testInvalidTableQuotaSizeLimit() {
+    new SpaceLimitSettings(TableName.valueOf("foo"), -1, SpaceViolationPolicy.NO_INSERTS);
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testNullTableName() {
+    TableName tn = null;
+    new SpaceLimitSettings(tn, 1, SpaceViolationPolicy.NO_INSERTS);
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testNullTableViolationPolicy() {
+    new SpaceLimitSettings(TableName.valueOf("foo"), 1, null);
+  }
+
+  @Test(expected = IllegalArgumentException.class)
+  public void testInvalidNamespaceQuotaSizeLimit() {
+    new SpaceLimitSettings("foo_ns", -1, SpaceViolationPolicy.NO_INSERTS);
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testNullNamespace() {
+    String ns = null;
+    new SpaceLimitSettings(ns, 1, SpaceViolationPolicy.NO_INSERTS);
+  }
+
+  @Test(expected = NullPointerException.class)
+  public void testNullNamespaceViolationPolicy() {
+    new SpaceLimitSettings("foo_ns", 1, null);
+  }
+
+  @Test
+  public void testTableQuota() {
+    final TableName tableName = TableName.valueOf("foo");
+    final long sizeLimit = 1024 * 1024;
+    final SpaceViolationPolicy policy = SpaceViolationPolicy.NO_WRITES;
+    SpaceLimitSettings settings = new SpaceLimitSettings(tableName, sizeLimit, policy);
+    SetQuotaRequest proto = QuotaSettings.buildSetQuotaRequestProto(settings);
+
+    assertFalse("User should be missing", proto.hasUserName());
+    assertFalse("Namespace should be missing", proto.hasNamespace());
+    assertEquals(ProtobufUtil.toProtoTableName(tableName), proto.getTableName());
+    SpaceLimitRequest spaceLimitReq = proto.getSpaceLimit();
+    assertNotNull("SpaceLimitRequest was null", spaceLimitReq);
+    SpaceQuota spaceQuota = spaceLimitReq.getQuota();
+    assertNotNull("SpaceQuota was null", spaceQuota);
+    assertEquals(sizeLimit, spaceQuota.getSoftLimit());
+    assertEquals(ProtobufUtil.toProtoViolationPolicy(policy), spaceQuota.getViolationPolicy());
+
+    assertEquals(QuotaType.SPACE, settings.getQuotaType());
+
+    SpaceLimitSettings copy = new SpaceLimitSettings(tableName, sizeLimit, policy);
+    assertEquals(settings, copy);
+    assertEquals(settings.hashCode(), copy.hashCode());
+  }
+
+  @Test
+  public void testNamespaceQuota() {
+    final String namespace = "foo_ns";
+    final long sizeLimit = 1024 * 1024;
+    final SpaceViolationPolicy policy = SpaceViolationPolicy.NO_WRITES;
+    SpaceLimitSettings settings = new SpaceLimitSettings(namespace, sizeLimit, policy);
+    SetQuotaRequest proto = QuotaSettings.buildSetQuotaRequestProto(settings);
+
+    assertFalse("User should be missing", proto.hasUserName());
+    assertFalse("TableName should be missing", proto.hasTableName());
+    assertEquals(namespace, proto.getNamespace());
+    SpaceLimitRequest spaceLimitReq = proto.getSpaceLimit();
+    assertNotNull("SpaceLimitRequest was null", spaceLimitReq);
+    SpaceQuota spaceQuota = spaceLimitReq.getQuota();
+    assertNotNull("SpaceQuota was null", spaceQuota);
+    assertEquals(sizeLimit, spaceQuota.getSoftLimit());
+    assertEquals(ProtobufUtil.toProtoViolationPolicy(policy), spaceQuota.getViolationPolicy());
+
+    assertEquals(QuotaType.SPACE, settings.getQuotaType());
+
+    SpaceLimitSettings copy = new SpaceLimitSettings(namespace, sizeLimit, policy);
+    assertEquals(settings, copy);
+    assertEquals(settings.hashCode(), copy.hashCode());
+  }
+}
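
Condensing what the two tests above assert, and assuming the same package-private QuotaSettings.buildSetQuotaRequestProto helper and shaded proto imports they use, the wire-level shape looks roughly like this (the new optional space_limit field is tag 8 in the generated MasterProtos code that follows):

  QuotaSettings settings = QuotaSettingsFactory.limitTableSpace(
      TableName.valueOf("foo"), 1024L * 1024L, SpaceViolationPolicy.NO_WRITES);
  SetQuotaRequest req = QuotaSettings.buildSetQuotaRequestProto(settings);
  // The SpaceQuota rides inside the request's space_limit message.
  long softLimit = req.getSpaceLimit().getQuota().getSoftLimit();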

http://git-wip-us.apache.org/repos/asf/hbase/blob/df2916fc/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 6a737b8..6305779 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -59598,6 +59598,19 @@ public final class MasterProtos {
      * <code>optional .hbase.pb.ThrottleRequest throttle = 7;</code>
      */
     org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequestOrBuilder getThrottleOrBuilder();
+
+    /**
+     * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+     */
+    boolean hasSpaceLimit();
+    /**
+     * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getSpaceLimit();
+    /**
+     * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder getSpaceLimitOrBuilder();
   }
   /**
    * Protobuf type {@code hbase.pb.SetQuotaRequest}
@@ -59700,6 +59713,19 @@ public final class MasterProtos {
               bitField0_ |= 0x00000040;
               break;
             }
+            case 66: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000080) == 0x00000080)) {
+                subBuilder = spaceLimit_.toBuilder();
+              }
+              spaceLimit_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(spaceLimit_);
+                spaceLimit_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000080;
+              break;
+            }
           }
         }
       } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
@@ -59923,6 +59949,27 @@ public final class MasterProtos {
       return throttle_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest.getDefaultInstance() : throttle_;
     }
 
+    public static final int SPACE_LIMIT_FIELD_NUMBER = 8;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest spaceLimit_;
+    /**
+     * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+     */
+    public boolean hasSpaceLimit() {
+      return ((bitField0_ & 0x00000080) == 0x00000080);
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getSpaceLimit() {
+      return spaceLimit_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance() : spaceLimit_;
+    }
+    /**
+     * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder getSpaceLimitOrBuilder() {
+      return spaceLimit_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance() : spaceLimit_;
+    }
+
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
       byte isInitialized = memoizedIsInitialized;
@@ -59968,6 +60015,9 @@ public final class MasterProtos {
       if (((bitField0_ & 0x00000040) == 0x00000040)) {
         output.writeMessage(7, getThrottle());
       }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        output.writeMessage(8, getSpaceLimit());
+      }
       unknownFields.writeTo(output);
     }
 
@@ -60001,6 +60051,10 @@ public final class MasterProtos {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
           .computeMessageSize(7, getThrottle());
       }
+      if (((bitField0_ & 0x00000080) == 0x00000080)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(8, getSpaceLimit());
+      }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
       return size;
@@ -60053,6 +60107,11 @@ public final class MasterProtos {
         result = result && getThrottle()
             .equals(other.getThrottle());
       }
+      result = result && (hasSpaceLimit() == other.hasSpaceLimit());
+      if (hasSpaceLimit()) {
+        result = result && getSpaceLimit()
+            .equals(other.getSpaceLimit());
+      }
       result = result && unknownFields.equals(other.unknownFields);
       return result;
     }
@@ -60094,6 +60153,10 @@ public final class MasterProtos {
         hash = (37 * hash) + THROTTLE_FIELD_NUMBER;
         hash = (53 * hash) + getThrottle().hashCode();
       }
+      if (hasSpaceLimit()) {
+        hash = (37 * hash) + SPACE_LIMIT_FIELD_NUMBER;
+        hash = (53 * hash) + getSpaceLimit().hashCode();
+      }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -60210,6 +60273,7 @@ public final class MasterProtos {
                 .alwaysUseFieldBuilders) {
           getTableNameFieldBuilder();
           getThrottleFieldBuilder();
+          getSpaceLimitFieldBuilder();
         }
       }
       public Builder clear() {
@@ -60236,6 +60300,12 @@ public final class MasterProtos {
           throttleBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000040);
+        if (spaceLimitBuilder_ == null) {
+          spaceLimit_ = null;
+        } else {
+          spaceLimitBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000080);
         return this;
       }
 
@@ -60296,6 +60366,14 @@ public final class MasterProtos {
         } else {
           result.throttle_ = throttleBuilder_.build();
         }
+        if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
+          to_bitField0_ |= 0x00000080;
+        }
+        if (spaceLimitBuilder_ == null) {
+          result.spaceLimit_ = spaceLimit_;
+        } else {
+          result.spaceLimit_ = spaceLimitBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -60365,6 +60443,9 @@ public final class MasterProtos {
         if (other.hasThrottle()) {
           mergeThrottle(other.getThrottle());
         }
+        if (other.hasSpaceLimit()) {
+          mergeSpaceLimit(other.getSpaceLimit());
+        }
         this.mergeUnknownFields(other.unknownFields);
         onChanged();
         return this;
@@ -60930,6 +61011,124 @@ public final class MasterProtos {
         }
         return throttleBuilder_;
       }
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest spaceLimit_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder> spaceLimitBuilder_;
+      /**
+       * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+       */
+      public boolean hasSpaceLimit() {
+        return ((bitField0_ & 0x00000080) == 0x00000080);
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest getSpaceLimit() {
+        if (spaceLimitBuilder_ == null) {
+          return spaceLimit_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance() : spaceLimit_;
+        } else {
+          return spaceLimitBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+       */
+      public Builder setSpaceLimit(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest value) {
+        if (spaceLimitBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          spaceLimit_ = value;
+          onChanged();
+        } else {
+          spaceLimitBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000080;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+       */
+      public Builder setSpaceLimit(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder builderForValue) {
+        if (spaceLimitBuilder_ == null) {
+          spaceLimit_ = builderForValue.build();
+          onChanged();
+        } else {
+          spaceLimitBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000080;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+       */
+      public Builder mergeSpaceLimit(org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest value) {
+        if (spaceLimitBuilder_ == null) {
+          if (((bitField0_ & 0x00000080) == 0x00000080) &&
+              spaceLimit_ != null &&
+              spaceLimit_ != org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance()) {
+            spaceLimit_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.newBuilder(spaceLimit_).mergeFrom(value).buildPartial();
+          } else {
+            spaceLimit_ = value;
+          }
+          onChanged();
+        } else {
+          spaceLimitBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000080;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+       */
+      public Builder clearSpaceLimit() {
+        if (spaceLimitBuilder_ == null) {
+          spaceLimit_ = null;
+          onChanged();
+        } else {
+          spaceLimitBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000080);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder getSpaceLimitBuilder() {
+        bitField0_ |= 0x00000080;
+        onChanged();
+        return getSpaceLimitFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder getSpaceLimitOrBuilder() {
+        if (spaceLimitBuilder_ != null) {
+          return spaceLimitBuilder_.getMessageOrBuilder();
+        } else {
+          return spaceLimit_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.getDefaultInstance() : spaceLimit_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.SpaceLimitRequest space_limit = 8;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder> 
+          getSpaceLimitFieldBuilder() {
+        if (spaceLimitBuilder_ == null) {
+          spaceLimitBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequestOrBuilder>(
+                  getSpaceLimit(),
+                  getParentForChildren(),
+                  isClean());
+          spaceLimit_ = null;
+        }
+        return spaceLimitBuilder_;
+      }
       public final Builder setUnknownFields(
           final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
         return super.setUnknownFields(unknownFields);
@@ -73729,194 +73928,195 @@ public final class MasterProtos {
       "edureResponse\022\034\n\024is_procedure_aborted\030\001 " +
       "\002(\010\"\027\n\025ListProceduresRequest\"@\n\026ListProc" +
       "eduresResponse\022&\n\tprocedure\030\001 \003(\0132\023.hbas" +
-      "e.pb.Procedure\"\315\001\n\017SetQuotaRequest\022\021\n\tus" +
+      "e.pb.Procedure\"\377\001\n\017SetQuotaRequest\022\021\n\tus" +
       "er_name\030\001 \001(\t\022\022\n\nuser_group\030\002 \001(\t\022\021\n\tnam" +
       "espace\030\003 \001(\t\022\'\n\ntable_name\030\004 \001(\0132\023.hbase",
       ".pb.TableName\022\022\n\nremove_all\030\005 \001(\010\022\026\n\016byp" +
       "ass_globals\030\006 \001(\010\022+\n\010throttle\030\007 \001(\0132\031.hb" +
-      "ase.pb.ThrottleRequest\"\022\n\020SetQuotaRespon" +
-      "se\"J\n\037MajorCompactionTimestampRequest\022\'\n" +
-      "\ntable_name\030\001 \002(\0132\023.hbase.pb.TableName\"U" +
-      "\n(MajorCompactionTimestampForRegionReque" +
-      "st\022)\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpec" +
-      "ifier\"@\n MajorCompactionTimestampRespons" +
-      "e\022\034\n\024compaction_timestamp\030\001 \002(\003\"\035\n\033Secur" +
-      "ityCapabilitiesRequest\"\354\001\n\034SecurityCapab",
-      "ilitiesResponse\022G\n\014capabilities\030\001 \003(\01621." +
-      "hbase.pb.SecurityCapabilitiesResponse.Ca" +
-      "pability\"\202\001\n\nCapability\022\031\n\025SIMPLE_AUTHEN" +
-      "TICATION\020\000\022\031\n\025SECURE_AUTHENTICATION\020\001\022\021\n" +
-      "\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHORIZATION\020\003" +
-      "\022\023\n\017CELL_VISIBILITY\020\004\"\"\n ListDrainingReg" +
-      "ionServersRequest\"N\n!ListDrainingRegionS" +
-      "erversResponse\022)\n\013server_name\030\001 \003(\0132\024.hb" +
-      "ase.pb.ServerName\"F\n\031DrainRegionServersR" +
-      "equest\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb.S",
-      "erverName\"\034\n\032DrainRegionServersResponse\"" +
-      "P\n#RemoveDrainFromRegionServersRequest\022)" +
-      "\n\013server_name\030\001 \003(\0132\024.hbase.pb.ServerNam" +
-      "e\"&\n$RemoveDrainFromRegionServersRespons" +
-      "e*(\n\020MasterSwitchType\022\t\n\005SPLIT\020\000\022\t\n\005MERG" +
-      "E\020\0012\2221\n\rMasterService\022e\n\024GetSchemaAlterS" +
-      "tatus\022%.hbase.pb.GetSchemaAlterStatusReq" +
-      "uest\032&.hbase.pb.GetSchemaAlterStatusResp" +
-      "onse\022b\n\023GetTableDescriptors\022$.hbase.pb.G" +
-      "etTableDescriptorsRequest\032%.hbase.pb.Get",
-      "TableDescriptorsResponse\022P\n\rGetTableName" +
-      "s\022\036.hbase.pb.GetTableNamesRequest\032\037.hbas" +
-      "e.pb.GetTableNamesResponse\022Y\n\020GetCluster" +
-      "Status\022!.hbase.pb.GetClusterStatusReques" +
-      "t\032\".hbase.pb.GetClusterStatusResponse\022V\n" +
-      "\017IsMasterRunning\022 .hbase.pb.IsMasterRunn" +
-      "ingRequest\032!.hbase.pb.IsMasterRunningRes" +
-      "ponse\022D\n\tAddColumn\022\032.hbase.pb.AddColumnR" +
-      "equest\032\033.hbase.pb.AddColumnResponse\022M\n\014D" +
-      "eleteColumn\022\035.hbase.pb.DeleteColumnReque",
-      "st\032\036.hbase.pb.DeleteColumnResponse\022M\n\014Mo" +
-      "difyColumn\022\035.hbase.pb.ModifyColumnReques" +
-      "t\032\036.hbase.pb.ModifyColumnResponse\022G\n\nMov" +
-      "eRegion\022\033.hbase.pb.MoveRegionRequest\032\034.h" +
-      "base.pb.MoveRegionResponse\022\\\n\021MergeTable" +
-      "Regions\022\".hbase.pb.MergeTableRegionsRequ" +
-      "est\032#.hbase.pb.MergeTableRegionsResponse" +
-      "\022M\n\014AssignRegion\022\035.hbase.pb.AssignRegion" +
-      "Request\032\036.hbase.pb.AssignRegionResponse\022" +
-      "S\n\016UnassignRegion\022\037.hbase.pb.UnassignReg",
-      "ionRequest\032 .hbase.pb.UnassignRegionResp" +
-      "onse\022P\n\rOfflineRegion\022\036.hbase.pb.Offline" +
-      "RegionRequest\032\037.hbase.pb.OfflineRegionRe" +
-      "sponse\022J\n\013DeleteTable\022\034.hbase.pb.DeleteT" +
-      "ableRequest\032\035.hbase.pb.DeleteTableRespon" +
-      "se\022P\n\rtruncateTable\022\036.hbase.pb.TruncateT" +
-      "ableRequest\032\037.hbase.pb.TruncateTableResp" +
-      "onse\022J\n\013EnableTable\022\034.hbase.pb.EnableTab" +
-      "leRequest\032\035.hbase.pb.EnableTableResponse" +
-      "\022M\n\014DisableTable\022\035.hbase.pb.DisableTable",
-      "Request\032\036.hbase.pb.DisableTableResponse\022" +
-      "J\n\013ModifyTable\022\034.hbase.pb.ModifyTableReq" +
-      "uest\032\035.hbase.pb.ModifyTableResponse\022J\n\013C" +
-      "reateTable\022\034.hbase.pb.CreateTableRequest" +
-      "\032\035.hbase.pb.CreateTableResponse\022A\n\010Shutd" +
-      "own\022\031.hbase.pb.ShutdownRequest\032\032.hbase.p" +
-      "b.ShutdownResponse\022G\n\nStopMaster\022\033.hbase" +
-      ".pb.StopMasterRequest\032\034.hbase.pb.StopMas" +
-      "terResponse\022h\n\031IsMasterInMaintenanceMode" +
-      "\022$.hbase.pb.IsInMaintenanceModeRequest\032%",
-      ".hbase.pb.IsInMaintenanceModeResponse\022>\n" +
-      "\007Balance\022\030.hbase.pb.BalanceRequest\032\031.hba" +
-      "se.pb.BalanceResponse\022_\n\022SetBalancerRunn" +
-      "ing\022#.hbase.pb.SetBalancerRunningRequest" +
-      "\032$.hbase.pb.SetBalancerRunningResponse\022\\" +
-      "\n\021IsBalancerEnabled\022\".hbase.pb.IsBalance" +
-      "rEnabledRequest\032#.hbase.pb.IsBalancerEna" +
-      "bledResponse\022k\n\026SetSplitOrMergeEnabled\022\'" +
-      ".hbase.pb.SetSplitOrMergeEnabledRequest\032" +
-      "(.hbase.pb.SetSplitOrMergeEnabledRespons",
-      "e\022h\n\025IsSplitOrMergeEnabled\022&.hbase.pb.Is" +
-      "SplitOrMergeEnabledRequest\032\'.hbase.pb.Is" +
-      "SplitOrMergeEnabledResponse\022D\n\tNormalize" +
-      "\022\032.hbase.pb.NormalizeRequest\032\033.hbase.pb." +
-      "NormalizeResponse\022e\n\024SetNormalizerRunnin" +
-      "g\022%.hbase.pb.SetNormalizerRunningRequest" +
-      "\032&.hbase.pb.SetNormalizerRunningResponse" +
-      "\022b\n\023IsNormalizerEnabled\022$.hbase.pb.IsNor" +
-      "malizerEnabledRequest\032%.hbase.pb.IsNorma" +
-      "lizerEnabledResponse\022S\n\016RunCatalogScan\022\037",
-      ".hbase.pb.RunCatalogScanRequest\032 .hbase." +
-      "pb.RunCatalogScanResponse\022e\n\024EnableCatal" +
-      "ogJanitor\022%.hbase.pb.EnableCatalogJanito" +
-      "rRequest\032&.hbase.pb.EnableCatalogJanitor" +
-      "Response\022n\n\027IsCatalogJanitorEnabled\022(.hb" +
-      "ase.pb.IsCatalogJanitorEnabledRequest\032)." +
-      "hbase.pb.IsCatalogJanitorEnabledResponse" +
-      "\022^\n\021ExecMasterService\022#.hbase.pb.Coproce" +
-      "ssorServiceRequest\032$.hbase.pb.Coprocesso" +
-      "rServiceResponse\022A\n\010Snapshot\022\031.hbase.pb.",
-      "SnapshotRequest\032\032.hbase.pb.SnapshotRespo" +
-      "nse\022h\n\025GetCompletedSnapshots\022&.hbase.pb." +
-      "GetCompletedSnapshotsRequest\032\'.hbase.pb." +
-      "GetCompletedSnapshotsResponse\022S\n\016DeleteS" +
-      "napshot\022\037.hbase.pb.DeleteSnapshotRequest" +
-      "\032 .hbase.pb.DeleteSnapshotResponse\022S\n\016Is" +
-      "SnapshotDone\022\037.hbase.pb.IsSnapshotDoneRe" +
-      "quest\032 .hbase.pb.IsSnapshotDoneResponse\022" +
-      "V\n\017RestoreSnapshot\022 .hbase.pb.RestoreSna" +
-      "pshotRequest\032!.hbase.pb.RestoreSnapshotR",
-      "esponse\022P\n\rExecProcedure\022\036.hbase.pb.Exec" +
-      "ProcedureRequest\032\037.hbase.pb.ExecProcedur" +
-      "eResponse\022W\n\024ExecProcedureWithRet\022\036.hbas" +
-      "e.pb.ExecProcedureRequest\032\037.hbase.pb.Exe" +
-      "cProcedureResponse\022V\n\017IsProcedureDone\022 ." +
-      "hbase.pb.IsProcedureDoneRequest\032!.hbase." +
-      "pb.IsProcedureDoneResponse\022V\n\017ModifyName" +
-      "space\022 .hbase.pb.ModifyNamespaceRequest\032" +
-      "!.hbase.pb.ModifyNamespaceResponse\022V\n\017Cr" +
-      "eateNamespace\022 .hbase.pb.CreateNamespace",
-      "Request\032!.hbase.pb.CreateNamespaceRespon" +
-      "se\022V\n\017DeleteNamespace\022 .hbase.pb.DeleteN" +
-      "amespaceRequest\032!.hbase.pb.DeleteNamespa" +
-      "ceResponse\022k\n\026GetNamespaceDescriptor\022\'.h" +
-      "base.pb.GetNamespaceDescriptorRequest\032(." +
-      "hbase.pb.GetNamespaceDescriptorResponse\022" +
-      "q\n\030ListNamespaceDescriptors\022).hbase.pb.L" +
-      "istNamespaceDescriptorsRequest\032*.hbase.p" +
-      "b.ListNamespaceDescriptorsResponse\022\206\001\n\037L" +
-      "istTableDescriptorsByNamespace\0220.hbase.p",
-      "b.ListTableDescriptorsByNamespaceRequest" +
-      "\0321.hbase.pb.ListTableDescriptorsByNamesp" +
-      "aceResponse\022t\n\031ListTableNamesByNamespace" +
-      "\022*.hbase.pb.ListTableNamesByNamespaceReq" +
-      "uest\032+.hbase.pb.ListTableNamesByNamespac" +
-      "eResponse\022P\n\rGetTableState\022\036.hbase.pb.Ge" +
-      "tTableStateRequest\032\037.hbase.pb.GetTableSt" +
-      "ateResponse\022A\n\010SetQuota\022\031.hbase.pb.SetQu" +
-      "otaRequest\032\032.hbase.pb.SetQuotaResponse\022x" +
-      "\n\037getLastMajorCompactionTimestamp\022).hbas",
-      "e.pb.MajorCompactionTimestampRequest\032*.h" +
-      "base.pb.MajorCompactionTimestampResponse" +
-      "\022\212\001\n(getLastMajorCompactionTimestampForR" +
-      "egion\0222.hbase.pb.MajorCompactionTimestam" +
-      "pForRegionRequest\032*.hbase.pb.MajorCompac" +
-      "tionTimestampResponse\022_\n\022getProcedureRes" +
-      "ult\022#.hbase.pb.GetProcedureResultRequest" +
-      "\032$.hbase.pb.GetProcedureResultResponse\022h" +
-      "\n\027getSecurityCapabilities\022%.hbase.pb.Sec" +
-      "urityCapabilitiesRequest\032&.hbase.pb.Secu",
-      "rityCapabilitiesResponse\022S\n\016AbortProcedu" +
-      "re\022\037.hbase.pb.AbortProcedureRequest\032 .hb" +
-      "ase.pb.AbortProcedureResponse\022S\n\016ListPro" +
-      "cedures\022\037.hbase.pb.ListProceduresRequest" +
-      "\032 .hbase.pb.ListProceduresResponse\022_\n\022Ad" +
-      "dReplicationPeer\022#.hbase.pb.AddReplicati" +
-      "onPeerRequest\032$.hbase.pb.AddReplicationP" +
-      "eerResponse\022h\n\025RemoveReplicationPeer\022&.h" +
-      "base.pb.RemoveReplicationPeerRequest\032\'.h" +
-      "base.pb.RemoveReplicationPeerResponse\022h\n",
-      "\025EnableReplicationPeer\022&.hbase.pb.Enable" +
-      "ReplicationPeerRequest\032\'.hbase.pb.Enable" +
-      "ReplicationPeerResponse\022k\n\026DisableReplic" +
-      "ationPeer\022\'.hbase.pb.DisableReplicationP" +
-      "eerRequest\032(.hbase.pb.DisableReplication" +
-      "PeerResponse\022q\n\030GetReplicationPeerConfig" +
-      "\022).hbase.pb.GetReplicationPeerConfigRequ" +
-      "est\032*.hbase.pb.GetReplicationPeerConfigR" +
-      "esponse\022z\n\033UpdateReplicationPeerConfig\022," +
-      ".hbase.pb.UpdateReplicationPeerConfigReq",
-      "uest\032-.hbase.pb.UpdateReplicationPeerCon" +
-      "figResponse\022e\n\024ListReplicationPeers\022%.hb" +
-      "ase.pb.ListReplicationPeersRequest\032&.hba" +
-      "se.pb.ListReplicationPeersResponse\022t\n\031li" +
-      "stDrainingRegionServers\022*.hbase.pb.ListD" +
-      "rainingRegionServersRequest\032+.hbase.pb.L" +
-      "istDrainingRegionServersResponse\022_\n\022drai" +
-      "nRegionServers\022#.hbase.pb.DrainRegionSer" +
-      "versRequest\032$.hbase.pb.DrainRegionServer" +
-      "sResponse\022}\n\034removeDrainFromRegionServer",
-      "s\022-.hbase.pb.RemoveDrainFromRegionServer" +
-      "sRequest\032..hbase.pb.RemoveDrainFromRegio" +
-      "nServersResponseBI\n1org.apache.hadoop.hb" +
-      "ase.shaded.protobuf.generatedB\014MasterPro" +
-      "tosH\001\210\001\001\240\001\001"
+      "ase.pb.ThrottleRequest\0220\n\013space_limit\030\010 " +
+      "\001(\0132\033.hbase.pb.SpaceLimitRequest\"\022\n\020SetQ" +
+      "uotaResponse\"J\n\037MajorCompactionTimestamp" +
+      "Request\022\'\n\ntable_name\030\001 \002(\0132\023.hbase.pb.T" +
+      "ableName\"U\n(MajorCompactionTimestampForR" +
+      "egionRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb." +
+      "RegionSpecifier\"@\n MajorCompactionTimest" +
+      "ampResponse\022\034\n\024compaction_timestamp\030\001 \002(",
+      "\003\"\035\n\033SecurityCapabilitiesRequest\"\354\001\n\034Sec" +
+      "urityCapabilitiesResponse\022G\n\014capabilitie" +
+      "s\030\001 \003(\01621.hbase.pb.SecurityCapabilitiesR" +
+      "esponse.Capability\"\202\001\n\nCapability\022\031\n\025SIM" +
+      "PLE_AUTHENTICATION\020\000\022\031\n\025SECURE_AUTHENTIC" +
+      "ATION\020\001\022\021\n\rAUTHORIZATION\020\002\022\026\n\022CELL_AUTHO" +
+      "RIZATION\020\003\022\023\n\017CELL_VISIBILITY\020\004\"\"\n ListD" +
+      "rainingRegionServersRequest\"N\n!ListDrain" +
+      "ingRegionServersResponse\022)\n\013server_name\030" +
+      "\001 \003(\0132\024.hbase.pb.ServerName\"F\n\031DrainRegi",
+      "onServersRequest\022)\n\013server_name\030\001 \003(\0132\024." +
+      "hbase.pb.ServerName\"\034\n\032DrainRegionServer" +
+      "sResponse\"P\n#RemoveDrainFromRegionServer" +
+      "sRequest\022)\n\013server_name\030\001 \003(\0132\024.hbase.pb" +
+      ".ServerName\"&\n$RemoveDrainFromRegionServ" +
+      "ersResponse*(\n\020MasterSwitchType\022\t\n\005SPLIT" +
+      "\020\000\022\t\n\005MERGE\020\0012\2221\n\rMasterService\022e\n\024GetSc" +
+      "hemaAlterStatus\022%.hbase.pb.GetSchemaAlte" +
+      "rStatusRequest\032&.hbase.pb.GetSchemaAlter" +
+      "StatusResponse\022b\n\023GetTableDescriptors\022$.",
+      "hbase.pb.GetTableDescriptorsRequest\032%.hb" +
+      "ase.pb.GetTableDescriptorsResponse\022P\n\rGe" +
+      "tTableNames\022\036.hbase.pb.GetTableNamesRequ" +
+      "est\032\037.hbase.pb.GetTableNamesResponse\022Y\n\020" +
+      "GetClusterStatus\022!.hbase.pb.GetClusterSt" +
+      "atusRequest\032\".hbase.pb.GetClusterStatusR" +
+      "esponse\022V\n\017IsMasterRunning\022 .hbase.pb.Is" +
+      "MasterRunningRequest\032!.hbase.pb.IsMaster" +
+      "RunningResponse\022D\n\tAddColumn\022\032.hbase.pb." +
+      "AddColumnRequest\032\033.hbase.pb.AddColumnRes",
+      "ponse\022M\n\014DeleteColumn\022\035.hbase.pb.DeleteC" +
+      "olumnRequest\032\036.hbase.pb.DeleteColumnResp" +
+      "onse\022M\n\014ModifyColumn\022\035.hbase.pb.ModifyCo" +
+      "lumnRequest\032\036.hbase.pb.ModifyColumnRespo" +
+      "nse\022G\n\nMoveRegion\022\033.hbase.pb.MoveRegionR" +
+      "equest\032\034.hbase.pb.MoveRegionResponse\022\\\n\021" +
+      "MergeTableRegions\022\".hbase.pb.MergeTableR" +
+      "egionsRequest\032#.hbase.pb.MergeTableRegio" +
+      "nsResponse\022M\n\014AssignRegion\022\035.hbase.pb.As" +
+      "signRegionRequest\032\036.hbase.pb.AssignRegio",
+      "nResponse\022S\n\016UnassignRegion\022\037.hbase.pb.U" +
+      "nassignRegionRequest\032 .hbase.pb.Unassign" +
+      "RegionResponse\022P\n\rOfflineRegion\022\036.hbase." +
+      "pb.OfflineRegionRequest\032\037.hbase.pb.Offli" +
+      "neRegionResponse\022J\n\013DeleteTable\022\034.hbase." +
+      "pb.DeleteTableRequest\032\035.hbase.pb.DeleteT" +
+      "ableResponse\022P\n\rtruncateTable\022\036.hbase.pb" +
+      ".TruncateTableRequest\032\037.hbase.pb.Truncat" +
+      "eTableResponse\022J\n\013EnableTable\022\034.hbase.pb" +
+      ".EnableTableRequest\032\035.hbase.pb.EnableTab",
+      "leResponse\022M\n\014DisableTable\022\035.hbase.pb.Di" +
+      "sableTableRequest\032\036.hbase.pb.DisableTabl" +
+      "eResponse\022J\n\013ModifyTable\022\034.hbase.pb.Modi" +
+      "fyTableRequest\032\035.hbase.pb.ModifyTableRes" +
+      "ponse\022J\n\013CreateTable\022\034.hbase.pb.CreateTa" +
+      "bleRequest\032\035.hbase.pb.CreateTableRespons" +
+      "e\022A\n\010Shutdown\022\031.hbase.pb.ShutdownRequest" +
+      "\032\032.hbase.pb.ShutdownResponse\022G\n\nStopMast" +
+      "er\022\033.hbase.pb.StopMasterRequest\032\034.hbase." +
+      "pb.StopMasterResponse\022h\n\031IsMasterInMaint",
+      "enanceMode\022$.hbase.pb.IsInMaintenanceMod" +
+      "eRequest\032%.hbase.pb.IsInMaintenanceModeR" +
+      "esponse\022>\n\007Balance\022\030.hbase.pb.BalanceReq" +
+      "uest\032\031.hbase.pb.BalanceResponse\022_\n\022SetBa" +
+      "lancerRunning\022#.hbase.pb.SetBalancerRunn" +
+      "ingRequest\032$.hbase.pb.SetBalancerRunning" +
+      "Response\022\\\n\021IsBalancerEnabled\022\".hbase.pb" +
+      ".IsBalancerEnabledRequest\032#.hbase.pb.IsB" +
+      "alancerEnabledResponse\022k\n\026SetSplitOrMerg" +
+      "eEnabled\022\'.hbase.pb.SetSplitOrMergeEnabl",
+      "edRequest\032(.hbase.pb.SetSplitOrMergeEnab" +
+      "ledResponse\022h\n\025IsSplitOrMergeEnabled\022&.h" +
+      "base.pb.IsSplitOrMergeEnabledRequest\032\'.h" +
+      "base.pb.IsSplitOrMergeEnabledResponse\022D\n" +
+      "\tNormalize\022\032.hbase.pb.NormalizeRequest\032\033" +
+      ".hbase.pb.NormalizeResponse\022e\n\024SetNormal" +
+      "izerRunning\022%.hbase.pb.SetNormalizerRunn" +
+      "ingRequest\032&.hbase.pb.SetNormalizerRunni" +
+      "ngResponse\022b\n\023IsNormalizerEnabled\022$.hbas" +
+      "e.pb.IsNormalizerEnabledRequest\032%.hbase.",
+      "pb.IsNormalizerEnabledResponse\022S\n\016RunCat" +
+      "alogScan\022\037.hbase.pb.RunCatalogScanReques" +
+      "t\032 .hbase.pb.RunCatalogScanResponse\022e\n\024E" +
+      "nableCatalogJanitor\022%.hbase.pb.EnableCat" +
+      "alogJanitorRequest\032&.hbase.pb.EnableCata" +
+      "logJanitorResponse\022n\n\027IsCatalogJanitorEn" +
+      "abled\022(.hbase.pb.IsCatalogJanitorEnabled" +
+      "Request\032).hbase.pb.IsCatalogJanitorEnabl" +
+      "edResponse\022^\n\021ExecMasterService\022#.hbase." +
+      "pb.CoprocessorServiceRequest\032$.hbase.pb.",
+      "CoprocessorServiceResponse\022A\n\010Snapshot\022\031" +
+      ".hbase.pb.SnapshotRequest\032\032.hbase.pb.Sna" +
+      "pshotResponse\022h\n\025GetCompletedSnapshots\022&" +
+      ".hbase.pb.GetCompletedSnapshotsRequest\032\'" +
+      ".hbase.pb.GetCompletedSnapshotsResponse\022" +
+      "S\n\016DeleteSnapshot\022\037.hbase.pb.DeleteSnaps" +
+      "hotRequest\032 .hbase.pb.DeleteSnapshotResp" +
+      "onse\022S\n\016IsSnapshotDone\022\037.hbase.pb.IsSnap" +
+      "shotDoneRequest\032 .hbase.pb.IsSnapshotDon" +
+      "eResponse\022V\n\017RestoreSnapshot\022 .hbase.pb.",
+      "RestoreSnapshotRequest\032!.hbase.pb.Restor" +
+      "eSnapshotResponse\022P\n\rExecProcedure\022\036.hba" +
+      "se.pb.ExecProcedureRequest\032\037.hbase.pb.Ex" +
+      "ecProcedureResponse\022W\n\024ExecProcedureWith" +
+      "Ret\022\036.hbase.pb.ExecProcedureRequest\032\037.hb" +
+      "ase.pb.ExecProcedureResponse\022V\n\017IsProced" +
+      "ureDone\022 .hbase.pb.IsProcedureDoneReques" +
+      "t\032!.hbase.pb.IsProcedureDoneResponse\022V\n\017" +
+      "ModifyNamespace\022 .hbase.pb.ModifyNamespa" +
+      "ceRequest\032!.hbase.pb.ModifyNamespaceResp",
+      "onse\022V\n\017CreateNamespace\022 .hbase.pb.Creat" +
+      "eNamespaceRequest\032!.hbase.pb.CreateNames" +
+      "paceResponse\022V\n\017DeleteNamespace\022 .hbase." +
+      "pb.DeleteNamespaceRequest\032!.hbase.pb.Del" +
+      "eteNamespaceResponse\022k\n\026GetNamespaceDesc" +
+      "riptor\022\'.hbase.pb.GetNamespaceDescriptor" +
+      "Request\032(.hbase.pb.GetNamespaceDescripto" +
+      "rResponse\022q\n\030ListNamespaceDescriptors\022)." +
+      "hbase.pb.ListNamespaceDescriptorsRequest" +
+      "\032*.hbase.pb.ListNamespaceDescriptorsResp",
+      "onse\022\206\001\n\037ListTableDescriptorsByNamespace" +
+      "\0220.hbase.pb.ListTableDescriptorsByNamesp" +
+      "aceRequest\0321.hbase.pb.ListTableDescripto" +
+      "rsByNamespaceResponse\022t\n\031ListTableNamesB" +
+      "yNamespace\022*.hbase.pb.ListTableNamesByNa" +
+      "mespaceRequest\032+.hbase.pb.ListTableNames" +
+      "ByNamespaceResponse\022P\n\rGetTableState\022\036.h" +
+      "base.pb.GetTableStateRequest\032\037.hbase.pb." +
+      "GetTableStateResponse\022A\n\010SetQuota\022\031.hbas" +
+      "e.pb.SetQuotaRequest\032\032.hbase.pb.SetQuota",
+      "Response\022x\n\037getLastMajorCompactionTimest" +
+      "amp\022).hbase.pb.MajorCompactionTimestampR" +
+      "equest\032*.hbase.pb.MajorCompactionTimesta" +
+      "mpResponse\022\212\001\n(getLastMajorCompactionTim" +
+      "estampForRegion\0222.hbase.pb.MajorCompacti" +
+      "onTimestampForRegionRequest\032*.hbase.pb.M" +
+      "ajorCompactionTimestampResponse\022_\n\022getPr" +
+      "ocedureResult\022#.hbase.pb.GetProcedureRes" +
+      "ultRequest\032$.hbase.pb.GetProcedureResult" +
+      "Response\022h\n\027getSecurityCapabilities\022%.hb",
+      "ase.pb.SecurityCapabilitiesRequest\032&.hba" +
+      "se.pb.SecurityCapabilitiesResponse\022S\n\016Ab" +
+      "ortProcedure\022\037.hbase.pb.AbortProcedureRe" +
+      "quest\032 .hbase.pb.AbortProcedureResponse\022" +
+      "S\n\016ListProcedures\022\037.hbase.pb.ListProcedu" +
+      "resRequest\032 .hbase.pb.ListProceduresResp" +
+      "onse\022_\n\022AddReplicationPeer\022#.hbase.pb.Ad" +
+      "dReplicationPeerRequest\032$.hbase.pb.AddRe" +
+      "plicationPeerResponse\022h\n\025RemoveReplicati" +
+      "onPeer\022&.hbase.pb.RemoveReplicationPeerR",
+      "equest\032\'.hbase.pb.RemoveReplicationPeerR" +
+      "esponse\022h\n\025EnableReplicationPeer\022&.hbase" +
+      ".pb.EnableReplicationPeerRequest\032\'.hbase" +
+      ".pb.EnableReplicationPeerResponse\022k\n\026Dis" +
+      "ableReplicationPeer\022\'.hbase.pb.DisableRe" +
+      "plicationPeerRequest\032(.hbase.pb.DisableR" +
+      "eplicationPeerResponse\022q\n\030GetReplication" +
+      "PeerConfig\022).hbase.pb.GetReplicationPeer" +
+      "ConfigRequest\032*.hbase.pb.GetReplicationP" +
+      "eerConfigResponse\022z\n\033UpdateReplicationPe",
+      "erConfig\022,.hbase.pb.UpdateReplicationPee" +
+      "rConfigRequest\032-.hbase.pb.UpdateReplicat" +
+      "ionPeerConfigResponse\022e\n\024ListReplication" +
+      "Peers\022%.hbase.pb.ListReplicationPeersReq" +
+      "uest\032&.hbase.pb.ListReplicationPeersResp" +
+      "onse\022t\n\031listDrainingRegionServers\022*.hbas" +
+      "e.pb.ListDrainingRegionServersRequest\032+." +
+      "hbase.pb.ListDrainingRegionServersRespon" +
+      "se\022_\n\022drainRegionServers\022#.hbase.pb.Drai" +
+      "nRegionServersRequest\032$.hbase.pb.DrainRe",
+      "gionServersResponse\022}\n\034removeDrainFromRe" +
+      "gionServers\022-.hbase.pb.RemoveDrainFromRe" +
+      "gionServersRequest\032..hbase.pb.RemoveDrai" +
+      "nFromRegionServersResponseBI\n1org.apache" +
+      ".hadoop.hbase.shaded.protobuf.generatedB" +
+      "\014MasterProtosH\001\210\001\001\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.    InternalDescriptorAssigner() {
@@ -74566,7 +74766,7 @@ public final class MasterProtos {
     internal_static_hbase_pb_SetQuotaRequest_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_SetQuotaRequest_descriptor,
-        new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", });
+        new java.lang.String[] { "UserName", "UserGroup", "Namespace", "TableName", "RemoveAll", "BypassGlobals", "Throttle", "SpaceLimit", });
     internal_static_hbase_pb_SetQuotaResponse_descriptor =
       getDescriptor().getMessageTypes().get(105);
     internal_static_hbase_pb_SetQuotaResponse_fieldAccessorTable = new

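For context, the regenerated builder above exposes the new optional space_limit field (tag 8) on SetQuotaRequest next to the existing throttle field. Below is a minimal, hypothetical sketch of how client code might populate it through the shaded generated API; it uses only the accessors visible in this diff (setSpaceLimit, hasSpaceLimit, getSpaceLimit), leaves SpaceLimitRequest at its default instance because that message's own fields are defined in QuotaProtos rather than here, and the namespace value is an invented example.

import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceLimitRequest;

public class SetQuotaRequestSketch {
  public static void main(String[] args) {
    // Build a SetQuotaRequest carrying the new optional space_limit field (8).
    // SpaceLimitRequest is left at its default instance; its own fields live
    // in QuotaProtos and are not part of this hunk.
    SetQuotaRequest request = SetQuotaRequest.newBuilder()
        .setNamespace("ns1") // invented example value
        .setSpaceLimit(SpaceLimitRequest.getDefaultInstance())
        .build();

    // The message-level accessors mirror the builder methods added above.
    if (request.hasSpaceLimit()) {
      SpaceLimitRequest limit = request.getSpaceLimit();
      System.out.println("space_limit is set: " + limit);
    }
  }
}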

[02/50] [abbrv] hbase git commit: HBASE-17475 Stack overflow in AsyncProcess if retry too much

Posted by el...@apache.org.
HBASE-17475 Stack overflow in AsyncProcess if retry too much

Signed-off-by: zhangduo <zh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9b38c1a3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9b38c1a3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9b38c1a3

Branch: refs/heads/HBASE-16961
Commit: 9b38c1a33ce1a7ee41cb14980b3b74e317d30b78
Parents: 4ab95eb
Author: Allan Yang <al...@163.com>
Authored: Tue Jan 17 15:45:48 2017 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Tue Jan 17 15:45:48 2017 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9b38c1a3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index 036196e..b0a2798 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -42,6 +42,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CallQueueTooBigException;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.RegionLocations;
@@ -567,8 +568,11 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
       }
 
       // run all the runnables
+      // HBASE-17475: Do not reuse the thread once the stack reaches a certain depth, to prevent stack overflow.
+      // For now, we use HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER to control the depth.
       for (Runnable runnable : runnables) {
-        if ((--actionsRemaining == 0) && reuseThread) {
+        if ((--actionsRemaining == 0) && reuseThread
+            && numAttempt % HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER != 0) {
           runnable.run();
         } else {
           try {

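The guarded loop above stops reusing the calling thread once numAttempt is a multiple of DEFAULT_HBASE_CLIENT_RETRIES_NUMBER, so the recursion that builds up when runnable.run() re-enters the retry path can no longer grow without bound. The following stand-alone sketch illustrates the same depth-capping idea with a plain executor; the names (RETRY_DEPTH_CAP, resubmit) are hypothetical and this is an illustration of the pattern, not the HBase implementation.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class DepthCappedRetrySketch {
  // Hypothetical cap standing in for HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER.
  private static final int RETRY_DEPTH_CAP = 35;

  private final ExecutorService pool = Executors.newFixedThreadPool(4);

  void resubmit(Runnable retry, int numAttempt, boolean reuseThread) {
    // Running the retry inline avoids a pool hand-off, but each inline run adds
    // stack frames because the retry eventually calls back into resubmit().
    // Handing off every RETRY_DEPTH_CAP attempts keeps the stack depth bounded.
    if (reuseThread && numAttempt % RETRY_DEPTH_CAP != 0) {
      retry.run();          // inline: cheap, but deepens the stack
    } else {
      pool.execute(retry);  // hand off: resets the stack to a fresh frame
    }
  }
}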

[23/50] [abbrv] hbase git commit: HBASE-16831 Procedure V2 - Remove org.apache.hadoop.hbase.zookeeper.lock (Appy)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index 0095043..81cc69b 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -7793,1221 +7793,6 @@ public final class ZooKeeperProtos {
     // @@protoc_insertion_point(class_scope:hbase.pb.ReplicationHLogPosition)
   }
 
-  public interface TableLockOrBuilder
-      extends com.google.protobuf.MessageOrBuilder {
-
-    // optional .hbase.pb.TableName table_name = 1;
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    boolean hasTableName();
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
-
-    // optional .hbase.pb.ServerName lock_owner = 2;
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    boolean hasLockOwner();
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner();
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder();
-
-    // optional int64 thread_id = 3;
-    /**
-     * <code>optional int64 thread_id = 3;</code>
-     */
-    boolean hasThreadId();
-    /**
-     * <code>optional int64 thread_id = 3;</code>
-     */
-    long getThreadId();
-
-    // optional bool is_shared = 4;
-    /**
-     * <code>optional bool is_shared = 4;</code>
-     */
-    boolean hasIsShared();
-    /**
-     * <code>optional bool is_shared = 4;</code>
-     */
-    boolean getIsShared();
-
-    // optional string purpose = 5;
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    boolean hasPurpose();
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    java.lang.String getPurpose();
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    com.google.protobuf.ByteString
-        getPurposeBytes();
-
-    // optional int64 create_time = 6;
-    /**
-     * <code>optional int64 create_time = 6;</code>
-     */
-    boolean hasCreateTime();
-    /**
-     * <code>optional int64 create_time = 6;</code>
-     */
-    long getCreateTime();
-  }
-  /**
-   * Protobuf type {@code hbase.pb.TableLock}
-   *
-   * <pre>
-   **
-   * Metadata associated with a table lock in zookeeper
-   * </pre>
-   */
-  public static final class TableLock extends
-      com.google.protobuf.GeneratedMessage
-      implements TableLockOrBuilder {
-    // Use TableLock.newBuilder() to construct.
-    private TableLock(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
-      super(builder);
-      this.unknownFields = builder.getUnknownFields();
-    }
-    private TableLock(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-    private static final TableLock defaultInstance;
-    public static TableLock getDefaultInstance() {
-      return defaultInstance;
-    }
-
-    public TableLock getDefaultInstanceForType() {
-      return defaultInstance;
-    }
-
-    private final com.google.protobuf.UnknownFieldSet unknownFields;
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet
-        getUnknownFields() {
-      return this.unknownFields;
-    }
-    private TableLock(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      initFields();
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            default: {
-              if (!parseUnknownField(input, unknownFields,
-                                     extensionRegistry, tag)) {
-                done = true;
-              }
-              break;
-            }
-            case 10: {
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000001) == 0x00000001)) {
-                subBuilder = tableName_.toBuilder();
-              }
-              tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(tableName_);
-                tableName_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000001;
-              break;
-            }
-            case 18: {
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
-              if (((bitField0_ & 0x00000002) == 0x00000002)) {
-                subBuilder = lockOwner_.toBuilder();
-              }
-              lockOwner_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
-              if (subBuilder != null) {
-                subBuilder.mergeFrom(lockOwner_);
-                lockOwner_ = subBuilder.buildPartial();
-              }
-              bitField0_ |= 0x00000002;
-              break;
-            }
-            case 24: {
-              bitField0_ |= 0x00000004;
-              threadId_ = input.readInt64();
-              break;
-            }
-            case 32: {
-              bitField0_ |= 0x00000008;
-              isShared_ = input.readBool();
-              break;
-            }
-            case 42: {
-              bitField0_ |= 0x00000010;
-              purpose_ = input.readBytes();
-              break;
-            }
-            case 48: {
-              bitField0_ |= 0x00000020;
-              createTime_ = input.readInt64();
-              break;
-            }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(
-            e.getMessage()).setUnfinishedMessage(this);
-      } finally {
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-    public static final com.google.protobuf.Descriptors.Descriptor
-        getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_descriptor;
-    }
-
-    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder.class);
-    }
-
-    public static com.google.protobuf.Parser<TableLock> PARSER =
-        new com.google.protobuf.AbstractParser<TableLock>() {
-      public TableLock parsePartialFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws com.google.protobuf.InvalidProtocolBufferException {
-        return new TableLock(input, extensionRegistry);
-      }
-    };
-
-    @java.lang.Override
-    public com.google.protobuf.Parser<TableLock> getParserForType() {
-      return PARSER;
-    }
-
-    private int bitField0_;
-    // optional .hbase.pb.TableName table_name = 1;
-    public static final int TABLE_NAME_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    public boolean hasTableName() {
-      return ((bitField0_ & 0x00000001) == 0x00000001);
-    }
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
-      return tableName_;
-    }
-    /**
-     * <code>optional .hbase.pb.TableName table_name = 1;</code>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
-      return tableName_;
-    }
-
-    // optional .hbase.pb.ServerName lock_owner = 2;
-    public static final int LOCK_OWNER_FIELD_NUMBER = 2;
-    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_;
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    public boolean hasLockOwner() {
-      return ((bitField0_ & 0x00000002) == 0x00000002);
-    }
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner() {
-      return lockOwner_;
-    }
-    /**
-     * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder() {
-      return lockOwner_;
-    }
-
-    // optional int64 thread_id = 3;
-    public static final int THREAD_ID_FIELD_NUMBER = 3;
-    private long threadId_;
-    /**
-     * <code>optional int64 thread_id = 3;</code>
-     */
-    public boolean hasThreadId() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
-    }
-    /**
-     * <code>optional int64 thread_id = 3;</code>
-     */
-    public long getThreadId() {
-      return threadId_;
-    }
-
-    // optional bool is_shared = 4;
-    public static final int IS_SHARED_FIELD_NUMBER = 4;
-    private boolean isShared_;
-    /**
-     * <code>optional bool is_shared = 4;</code>
-     */
-    public boolean hasIsShared() {
-      return ((bitField0_ & 0x00000008) == 0x00000008);
-    }
-    /**
-     * <code>optional bool is_shared = 4;</code>
-     */
-    public boolean getIsShared() {
-      return isShared_;
-    }
-
-    // optional string purpose = 5;
-    public static final int PURPOSE_FIELD_NUMBER = 5;
-    private java.lang.Object purpose_;
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    public boolean hasPurpose() {
-      return ((bitField0_ & 0x00000010) == 0x00000010);
-    }
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    public java.lang.String getPurpose() {
-      java.lang.Object ref = purpose_;
-      if (ref instanceof java.lang.String) {
-        return (java.lang.String) ref;
-      } else {
-        com.google.protobuf.ByteString bs = 
-            (com.google.protobuf.ByteString) ref;
-        java.lang.String s = bs.toStringUtf8();
-        if (bs.isValidUtf8()) {
-          purpose_ = s;
-        }
-        return s;
-      }
-    }
-    /**
-     * <code>optional string purpose = 5;</code>
-     */
-    public com.google.protobuf.ByteString
-        getPurposeBytes() {
-      java.lang.Object ref = purpose_;
-      if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b = 
-            com.google.protobuf.ByteString.copyFromUtf8(
-                (java.lang.String) ref);
-        purpose_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
-      }
-    }
-
-    // optional int64 create_time = 6;
-    public static final int CREATE_TIME_FIELD_NUMBER = 6;
-    private long createTime_;
-    /**
-     * <code>optional int64 create_time = 6;</code>
-     */
-    public boolean hasCreateTime() {
-      return ((bitField0_ & 0x00000020) == 0x00000020);
-    }
-    /**
-     * <code>optional int64 create_time = 6;</code>
-     */
-    public long getCreateTime() {
-      return createTime_;
-    }
-
-    private void initFields() {
-      tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
-      lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-      threadId_ = 0L;
-      isShared_ = false;
-      purpose_ = "";
-      createTime_ = 0L;
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      if (hasTableName()) {
-        if (!getTableName().isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      if (hasLockOwner()) {
-        if (!getLockOwner().isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
-      }
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, tableName_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeMessage(2, lockOwner_);
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeInt64(3, threadId_);
-      }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        output.writeBool(4, isShared_);
-      }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
-        output.writeBytes(5, getPurposeBytes());
-      }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
-        output.writeInt64(6, createTime_);
-      }
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, tableName_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(2, lockOwner_);
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeInt64Size(3, threadId_);
-      }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBoolSize(4, isShared_);
-      }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeBytesSize(5, getPurposeBytes());
-      }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeInt64Size(6, createTime_);
-      }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
-
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock)) {
-        return super.equals(obj);
-      }
-      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock) obj;
-
-      boolean result = true;
-      result = result && (hasTableName() == other.hasTableName());
-      if (hasTableName()) {
-        result = result && getTableName()
-            .equals(other.getTableName());
-      }
-      result = result && (hasLockOwner() == other.hasLockOwner());
-      if (hasLockOwner()) {
-        result = result && getLockOwner()
-            .equals(other.getLockOwner());
-      }
-      result = result && (hasThreadId() == other.hasThreadId());
-      if (hasThreadId()) {
-        result = result && (getThreadId()
-            == other.getThreadId());
-      }
-      result = result && (hasIsShared() == other.hasIsShared());
-      if (hasIsShared()) {
-        result = result && (getIsShared()
-            == other.getIsShared());
-      }
-      result = result && (hasPurpose() == other.hasPurpose());
-      if (hasPurpose()) {
-        result = result && getPurpose()
-            .equals(other.getPurpose());
-      }
-      result = result && (hasCreateTime() == other.hasCreateTime());
-      if (hasCreateTime()) {
-        result = result && (getCreateTime()
-            == other.getCreateTime());
-      }
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
-
-    private int memoizedHashCode = 0;
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasTableName()) {
-        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
-        hash = (53 * hash) + getTableName().hashCode();
-      }
-      if (hasLockOwner()) {
-        hash = (37 * hash) + LOCK_OWNER_FIELD_NUMBER;
-        hash = (53 * hash) + getLockOwner().hashCode();
-      }
-      if (hasThreadId()) {
-        hash = (37 * hash) + THREAD_ID_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getThreadId());
-      }
-      if (hasIsShared()) {
-        hash = (37 * hash) + IS_SHARED_FIELD_NUMBER;
-        hash = (53 * hash) + hashBoolean(getIsShared());
-      }
-      if (hasPurpose()) {
-        hash = (37 * hash) + PURPOSE_FIELD_NUMBER;
-        hash = (53 * hash) + getPurpose().hashCode();
-      }
-      if (hasCreateTime()) {
-        hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getCreateTime());
-      }
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
-    }
-
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock prototype) {
-      return newBuilder().mergeFrom(prototype);
-    }
-    public Builder toBuilder() { return newBuilder(this); }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     * Protobuf type {@code hbase.pb.TableLock}
-     *
-     * <pre>
-     **
-     * Metadata associated with a table lock in zookeeper
-     * </pre>
-     */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLockOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_descriptor;
-      }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.class, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder.class);
-      }
-
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getTableNameFieldBuilder();
-          getLockOwnerFieldBuilder();
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
-
-      public Builder clear() {
-        super.clear();
-        if (tableNameBuilder_ == null) {
-          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
-        } else {
-          tableNameBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        if (lockOwnerBuilder_ == null) {
-          lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-        } else {
-          lockOwnerBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000002);
-        threadId_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000004);
-        isShared_ = false;
-        bitField0_ = (bitField0_ & ~0x00000008);
-        purpose_ = "";
-        bitField0_ = (bitField0_ & ~0x00000010);
-        createTime_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000020);
-        return this;
-      }
-
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
-
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_TableLock_descriptor;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance();
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock build() {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock(this);
-        int from_bitField0_ = bitField0_;
-        int to_bitField0_ = 0;
-        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
-          to_bitField0_ |= 0x00000001;
-        }
-        if (tableNameBuilder_ == null) {
-          result.tableName_ = tableName_;
-        } else {
-          result.tableName_ = tableNameBuilder_.build();
-        }
-        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
-          to_bitField0_ |= 0x00000002;
-        }
-        if (lockOwnerBuilder_ == null) {
-          result.lockOwner_ = lockOwner_;
-        } else {
-          result.lockOwner_ = lockOwnerBuilder_.build();
-        }
-        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
-          to_bitField0_ |= 0x00000004;
-        }
-        result.threadId_ = threadId_;
-        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
-          to_bitField0_ |= 0x00000008;
-        }
-        result.isShared_ = isShared_;
-        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
-          to_bitField0_ |= 0x00000010;
-        }
-        result.purpose_ = purpose_;
-        if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
-          to_bitField0_ |= 0x00000020;
-        }
-        result.createTime_ = createTime_;
-        result.bitField0_ = to_bitField0_;
-        onBuilt();
-        return result;
-      }
-
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock) {
-          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock)other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock other) {
-        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.getDefaultInstance()) return this;
-        if (other.hasTableName()) {
-          mergeTableName(other.getTableName());
-        }
-        if (other.hasLockOwner()) {
-          mergeLockOwner(other.getLockOwner());
-        }
-        if (other.hasThreadId()) {
-          setThreadId(other.getThreadId());
-        }
-        if (other.hasIsShared()) {
-          setIsShared(other.getIsShared());
-        }
-        if (other.hasPurpose()) {
-          bitField0_ |= 0x00000010;
-          purpose_ = other.purpose_;
-          onChanged();
-        }
-        if (other.hasCreateTime()) {
-          setCreateTime(other.getCreateTime());
-        }
-        this.mergeUnknownFields(other.getUnknownFields());
-        return this;
-      }
-
-      public final boolean isInitialized() {
-        if (hasTableName()) {
-          if (!getTableName().isInitialized()) {
-            
-            return false;
-          }
-        }
-        if (hasLockOwner()) {
-          if (!getLockOwner().isInitialized()) {
-            
-            return false;
-          }
-        }
-        return true;
-      }
-
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock) e.getUnfinishedMessage();
-          throw e;
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-      private int bitField0_;
-
-      // optional .hbase.pb.TableName table_name = 1;
-      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public boolean hasTableName() {
-        return ((bitField0_ & 0x00000001) == 0x00000001);
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
-        if (tableNameBuilder_ == null) {
-          return tableName_;
-        } else {
-          return tableNameBuilder_.getMessage();
-        }
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
-        if (tableNameBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          tableName_ = value;
-          onChanged();
-        } else {
-          tableNameBuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public Builder setTableName(
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
-        if (tableNameBuilder_ == null) {
-          tableName_ = builderForValue.build();
-          onChanged();
-        } else {
-          tableNameBuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
-        if (tableNameBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
-            tableName_ =
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
-          } else {
-            tableName_ = value;
-          }
-          onChanged();
-        } else {
-          tableNameBuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000001;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public Builder clearTableName() {
-        if (tableNameBuilder_ == null) {
-          tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
-          onChanged();
-        } else {
-          tableNameBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000001);
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
-        bitField0_ |= 0x00000001;
-        onChanged();
-        return getTableNameFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
-        if (tableNameBuilder_ != null) {
-          return tableNameBuilder_.getMessageOrBuilder();
-        } else {
-          return tableName_;
-        }
-      }
-      /**
-       * <code>optional .hbase.pb.TableName table_name = 1;</code>
-       */
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
-          getTableNameFieldBuilder() {
-        if (tableNameBuilder_ == null) {
-          tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
-                  tableName_,
-                  getParentForChildren(),
-                  isClean());
-          tableName_ = null;
-        }
-        return tableNameBuilder_;
-      }
-
-      // optional .hbase.pb.ServerName lock_owner = 2;
-      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> lockOwnerBuilder_;
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public boolean hasLockOwner() {
-        return ((bitField0_ & 0x00000002) == 0x00000002);
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner() {
-        if (lockOwnerBuilder_ == null) {
-          return lockOwner_;
-        } else {
-          return lockOwnerBuilder_.getMessage();
-        }
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public Builder setLockOwner(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (lockOwnerBuilder_ == null) {
-          if (value == null) {
-            throw new NullPointerException();
-          }
-          lockOwner_ = value;
-          onChanged();
-        } else {
-          lockOwnerBuilder_.setMessage(value);
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public Builder setLockOwner(
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
-        if (lockOwnerBuilder_ == null) {
-          lockOwner_ = builderForValue.build();
-          onChanged();
-        } else {
-          lockOwnerBuilder_.setMessage(builderForValue.build());
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public Builder mergeLockOwner(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
-        if (lockOwnerBuilder_ == null) {
-          if (((bitField0_ & 0x00000002) == 0x00000002) &&
-              lockOwner_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
-            lockOwner_ =
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(lockOwner_).mergeFrom(value).buildPartial();
-          } else {
-            lockOwner_ = value;
-          }
-          onChanged();
-        } else {
-          lockOwnerBuilder_.mergeFrom(value);
-        }
-        bitField0_ |= 0x00000002;
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public Builder clearLockOwner() {
-        if (lockOwnerBuilder_ == null) {
-          lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
-          onChanged();
-        } else {
-          lockOwnerBuilder_.clear();
-        }
-        bitField0_ = (bitField0_ & ~0x00000002);
-        return this;
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getLockOwnerBuilder() {
-        bitField0_ |= 0x00000002;
-        onChanged();
-        return getLockOwnerFieldBuilder().getBuilder();
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder() {
-        if (lockOwnerBuilder_ != null) {
-          return lockOwnerBuilder_.getMessageOrBuilder();
-        } else {
-          return lockOwner_;
-        }
-      }
-      /**
-       * <code>optional .hbase.pb.ServerName lock_owner = 2;</code>
-       */
-      private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
-          getLockOwnerFieldBuilder() {
-        if (lockOwnerBuilder_ == null) {
-          lockOwnerBuilder_ = new com.google.protobuf.SingleFieldBuilder<
-              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
-                  lockOwner_,
-                  getParentForChildren(),
-                  isClean());
-          lockOwner_ = null;
-        }
-        return lockOwnerBuilder_;
-      }
-
-      // optional int64 thread_id = 3;
-      private long threadId_ ;
-      /**
-       * <code>optional int64 thread_id = 3;</code>
-       */
-      public boolean hasThreadId() {
-        return ((bitField0_ & 0x00000004) == 0x00000004);
-      }
-      /**
-       * <code>optional int64 thread_id = 3;</code>
-       */
-      public long getThreadId() {
-        return threadId_;
-      }
-      /**
-       * <code>optional int64 thread_id = 3;</code>
-       */
-      public Builder setThreadId(long value) {
-        bitField0_ |= 0x00000004;
-        threadId_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional int64 thread_id = 3;</code>
-       */
-      public Builder clearThreadId() {
-        bitField0_ = (bitField0_ & ~0x00000004);
-        threadId_ = 0L;
-        onChanged();
-        return this;
-      }
-
-      // optional bool is_shared = 4;
-      private boolean isShared_ ;
-      /**
-       * <code>optional bool is_shared = 4;</code>
-       */
-      public boolean hasIsShared() {
-        return ((bitField0_ & 0x00000008) == 0x00000008);
-      }
-      /**
-       * <code>optional bool is_shared = 4;</code>
-       */
-      public boolean getIsShared() {
-        return isShared_;
-      }
-      /**
-       * <code>optional bool is_shared = 4;</code>
-       */
-      public Builder setIsShared(boolean value) {
-        bitField0_ |= 0x00000008;
-        isShared_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional bool is_shared = 4;</code>
-       */
-      public Builder clearIsShared() {
-        bitField0_ = (bitField0_ & ~0x00000008);
-        isShared_ = false;
-        onChanged();
-        return this;
-      }
-
-      // optional string purpose = 5;
-      private java.lang.Object purpose_ = "";
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public boolean hasPurpose() {
-        return ((bitField0_ & 0x00000010) == 0x00000010);
-      }
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public java.lang.String getPurpose() {
-        java.lang.Object ref = purpose_;
-        if (!(ref instanceof java.lang.String)) {
-          java.lang.String s = ((com.google.protobuf.ByteString) ref)
-              .toStringUtf8();
-          purpose_ = s;
-          return s;
-        } else {
-          return (java.lang.String) ref;
-        }
-      }
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public com.google.protobuf.ByteString
-          getPurposeBytes() {
-        java.lang.Object ref = purpose_;
-        if (ref instanceof String) {
-          com.google.protobuf.ByteString b = 
-              com.google.protobuf.ByteString.copyFromUtf8(
-                  (java.lang.String) ref);
-          purpose_ = b;
-          return b;
-        } else {
-          return (com.google.protobuf.ByteString) ref;
-        }
-      }
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public Builder setPurpose(
-          java.lang.String value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000010;
-        purpose_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public Builder clearPurpose() {
-        bitField0_ = (bitField0_ & ~0x00000010);
-        purpose_ = getDefaultInstance().getPurpose();
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional string purpose = 5;</code>
-       */
-      public Builder setPurposeBytes(
-          com.google.protobuf.ByteString value) {
-        if (value == null) {
-    throw new NullPointerException();
-  }
-  bitField0_ |= 0x00000010;
-        purpose_ = value;
-        onChanged();
-        return this;
-      }
-
-      // optional int64 create_time = 6;
-      private long createTime_ ;
-      /**
-       * <code>optional int64 create_time = 6;</code>
-       */
-      public boolean hasCreateTime() {
-        return ((bitField0_ & 0x00000020) == 0x00000020);
-      }
-      /**
-       * <code>optional int64 create_time = 6;</code>
-       */
-      public long getCreateTime() {
-        return createTime_;
-      }
-      /**
-       * <code>optional int64 create_time = 6;</code>
-       */
-      public Builder setCreateTime(long value) {
-        bitField0_ |= 0x00000020;
-        createTime_ = value;
-        onChanged();
-        return this;
-      }
-      /**
-       * <code>optional int64 create_time = 6;</code>
-       */
-      public Builder clearCreateTime() {
-        bitField0_ = (bitField0_ & ~0x00000020);
-        createTime_ = 0L;
-        onChanged();
-        return this;
-      }
-
-      // @@protoc_insertion_point(builder_scope:hbase.pb.TableLock)
-    }
-
-    static {
-      defaultInstance = new TableLock(true);
-      defaultInstance.initFields();
-    }
-
-    // @@protoc_insertion_point(class_scope:hbase.pb.TableLock)
-  }
-
   public interface SwitchStateOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
@@ -9498,11 +8283,6 @@ public final class ZooKeeperProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_hbase_pb_ReplicationHLogPosition_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
-    internal_static_hbase_pb_TableLock_descriptor;
-  private static
-    com.google.protobuf.GeneratedMessage.FieldAccessorTable
-      internal_static_hbase_pb_TableLock_fieldAccessorTable;
-  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_SwitchState_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -9546,14 +8326,9 @@ public final class ZooKeeperProtos {
       "tate\022/\n\005state\030\001 \002(\0162 .hbase.pb.Replicati" +
       "onState.State\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010D" +
       "ISABLED\020\001\"+\n\027ReplicationHLogPosition\022\020\n\010",
-      "position\030\001 \002(\003\"\252\001\n\tTableLock\022\'\n\ntable_na" +
-      "me\030\001 \001(\0132\023.hbase.pb.TableName\022(\n\nlock_ow" +
-      "ner\030\002 \001(\0132\024.hbase.pb.ServerName\022\021\n\tthrea" +
-      "d_id\030\003 \001(\003\022\021\n\tis_shared\030\004 \001(\010\022\017\n\007purpose" +
-      "\030\005 \001(\t\022\023\n\013create_time\030\006 \001(\003\"\036\n\013SwitchSta" +
-      "te\022\017\n\007enabled\030\001 \001(\010BE\n*org.apache.hadoop" +
-      ".hbase.protobuf.generatedB\017ZooKeeperProt" +
-      "osH\001\210\001\001\240\001\001"
+      "position\030\001 \002(\003\"\036\n\013SwitchState\022\017\n\007enabled" +
+      "\030\001 \001(\010BE\n*org.apache.hadoop.hbase.protob" +
+      "uf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -9614,14 +8389,8 @@ public final class ZooKeeperProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_ReplicationHLogPosition_descriptor,
               new java.lang.String[] { "Position", });
-          internal_static_hbase_pb_TableLock_descriptor =
-            getDescriptor().getMessageTypes().get(9);
-          internal_static_hbase_pb_TableLock_fieldAccessorTable = new
-            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
-              internal_static_hbase_pb_TableLock_descriptor,
-              new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", });
           internal_static_hbase_pb_SwitchState_descriptor =
-            getDescriptor().getMessageTypes().get(10);
+            getDescriptor().getMessageTypes().get(9);
           internal_static_hbase_pb_SwitchState_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SwitchState_descriptor,

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-protocol/src/main/protobuf/ZooKeeper.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/ZooKeeper.proto b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
index 6f13e4a..ee73c84 100644
--- a/hbase-protocol/src/main/protobuf/ZooKeeper.proto
+++ b/hbase-protocol/src/main/protobuf/ZooKeeper.proto
@@ -144,18 +144,6 @@ message ReplicationHLogPosition {
 }
 
 /**
- * Metadata associated with a table lock in zookeeper
- */
-message TableLock {
-  optional TableName table_name = 1;
-  optional ServerName lock_owner = 2;
-  optional int64 thread_id = 3;
-  optional bool is_shared = 4;
-  optional string purpose = 5;
-  optional int64 create_time = 6;
-}
-
-/**
  * State of the switch.
  */
 message SwitchState {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
deleted file mode 100644
index d1216f1..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessLock.java
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * An interface for an application-specific lock.
- */
-@InterfaceAudience.Private
-public interface InterProcessLock {
-
-  /**
-   * Acquire the lock, waiting indefinitely until the lock is released or
-   * the thread is interrupted.
-   * @throws IOException If there is an unrecoverable error releasing the lock
-   * @throws InterruptedException If current thread is interrupted while
-   *                              waiting for the lock
-   */
-  void acquire() throws IOException, InterruptedException;
-
-  /**
-   * Acquire the lock within a wait time.
-   * @param timeoutMs The maximum time (in milliseconds) to wait for the lock,
-   *                  -1 to wait indefinitely
-   * @return True if the lock was acquired, false if waiting time elapsed
-   *         before the lock was acquired
-   * @throws IOException If there is an unrecoverable error talking
-   *                     (e.g., when talking to a lock service) when acquiring
-   *                     the lock
-   * @throws InterruptedException If the thread is interrupted while waiting to
-   *                              acquire the lock
-   */
-  boolean tryAcquire(long timeoutMs)
-  throws IOException, InterruptedException;
-
-  /**
-   * Release the lock.
-   * @throws IOException If there is an unrecoverable error releasing the lock
-   * @throws InterruptedException If the thread is interrupted while releasing
-   *                              the lock
-   */
-  void release() throws IOException, InterruptedException;
-
-  /**
-   * If supported, attempts to reap all the locks of this type by forcefully
-   * deleting the locks (both held and attempted) that have expired according
-   * to the given timeout. Lock reaping is different than coordinated lock revocation
-   * in that, there is no coordination, and the behavior is undefined if the
-   * lock holder is still alive.
-   * @throws IOException If there is an unrecoverable error reaping the locks
-   */
-  void reapExpiredLocks(long expireTimeoutMs) throws IOException;
-
-  /**
-   * If supported, attempts to reap all the locks of this type by forcefully
-   * deleting the locks (both held and attempted). Lock reaping is different
-   * than coordinated lock revocation in that, there is no coordination, and
-   * the behavior is undefined if the lock holder is still alive.
-   * Calling this should have the same effect as calling {@link #reapExpiredLocks(long)}
-   * with timeout=0.
-   * @throws IOException If there is an unrecoverable error reaping the locks
-   */
-  void reapAllLocks() throws IOException;
-
-  /**
-   * An interface for objects that process lock metadata.
-   */
-  interface MetadataHandler {
-
-    /**
-     * Called after lock metadata is successfully read from a distributed
-     * lock service. This method may contain any procedures for, e.g.,
-     * printing the metadata in a human-readable format.
-     * @param metadata The metadata
-     */
-    void handleMetadata(byte[] metadata);
-  }
-
-  /**
-   * Visits the locks (both held and attempted) of this type with the given
-   * {@link MetadataHandler}.
-   * @throws IOException If there is an unrecoverable error
-   */
-  void visitLocks(MetadataHandler handler) throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessReadWriteLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessReadWriteLock.java
deleted file mode 100644
index 0f7ddbe..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/InterProcessReadWriteLock.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-
-/**
- * An interface for a distributed reader-writer lock.
- */
-@InterfaceAudience.Private
-public interface InterProcessReadWriteLock {
-
-  /**
-   * Obtain a read lock containing given metadata.
-   * @param metadata Serialized lock metadata (this may contain information
-   *                 such as the process owning the lock or the purpose for
-   *                 which the lock was acquired).
-   * @return An instantiated InterProcessLock instance
-   */
-  InterProcessLock readLock(byte[] metadata);
-
-  /**
-   * Obtain a write lock containing given metadata.
-   * @param metadata Serialized lock metadata (this may contain information
-   *                 such as the process owning the lock or the purpose for
-   *                 which the lock was acquired).
-   * @return An instantiated InterProcessLock instance
-   */
-  InterProcessLock writeLock(byte[] metadata);
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
deleted file mode 100644
index 090acf7..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
+++ /dev/null
@@ -1,459 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper.lock;
-
-import java.io.IOException;
-import java.util.Comparator;
-import java.util.List;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.InterProcessLock;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.zookeeper.DeletionListener;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.KeeperException.BadVersionException;
-import org.apache.zookeeper.data.Stat;
-
-import com.google.common.base.Preconditions;
-
-/**
- * ZooKeeper based HLock implementation. Based on the Shared Locks recipe.
- * (see:
- * <a href="http://zookeeper.apache.org/doc/trunk/recipes.html">
- * ZooKeeper Recipes and Solutions
- * </a>)
- */
-@InterfaceAudience.Private
-public abstract class ZKInterProcessLockBase implements InterProcessLock {
-
-  private static final Log LOG = LogFactory.getLog(ZKInterProcessLockBase.class);
-
-  /** ZNode prefix used by processes acquiring reader locks */
-  protected static final String READ_LOCK_CHILD_NODE_PREFIX = "read-";
-
-  /** ZNode prefix used by processes acquiring writer locks */
-  protected static final String WRITE_LOCK_CHILD_NODE_PREFIX = "write-";
-
-  protected final ZooKeeperWatcher zkWatcher;
-  protected final String parentLockNode;
-  protected final String fullyQualifiedZNode;
-  protected final String childZNode;
-  protected final byte[] metadata;
-  protected final MetadataHandler handler;
-
-  // If we acquire a lock, update this field
-  protected final AtomicReference<AcquiredLock> acquiredLock =
-      new AtomicReference<AcquiredLock>(null);
-
-  /**
-   * Represents information about a lock held by this thread.
-   */
-  protected static class AcquiredLock {
-    private final String path;
-    private final int version;
-
-    /**
-     * Store information about a lock.
-     * @param path The path to a lock's ZNode
-     * @param version The current version of the lock's ZNode
-     */
-    public AcquiredLock(String path, int version) {
-      this.path = path;
-      this.version = version;
-    }
-
-    public String getPath() {
-      return path;
-    }
-
-    public int getVersion() {
-      return version;
-    }
-
-    @Override
-    public String toString() {
-      return "AcquiredLockInfo{" +
-          "path='" + path + '\'' +
-          ", version=" + version +
-          '}';
-    }
-  }
-
-  protected static class ZNodeComparator implements Comparator<String> {
-
-    public static final ZNodeComparator COMPARATOR = new ZNodeComparator();
-
-    private ZNodeComparator() {
-    }
-
-    /** Parses sequenceId from the znode name. ZooKeeper documentation
-     * states: The sequence number is always fixed length of 10 digits, 0 padded
-     */
-    public static long getChildSequenceId(String childZNode) {
-      Preconditions.checkNotNull(childZNode);
-      assert childZNode.length() >= 10;
-      String sequenceIdStr = childZNode.substring(childZNode.length() - 10);
-      return Long.parseLong(sequenceIdStr);
-    }
-
-    @Override
-    public int compare(String zNode1, String zNode2) {
-      long seq1 = getChildSequenceId(zNode1);
-      long seq2 = getChildSequenceId(zNode2);
-      if (seq1 == seq2) {
-        return 0;
-      } else {
-        return seq1 < seq2 ? -1 : 1;
-      }
-    }
-  }
-
-  /**
-   * Called by implementing classes.
-   * @param zkWatcher
-   * @param parentLockNode The lock ZNode path
-   * @param metadata
-   * @param handler
-   * @param childNode The prefix for child nodes created under the parent
-   */
-  protected ZKInterProcessLockBase(ZooKeeperWatcher zkWatcher,
-      String parentLockNode, byte[] metadata, MetadataHandler handler, String childNode) {
-    this.zkWatcher = zkWatcher;
-    this.parentLockNode = parentLockNode;
-    this.fullyQualifiedZNode = ZKUtil.joinZNode(parentLockNode, childNode);
-    this.metadata = metadata;
-    this.handler = handler;
-    this.childZNode = childNode;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void acquire() throws IOException, InterruptedException {
-    tryAcquire(-1);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public boolean tryAcquire(long timeoutMs)
-  throws IOException, InterruptedException {
-    boolean hasTimeout = timeoutMs != -1;
-    long waitUntilMs =
-        hasTimeout ?EnvironmentEdgeManager.currentTime() + timeoutMs : -1;
-    String createdZNode;
-    try {
-      createdZNode = createLockZNode();
-    } catch (KeeperException ex) {
-      throw new IOException("Failed to create znode: " + fullyQualifiedZNode, ex);
-    }
-    while (true) {
-      List<String> children;
-      try {
-        children = ZKUtil.listChildrenNoWatch(zkWatcher, parentLockNode);
-      } catch (KeeperException e) {
-        LOG.error("Unexpected ZooKeeper error when listing children", e);
-        throw new IOException("Unexpected ZooKeeper exception", e);
-      }
-      String pathToWatch;
-      if ((pathToWatch = getLockPath(createdZNode, children)) == null) {
-        break;
-      }
-      CountDownLatch deletedLatch = new CountDownLatch(1);
-      String zkPathToWatch =
-          ZKUtil.joinZNode(parentLockNode, pathToWatch);
-      DeletionListener deletionListener =
-          new DeletionListener(zkWatcher, zkPathToWatch, deletedLatch);
-      zkWatcher.registerListener(deletionListener);
-      try {
-        if (ZKUtil.setWatchIfNodeExists(zkWatcher, zkPathToWatch)) {
-          // Wait for the watcher to fire
-          if (hasTimeout) {
-            long remainingMs = waitUntilMs - EnvironmentEdgeManager.currentTime();
-            if (remainingMs < 0 ||
-                !deletedLatch.await(remainingMs, TimeUnit.MILLISECONDS)) {
-              LOG.warn("Unable to acquire the lock in " + timeoutMs +
-                  " milliseconds.");
-              try {
-                ZKUtil.deleteNode(zkWatcher, createdZNode);
-              } catch (KeeperException e) {
-                LOG.warn("Unable to remove ZNode " + createdZNode);
-              }
-              return false;
-            }
-          } else {
-            deletedLatch.await();
-          }
-          if (deletionListener.hasException()) {
-            Throwable t = deletionListener.getException();
-            throw new IOException("Exception in the watcher", t);
-          }
-        }
-      } catch (KeeperException e) {
-        throw new IOException("Unexpected ZooKeeper exception", e);
-      } finally {
-        zkWatcher.unregisterListener(deletionListener);
-      }
-    }
-    updateAcquiredLock(createdZNode);
-    LOG.debug("Acquired a lock for " + createdZNode);
-    return true;
-  }
-
-  private String createLockZNode() throws KeeperException {
-    try {
-      return ZKUtil.createNodeIfNotExistsNoWatch(zkWatcher, fullyQualifiedZNode,
-          metadata, CreateMode.EPHEMERAL_SEQUENTIAL);
-    } catch (KeeperException.NoNodeException nne) {
-      //create parents, retry
-      ZKUtil.createWithParents(zkWatcher, parentLockNode);
-      return createLockZNode();
-    }
-  }
-
-  /**
-   * Check if a child znode represents a read lock.
-   * @param child The child znode we want to check.
-   * @return whether the child znode represents a read lock
-   */
-  protected static boolean isChildReadLock(String child) {
-    int idx = child.lastIndexOf(ZKUtil.ZNODE_PATH_SEPARATOR);
-    String suffix = child.substring(idx + 1);
-    return suffix.startsWith(READ_LOCK_CHILD_NODE_PREFIX);
-  }
-
-  /**
-   * Check if a child znode represents a write lock.
-   * @param child The child znode we want to check.
-   * @return whether the child znode represents a write lock
-   */
-  protected static boolean isChildWriteLock(String child) {
-    int idx = child.lastIndexOf(ZKUtil.ZNODE_PATH_SEPARATOR);
-    String suffix = child.substring(idx + 1);
-    return suffix.startsWith(WRITE_LOCK_CHILD_NODE_PREFIX);
-  }
-
-  /**
-   * Check if a child znode represents the same type(read or write) of lock
-   * @param child The child znode we want to check.
-   * @return whether the child znode represents the same type(read or write) of lock
-   */
-  protected boolean isChildOfSameType(String child) {
-    int idx = child.lastIndexOf(ZKUtil.ZNODE_PATH_SEPARATOR);
-    String suffix = child.substring(idx + 1);
-    return suffix.startsWith(this.childZNode);
-  }
-
-  /**
-   * Update state as to indicate that a lock is held
-   * @param createdZNode The lock znode
-   * @throws IOException If an unrecoverable ZooKeeper error occurs
-   */
-  protected void updateAcquiredLock(String createdZNode) throws IOException {
-    Stat stat = new Stat();
-    byte[] data = null;
-    Exception ex = null;
-    try {
-      data = ZKUtil.getDataNoWatch(zkWatcher, createdZNode, stat);
-    } catch (KeeperException e) {
-      LOG.warn("Cannot getData for znode:" + createdZNode, e);
-      ex = e;
-    }
-    if (data == null) {
-      LOG.error("Can't acquire a lock on a non-existent node " + createdZNode);
-      throw new IllegalStateException("ZNode " + createdZNode +
-          "no longer exists!", ex);
-    }
-    AcquiredLock newLock = new AcquiredLock(createdZNode, stat.getVersion());
-    if (!acquiredLock.compareAndSet(null, newLock)) {
-      LOG.error("The lock " + fullyQualifiedZNode +
-          " has already been acquired by another process!");
-      throw new IllegalStateException(fullyQualifiedZNode +
-          " is held by another process");
-    }
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  public void release() throws IOException, InterruptedException {
-    AcquiredLock lock = acquiredLock.get();
-    if (lock == null) {
-      LOG.error("Cannot release lock" +
-          ", process does not have a lock for " + fullyQualifiedZNode);
-      throw new IllegalStateException("No lock held for " + fullyQualifiedZNode);
-    }
-    try {
-      if (ZKUtil.checkExists(zkWatcher, lock.getPath()) != -1) {
-        boolean ret = ZKUtil.deleteNode(zkWatcher, lock.getPath(), lock.getVersion());
-        if (!ret && ZKUtil.checkExists(zkWatcher, lock.getPath()) != -1) {
-          throw new IllegalStateException("Couldn't delete " + lock.getPath());
-        }
-        if (!acquiredLock.compareAndSet(lock, null)) {
-          LOG.debug("Current process no longer holds " + lock + " for " +
-              fullyQualifiedZNode);
-          throw new IllegalStateException("Not holding a lock for " +
-              fullyQualifiedZNode +"!");
-        }
-      }
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Released " + lock.getPath());
-      }
-    } catch (BadVersionException e) {
-      throw new IllegalStateException(e);
-    } catch (KeeperException e) {
-      throw new IOException(e);
-    }
-  }
-
-  /**
-   * Process metadata stored in a ZNode using a callback
-   * <p>
-   * @param lockZNode The node holding the metadata
-   * @return True if metadata was ready and processed, false otherwise.
-   */
-  protected boolean handleLockMetadata(String lockZNode) {
-    return handleLockMetadata(lockZNode, handler);
-  }
-
-  /**
-   * Process metadata stored in a ZNode using a callback object passed to
-   * this instance.
-   * <p>
-   * @param lockZNode The node holding the metadata
-   * @param handler the metadata handler
-   * @return True if metadata was ready and processed, false on exception.
-   */
-  protected boolean handleLockMetadata(String lockZNode, MetadataHandler handler) {
-    if (handler == null) {
-      return false;
-    }
-    try {
-      byte[] metadata = ZKUtil.getData(zkWatcher, lockZNode);
-      handler.handleMetadata(metadata);
-    } catch (KeeperException ex) {
-      LOG.warn("Error processing lock metadata in " + lockZNode);
-      return false;
-    } catch (InterruptedException e) {
-      LOG.warn("InterruptedException processing lock metadata in " + lockZNode);
-      Thread.currentThread().interrupt();
-      return false;
-    }
-    return true;
-  }
-
-  @Override
-  public void reapAllLocks() throws IOException {
-    reapExpiredLocks(0);
-  }
-
-  /**
-   * Will delete all lock znodes of this type (either read or write) which are "expired"
-   * according to timeout. Assumption is that the clock skew between zookeeper and this server
-   * is negligible.
-   * Referred in zk recipe as "Revocable Shared Locks with Freaking Laser Beams".
-   * (http://zookeeper.apache.org/doc/trunk/recipes.html).
-   */
-  public void reapExpiredLocks(long timeout) throws IOException {
-    List<String> children;
-    try {
-      children = ZKUtil.listChildrenNoWatch(zkWatcher, parentLockNode);
-    } catch (KeeperException e) {
-      LOG.error("Unexpected ZooKeeper error when listing children", e);
-      throw new IOException("Unexpected ZooKeeper exception", e);
-    }
-    if (children == null) return;
-
-    KeeperException deferred = null;
-    Stat stat = new Stat();
-    long expireDate = System.currentTimeMillis() - timeout; //we are using cTime in zookeeper
-    for (String child : children) {
-      if (isChildOfSameType(child)) {
-        String znode = ZKUtil.joinZNode(parentLockNode, child);
-        try {
-          ZKUtil.getDataNoWatch(zkWatcher, znode, stat);
-          if (stat.getCtime() < expireDate) {
-            LOG.info("Reaping lock for znode:" + znode);
-            ZKUtil.deleteNodeFailSilent(zkWatcher, znode);
-          }
-        } catch (KeeperException ex) {
-          LOG.warn("Error reaping the znode for write lock :" + znode);
-          deferred = ex;
-        }
-      }
-    }
-    if (deferred != null) {
-      throw new IOException("ZK exception while reaping locks:", deferred);
-    }
-  }
-
-  /**
-   * Visits the locks (both held and attempted) with the given MetadataHandler.
-   * @throws IOException If there is an unrecoverable error
-   */
-  public void visitLocks(MetadataHandler handler) throws IOException {
-    List<String> children;
-    try {
-      children = ZKUtil.listChildrenNoWatch(zkWatcher, parentLockNode);
-    } catch (KeeperException e) {
-      LOG.error("Unexpected ZooKeeper error when listing children", e);
-      throw new IOException("Unexpected ZooKeeper exception", e);
-    }
-    if (children != null && children.size() > 0) {
-      for (String child : children) {
-        if (isChildOfSameType(child)) {
-          String znode = ZKUtil.joinZNode(parentLockNode, child);
-          String childWatchesZNode = getLockPath(child, children);
-          if (childWatchesZNode == null) {
-            LOG.info("Lock is held by: " + child);
-          }
-          handleLockMetadata(znode, handler);
-        }
-      }
-    }
-  }
-
-  /**
-   * Determine based on a list of children under a ZNode, whether or not a
-   * process which created a specified ZNode has obtained a lock. If a lock is
-   * not obtained, return the path that we should watch awaiting its deletion.
-   * Otherwise, return null.
-   * This method is abstract as the logic for determining whether or not a
-   * lock is obtained depends on the type of lock being implemented.
-   * @param myZNode The ZNode created by the process attempting to acquire
-   *                a lock
-   * @param children List of all child ZNodes under the lock's parent ZNode
-   * @return The path to watch, or null if myZNode can represent a correctly
-   *         acquired lock.
-   */
-  protected abstract String getLockPath(String myZNode, List<String> children)
-  throws IOException;
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessReadLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessReadLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessReadLock.java
deleted file mode 100644
index 8d20001..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessReadLock.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper.lock;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.SortedSet;
-import java.util.TreeSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-
-/**
- * ZooKeeper based read lock: does not exclude other read locks, but excludes
- * and is excluded by write locks.
- */
-@InterfaceAudience.Private
-public class ZKInterProcessReadLock extends ZKInterProcessLockBase {
-
-  private static final Log LOG = LogFactory.getLog(ZKInterProcessReadLock.class);
-
-  public ZKInterProcessReadLock(ZooKeeperWatcher zooKeeperWatcher,
-      String znode, byte[] metadata, MetadataHandler handler) {
-    super(zooKeeperWatcher, znode, metadata, handler, READ_LOCK_CHILD_NODE_PREFIX);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected String getLockPath(String createdZNode, List<String> children) throws IOException {
-    TreeSet<String> writeChildren =
-        new TreeSet<String>(ZNodeComparator.COMPARATOR);
-    for (String child : children) {
-      if (isChildWriteLock(child)) {
-        writeChildren.add(child);
-      }
-    }
-    if (writeChildren.isEmpty()) {
-      return null;
-    }
-    SortedSet<String> lowerChildren = writeChildren.headSet(createdZNode);
-    if (lowerChildren.isEmpty()) {
-      return null;
-    }
-    String pathToWatch = lowerChildren.last();
-    String nodeHoldingLock = lowerChildren.first();
-    String znode = ZKUtil.joinZNode(parentLockNode, nodeHoldingLock);
-    handleLockMetadata(znode);
-
-    return pathToWatch;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessReadWriteLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessReadWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessReadWriteLock.java
deleted file mode 100644
index caa0f64..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessReadWriteLock.java
+++ /dev/null
@@ -1,66 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper.lock;
-
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.InterProcessLock.MetadataHandler;
-import org.apache.hadoop.hbase.InterProcessReadWriteLock;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-
-/**
- * ZooKeeper based implementation of {@link InterProcessReadWriteLock}. This lock is fair,
- * not reentrant, and not revocable.
- */
-@InterfaceAudience.Private
-public class ZKInterProcessReadWriteLock implements InterProcessReadWriteLock {
-
-  private final ZooKeeperWatcher zkWatcher;
-  private final String znode;
-  private final MetadataHandler handler;
-
-  /**
-   * Creates a DistributedReadWriteLock instance.
-   * @param zkWatcher
-   * @param znode ZNode path for the lock
-   * @param handler An object that will handle de-serializing and processing
-   *                the metadata associated with reader or writer locks
-   *                created by this object or null if none desired.
-   */
-  public ZKInterProcessReadWriteLock(ZooKeeperWatcher zkWatcher, String znode,
-      MetadataHandler handler) {
-    this.zkWatcher = zkWatcher;
-    this.znode = znode;
-    this.handler = handler;
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  public ZKInterProcessReadLock readLock(byte[] metadata) {
-    return new ZKInterProcessReadLock(zkWatcher, znode, metadata, handler);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  public ZKInterProcessWriteLock writeLock(byte[] metadata) {
-    return new ZKInterProcessWriteLock(zkWatcher, znode, metadata, handler);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4fdd6ff9/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessWriteLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessWriteLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessWriteLock.java
deleted file mode 100644
index 6663539..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessWriteLock.java
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper.lock;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.TreeSet;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-
-/**
- * ZooKeeper based write lock:
- */
-@InterfaceAudience.Private
-public class ZKInterProcessWriteLock extends ZKInterProcessLockBase {
-
-  private static final Log LOG = LogFactory.getLog(ZKInterProcessWriteLock.class);
-
-  public ZKInterProcessWriteLock(ZooKeeperWatcher zooKeeperWatcher,
-      String znode, byte[] metadata, MetadataHandler handler) {
-    super(zooKeeperWatcher, znode, metadata, handler, WRITE_LOCK_CHILD_NODE_PREFIX);
-  }
-
-  /**
-   * {@inheritDoc}
-   */
-  @Override
-  protected String getLockPath(String createdZNode, List<String> children) throws IOException {
-    TreeSet<String> sortedChildren =
-        new TreeSet<String>(ZNodeComparator.COMPARATOR);
-    sortedChildren.addAll(children);
-    String pathToWatch = sortedChildren.lower(createdZNode);
-    if (pathToWatch != null) {
-      String nodeHoldingLock = sortedChildren.first();
-      String znode = ZKUtil.joinZNode(parentLockNode, nodeHoldingLock);
-      handleLockMetadata(znode);
-    }
-    return pathToWatch;
-  }
-}
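
The getLockPath() removed above is the standard ZooKeeper lock recipe: sort the sibling lock znodes by their sequential suffix and watch the znode immediately below your own, so each release wakes at most one waiter. A minimal, self-contained sketch of that selection step follows; the class name LockPathSketch, the comparator, and the sample znode names are invented for illustration and are not taken from the deleted HBase classes.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.TreeSet;

/** Illustrative only: pick the sibling znode to watch, as in the ZooKeeper lock recipe. */
public class LockPathSketch {

  // Order znodes by their sequential suffix (the digits after the last '-').
  static final Comparator<String> BY_SEQUENCE =
      Comparator.comparingLong(z -> Long.parseLong(z.substring(z.lastIndexOf('-') + 1)));

  /** Returns the znode just below ours, or null if we hold the lowest node (lock acquired). */
  static String pathToWatch(String createdZNode, List<String> children) {
    TreeSet<String> sorted = new TreeSet<>(BY_SEQUENCE);
    sorted.addAll(children);
    return sorted.lower(createdZNode);
  }

  public static void main(String[] args) {
    List<String> children =
        Arrays.asList("write-0000000002", "read-0000000005", "write-0000000007");
    // The writer at sequence 7 watches the node at sequence 5; the node at 2 holds the lock.
    System.out.println(pathToWatch("write-0000000007", children)); // read-0000000005
    System.out.println(pathToWatch("write-0000000002", children)); // null -> lock acquired
  }
}

In the removed implementation, finding a lower node also triggered handleLockMetadata() on the current lock holder (the first node in the sorted set) before the watch was set.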


[29/50] [abbrv] hbase git commit: HBASE-17357 FIX PerformanceEvaluation parameters parsing triggers NPE.

Posted by el...@apache.org.
HBASE-17357 FIX PerformanceEvaluation parameters parsing triggers NPE.

Check that the command name is not null; if it is null, print usage and exit.

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c74cf129
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c74cf129
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c74cf129

Branch: refs/heads/HBASE-16961
Commit: c74cf12925b810b7a59c5b639834508f00054053
Parents: 7901805
Author: Dave Navarro <mr...@gmail.com>
Authored: Fri Jan 20 10:41:36 2017 -0600
Committer: Michael Stack <st...@apache.org>
Committed: Fri Jan 20 22:51:57 2017 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/PerformanceEvaluation.java    | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/c74cf129/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index d1fb7f8..7f1c640 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -2223,6 +2223,12 @@ public class PerformanceEvaluation extends Configured implements Tool {
         throw new IllegalArgumentException("Number of clients must be > 0");
       }
 
+      // cmdName should not be null, print help and exit
+      if (opts.cmdName == null) {
+        printUsage();
+        return errCode;
+      }
+
       Class<? extends Test> cmdClass = determineCommandClass(opts.cmdName);
       if (cmdClass != null) {
         runTest(cmdClass, opts);
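
The six added lines are the whole fix: opts.cmdName is now checked for null before it is passed to determineCommandClass(), printing usage and returning the error code instead of triggering the NPE. A minimal sketch of the same guard-then-dispatch shape follows; CommandGuardSketch, runCommand, printUsage and ERR_USAGE are hypothetical names, not taken from PerformanceEvaluation.

/** Illustrative only: validate the parsed command name before dispatching on it. */
public class CommandGuardSketch {

  static final int ERR_USAGE = 1;

  static void printUsage() {
    System.err.println("usage: tool <command> [options]");
  }

  /** Returns a process exit code; never dereferences a null command name. */
  static int runCommand(String cmdName) {
    if (cmdName == null) {        // the missing guard that caused the NPE
      printUsage();
      return ERR_USAGE;
    }
    System.out.println("running " + cmdName);
    return 0;
  }

  public static void main(String[] args) {
    int code = runCommand(args.length > 0 ? args[0] : null);
    System.exit(code);
  }
}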


[17/50] [abbrv] hbase git commit: HBASE-17480 Remove split region code from Region Server (Stephen Yuan Jiang)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/bff7c4f1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
deleted file mode 100644
index 1c31d21..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
+++ /dev/null
@@ -1,402 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Matchers.eq;
-import static org.mockito.Mockito.*;
-
-import org.mockito.Mockito;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
-import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
-import org.apache.hadoop.hbase.io.hfile.LruBlockCache;
-import org.apache.hadoop.hbase.testclassification.RegionServerTests;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.PairOfSameType;
-import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import com.google.common.collect.ImmutableList;
-
-/**
- * Test the {@link SplitTransactionImpl} class against an HRegion (as opposed to
- * running cluster).
- */
-@Category({RegionServerTests.class, SmallTests.class})
-public class TestSplitTransaction {
-  private final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private final Path testdir =
-    TEST_UTIL.getDataTestDir(this.getClass().getName());
-  private HRegion parent;
-  private WALFactory wals;
-  private FileSystem fs;
-  private static final byte [] STARTROW = new byte [] {'a', 'a', 'a'};
-  // '{' is next ascii after 'z'.
-  private static final byte [] ENDROW = new byte [] {'{', '{', '{'};
-  private static final byte [] GOOD_SPLIT_ROW = new byte [] {'d', 'd', 'd'};
-  private static final byte [] CF = HConstants.CATALOG_FAMILY;
-
-  private static boolean preRollBackCalled = false;
-  private static boolean postRollBackCalled = false;
-
-  @Before public void setup() throws IOException {
-    this.fs = FileSystem.get(TEST_UTIL.getConfiguration());
-    TEST_UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, CustomObserver.class.getName());
-    this.fs.delete(this.testdir, true);
-    final Configuration walConf = new Configuration(TEST_UTIL.getConfiguration());
-    FSUtils.setRootDir(walConf, this.testdir);
-    this.wals = new WALFactory(walConf, null, this.getClass().getName());
-
-    this.parent = createRegion(this.testdir, this.wals);
-    RegionCoprocessorHost host = new RegionCoprocessorHost(this.parent, null, TEST_UTIL.getConfiguration());
-    this.parent.setCoprocessorHost(host);
-    TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster", true);
-  }
-
-  @After public void teardown() throws IOException {
-    if (this.parent != null && !this.parent.isClosed()) this.parent.close();
-    Path regionDir = this.parent.getRegionFileSystem().getRegionDir();
-    if (this.fs.exists(regionDir) && !this.fs.delete(regionDir, true)) {
-      throw new IOException("Failed delete of " + regionDir);
-    }
-    if (this.wals != null) {
-      this.wals.close();
-    }
-    this.fs.delete(this.testdir, true);
-  }
-
-  @Test public void testFailAfterPONR() throws IOException, KeeperException {
-    final int rowcount = TEST_UTIL.loadRegion(this.parent, CF);
-    assertTrue(rowcount > 0);
-    int parentRowCount = TEST_UTIL.countRows(this.parent);
-    assertEquals(rowcount, parentRowCount);
-
-    // Start transaction.
-    SplitTransactionImpl st = prepareGOOD_SPLIT_ROW();
-    SplitTransactionImpl spiedUponSt = spy(st);
-    Mockito
-        .doThrow(new MockedFailedDaughterOpen())
-        .when(spiedUponSt)
-        .openDaughterRegion((Server) Mockito.anyObject(),
-            (HRegion) Mockito.anyObject());
-
-    // Run the execute.  Look at what it returns.
-    boolean expectedException = false;
-    Server mockServer = Mockito.mock(Server.class);
-    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
-    try {
-      spiedUponSt.execute(mockServer, null);
-    } catch (IOException e) {
-      if (e.getCause() != null &&
-          e.getCause() instanceof MockedFailedDaughterOpen) {
-        expectedException = true;
-      }
-    }
-    assertTrue(expectedException);
-    // Run rollback returns that we should restart.
-    assertFalse(spiedUponSt.rollback(null, null));
-    // Make sure that region a and region b are still in the filesystem, that
-    // they have not been removed; this is supposed to be the case if we go
-    // past point of no return.
-    Path tableDir =  this.parent.getRegionFileSystem().getTableDir();
-    Path daughterADir = new Path(tableDir, spiedUponSt.getFirstDaughter().getEncodedName());
-    Path daughterBDir = new Path(tableDir, spiedUponSt.getSecondDaughter().getEncodedName());
-    assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterADir));
-    assertTrue(TEST_UTIL.getTestFileSystem().exists(daughterBDir));
-  }
-
-  /**
-   * Test straight prepare works.  Tries to split on {@link #GOOD_SPLIT_ROW}
-   * @throws IOException
-   */
-  @Test public void testPrepare() throws IOException {
-    prepareGOOD_SPLIT_ROW();
-  }
-
-  private SplitTransactionImpl prepareGOOD_SPLIT_ROW() throws IOException {
-    return prepareGOOD_SPLIT_ROW(this.parent);
-  }
-
-  private SplitTransactionImpl prepareGOOD_SPLIT_ROW(final HRegion parentRegion)
-      throws IOException {
-    SplitTransactionImpl st = new SplitTransactionImpl(parentRegion, GOOD_SPLIT_ROW);
-    assertTrue(st.prepare());
-    return st;
-  }
-
-  /**
-   * Pass a reference store
-   */
-  @Test public void testPrepareWithRegionsWithReference() throws IOException {
-    HStore storeMock = Mockito.mock(HStore.class);
-    when(storeMock.hasReferences()).thenReturn(true);
-    when(storeMock.getFamily()).thenReturn(new HColumnDescriptor("cf"));
-    when(storeMock.getSizeToFlush()).thenReturn(new MemstoreSize());
-    when(storeMock.close()).thenReturn(ImmutableList.<StoreFile>of());
-    this.parent.stores.put(Bytes.toBytes(""), storeMock);
-
-    SplitTransactionImpl st = new SplitTransactionImpl(this.parent, GOOD_SPLIT_ROW);
-
-    assertFalse("a region should not be splittable if it has instances of store file references",
-                st.prepare());
-  }
-
-  /**
-   * Test SplitTransactionListener
-   */
-  @Test public void testSplitTransactionListener() throws IOException {
-    SplitTransactionImpl st = new SplitTransactionImpl(this.parent, GOOD_SPLIT_ROW);
-    SplitTransaction.TransactionListener listener =
-            Mockito.mock(SplitTransaction.TransactionListener.class);
-    st.registerTransactionListener(listener);
-    st.prepare();
-    Server mockServer = Mockito.mock(Server.class);
-    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
-    PairOfSameType<Region> daughters = st.execute(mockServer, null);
-    verify(listener).transition(st, SplitTransaction.SplitTransactionPhase.STARTED,
-            SplitTransaction.SplitTransactionPhase.PREPARED);
-    verify(listener, times(15)).transition(any(SplitTransaction.class),
-            any(SplitTransaction.SplitTransactionPhase.class),
-            any(SplitTransaction.SplitTransactionPhase.class));
-    verifyNoMoreInteractions(listener);
-  }
-
-  /**
-   * Pass an unreasonable split row.
-   */
-  @Test public void testPrepareWithBadSplitRow() throws IOException {
-    // Pass start row as split key.
-    SplitTransactionImpl st = new SplitTransactionImpl(this.parent, STARTROW);
-    assertFalse(st.prepare());
-    st = new SplitTransactionImpl(this.parent, HConstants.EMPTY_BYTE_ARRAY);
-    assertFalse(st.prepare());
-    st = new SplitTransactionImpl(this.parent, new byte [] {'A', 'A', 'A'});
-    assertFalse(st.prepare());
-    st = new SplitTransactionImpl(this.parent, ENDROW);
-    assertFalse(st.prepare());
-  }
-
-  @Test public void testPrepareWithClosedRegion() throws IOException {
-    this.parent.close();
-    SplitTransactionImpl st = new SplitTransactionImpl(this.parent, GOOD_SPLIT_ROW);
-    assertFalse(st.prepare());
-  }
-
-  @Test public void testWholesomeSplit() throws IOException {
-    final int rowcount = TEST_UTIL.loadRegion(this.parent, CF, true);
-    assertTrue(rowcount > 0);
-    int parentRowCount = TEST_UTIL.countRows(this.parent);
-    assertEquals(rowcount, parentRowCount);
-
-    // Pretend region's blocks are not in the cache, used for
-    // testWholesomeSplitWithHFileV1
-    CacheConfig cacheConf = new CacheConfig(TEST_UTIL.getConfiguration());
-    ((LruBlockCache) cacheConf.getBlockCache()).clearCache();
-
-    // Start transaction.
-    SplitTransactionImpl st = prepareGOOD_SPLIT_ROW();
-
-    // Run the execute.  Look at what it returns.
-    Server mockServer = Mockito.mock(Server.class);
-    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
-    PairOfSameType<Region> daughters = st.execute(mockServer, null);
-    // Do some assertions about execution.
-    assertTrue(this.fs.exists(this.parent.getRegionFileSystem().getSplitsDir()));
-    // Assert the parent region is closed.
-    assertTrue(this.parent.isClosed());
-
-    // Assert splitdir is empty -- because its content will have been moved out
-    // to be under the daughter region dirs.
-    assertEquals(0, this.fs.listStatus(this.parent.getRegionFileSystem().getSplitsDir()).length);
-    // Check daughters have correct key span.
-    assertTrue(Bytes.equals(parent.getRegionInfo().getStartKey(),
-      daughters.getFirst().getRegionInfo().getStartKey()));
-    assertTrue(Bytes.equals(GOOD_SPLIT_ROW, daughters.getFirst().getRegionInfo().getEndKey()));
-    assertTrue(Bytes.equals(daughters.getSecond().getRegionInfo().getStartKey(), GOOD_SPLIT_ROW));
-    assertTrue(Bytes.equals(parent.getRegionInfo().getEndKey(),
-      daughters.getSecond().getRegionInfo().getEndKey()));
-    // Count rows. daughters are already open
-    int daughtersRowCount = 0;
-    for (Region openRegion: daughters) {
-      try {
-        int count = TEST_UTIL.countRows(openRegion);
-        assertTrue(count > 0 && count != rowcount);
-        daughtersRowCount += count;
-      } finally {
-        HBaseTestingUtility.closeRegionAndWAL(openRegion);
-      }
-    }
-    assertEquals(rowcount, daughtersRowCount);
-    // Assert the write lock is no longer held on parent
-    assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread());
-  }
-
-  @Test
-  public void testCountReferencesFailsSplit() throws IOException {
-    final int rowcount = TEST_UTIL.loadRegion(this.parent, CF);
-    assertTrue(rowcount > 0);
-    int parentRowCount = TEST_UTIL.countRows(this.parent);
-    assertEquals(rowcount, parentRowCount);
-
-    // Start transaction.
-    HRegion spiedRegion = spy(this.parent);
-    SplitTransactionImpl st = prepareGOOD_SPLIT_ROW(spiedRegion);
-    SplitTransactionImpl spiedUponSt = spy(st);
-    doThrow(new IOException("Failing split. Expected reference file count isn't equal."))
-        .when(spiedUponSt).assertReferenceFileCount(anyInt(),
-        eq(new Path(this.parent.getRegionFileSystem().getTableDir(),
-            st.getSecondDaughter().getEncodedName())));
-
-    // Run the execute.  Look at what it returns.
-    boolean expectedException = false;
-    Server mockServer = Mockito.mock(Server.class);
-    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
-    try {
-      spiedUponSt.execute(mockServer, null);
-    } catch (IOException e) {
-      expectedException = true;
-    }
-    assertTrue(expectedException);
-  }
-
-
-  @Test public void testRollback() throws IOException {
-    final int rowcount = TEST_UTIL.loadRegion(this.parent, CF);
-    assertTrue(rowcount > 0);
-    int parentRowCount = TEST_UTIL.countRows(this.parent);
-    assertEquals(rowcount, parentRowCount);
-
-    // Start transaction.
-    HRegion spiedRegion = spy(this.parent);
-    SplitTransactionImpl st = prepareGOOD_SPLIT_ROW(spiedRegion);
-    SplitTransactionImpl spiedUponSt = spy(st);
-    doNothing().when(spiedUponSt).assertReferenceFileCount(anyInt(),
-        eq(parent.getRegionFileSystem().getSplitsDir(st.getFirstDaughter())));
-    when(spiedRegion.createDaughterRegionFromSplits(spiedUponSt.getSecondDaughter())).
-        thenThrow(new MockedFailedDaughterCreation());
-    // Run the execute.  Look at what it returns.
-    boolean expectedException = false;
-    Server mockServer = Mockito.mock(Server.class);
-    when(mockServer.getConfiguration()).thenReturn(TEST_UTIL.getConfiguration());
-    try {
-      spiedUponSt.execute(mockServer, null);
-    } catch (MockedFailedDaughterCreation e) {
-      expectedException = true;
-    }
-    assertTrue(expectedException);
-    // Run rollback
-    assertTrue(spiedUponSt.rollback(null, null));
-
-    // Assert I can scan parent.
-    int parentRowCount2 = TEST_UTIL.countRows(this.parent);
-    assertEquals(parentRowCount, parentRowCount2);
-
-    // Assert rollback cleaned up stuff in fs
-    assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, st.getFirstDaughter())));
-    assertTrue(!this.fs.exists(HRegion.getRegionDir(this.testdir, st.getSecondDaughter())));
-    assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread());
-
-    // Now retry the split but do not throw an exception this time.
-    assertTrue(st.prepare());
-    PairOfSameType<Region> daughters = st.execute(mockServer, null);
-    // Count rows. daughters are already open
-    int daughtersRowCount = 0;
-    for (Region openRegion: daughters) {
-      try {
-        int count = TEST_UTIL.countRows(openRegion);
-        assertTrue(count > 0 && count != rowcount);
-        daughtersRowCount += count;
-      } finally {
-        HBaseTestingUtility.closeRegionAndWAL(openRegion);
-      }
-    }
-    assertEquals(rowcount, daughtersRowCount);
-    // Assert the write lock is no longer held on parent
-    assertTrue(!this.parent.lock.writeLock().isHeldByCurrentThread());
-    assertTrue("Rollback hooks should be called.", wasRollBackHookCalled());
-  }
-
-  private boolean wasRollBackHookCalled(){
-    return (preRollBackCalled && postRollBackCalled);
-  }
-
-  /**
-   * Exception used in this class only.
-   */
-  @SuppressWarnings("serial")
-  private class MockedFailedDaughterCreation extends IOException {}
-  private class MockedFailedDaughterOpen extends IOException {}
-
-  HRegion createRegion(final Path testdir, final WALFactory wals)
-  throws IOException {
-    // Make a region with start and end keys. Use 'aaa', to 'AAA'.  The load
-    // region utility will add rows between 'aaa' and 'zzz'.
-    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("table"));
-    HColumnDescriptor hcd = new HColumnDescriptor(CF);
-    htd.addFamily(hcd);
-    HRegionInfo hri = new HRegionInfo(htd.getTableName(), STARTROW, ENDROW);
-    HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, testdir, TEST_UTIL.getConfiguration(),
-        htd);
-    HBaseTestingUtility.closeRegionAndWAL(r);
-    return HRegion.openHRegion(testdir, hri, htd,
-      wals.getWAL(hri.getEncodedNameAsBytes(), hri.getTable().getNamespace()),
-      TEST_UTIL.getConfiguration());
-  }
-
-  public static class CustomObserver extends BaseRegionObserver{
-    @Override
-    public void preRollBackSplit(
-        ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
-      preRollBackCalled = true;
-    }
-
-    @Override
-    public void postRollBackSplit(
-        ObserverContext<RegionCoprocessorEnvironment> ctx) throws IOException {
-      postRollBackCalled = true;
-    }
-  }
-
-}
-
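
The removed test above leaned on one pattern that remains useful for whatever replaces it: spy the object under test, stub a single internal step to throw, run the operation, and assert that rollback restores the original state. A compressed, self-contained sketch of that structure follows; Transaction, riskyStep() and RollbackSketchTest are invented stand-ins, not the removed SplitTransactionImpl API.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

import java.io.IOException;
import org.junit.Test;

/** Illustrative only: force a failure mid-operation with a spy, then verify rollback. */
public class RollbackSketchTest {

  /** Hypothetical stand-in for the object under test. */
  static class Transaction {
    int state = 0;

    void execute() throws IOException {
      state = 1;
      riskyStep();      // stubbed to fail in the test
      state = 2;
    }

    void riskyStep() throws IOException { }

    boolean rollback() {
      state = 0;
      return true;
    }
  }

  @Test
  public void failureTriggersRollback() throws Exception {
    Transaction spied = spy(new Transaction());
    doThrow(new IOException("injected failure")).when(spied).riskyStep();

    boolean failed = false;
    try {
      spied.execute();
    } catch (IOException expected) {
      failed = true;
    }
    assertTrue(failed);

    // Rollback should succeed and restore the pre-execute state.
    assertTrue(spied.rollback());
    assertEquals(0, spied.state);
  }
}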


[40/50] [abbrv] hbase git commit: HBASE-17067 Procedure v2 - remove zklock/tryLock and use wait/wake (Matteo Bertozzi)

Posted by el...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/980c8c20/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
index a8192be..3e88890 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.hbase.master.procedure;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
@@ -32,7 +31,6 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure;
 import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.procedure2.util.StringUtils;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
@@ -68,60 +66,6 @@ public class TestMasterProcedureSchedulerConcurrency {
     queue.clear();
   }
 
-  @Test(timeout=60000)
-  public void testConcurrentCreateDelete() throws Exception {
-    final MasterProcedureScheduler procQueue = queue;
-    final TableName table = TableName.valueOf("testtb");
-    final AtomicBoolean running = new AtomicBoolean(true);
-    final AtomicBoolean failure = new AtomicBoolean(false);
-    Thread createThread = new Thread() {
-      @Override
-      public void run() {
-        try {
-          TestTableProcedure proc = new TestTableProcedure(1, table,
-              TableProcedureInterface.TableOperationType.CREATE);
-          while (running.get() && !failure.get()) {
-            if (procQueue.tryAcquireTableExclusiveLock(proc, table)) {
-              procQueue.releaseTableExclusiveLock(proc, table);
-            }
-          }
-        } catch (Throwable e) {
-          LOG.error("create failed", e);
-          failure.set(true);
-        }
-      }
-    };
-
-    Thread deleteThread = new Thread() {
-      @Override
-      public void run() {
-        try {
-          TestTableProcedure proc = new TestTableProcedure(2, table,
-              TableProcedureInterface.TableOperationType.DELETE);
-          while (running.get() && !failure.get()) {
-            if (procQueue.tryAcquireTableExclusiveLock(proc, table)) {
-              procQueue.releaseTableExclusiveLock(proc, table);
-            }
-            procQueue.markTableAsDeleted(table, proc);
-          }
-        } catch (Throwable e) {
-          LOG.error("delete failed", e);
-          failure.set(true);
-        }
-      }
-    };
-
-    createThread.start();
-    deleteThread.start();
-    for (int i = 0; i < 100 && running.get() && !failure.get(); ++i) {
-      Thread.sleep(100);
-    }
-    running.set(false);
-    createThread.join();
-    deleteThread.join();
-    assertEquals(false, failure.get());
-  }
-
   /**
    * Verify that "write" operations for a single table are serialized,
    * but different tables can be executed in parallel.
@@ -237,26 +181,22 @@ public class TestMasterProcedureSchedulerConcurrency {
 
     public Procedure acquire() {
       Procedure proc = null;
-      boolean avail = false;
-      while (!avail) {
-        proc = queue.poll();
-        if (proc == null) break;
+      boolean waiting = true;
+      while (waiting && queue.size() > 0) {
+        proc = queue.poll(100000000L);
+        if (proc == null) continue;
         switch (getTableOperationType(proc)) {
           case CREATE:
           case DELETE:
           case EDIT:
-            avail = queue.tryAcquireTableExclusiveLock(proc, getTableName(proc));
+            waiting = queue.waitTableExclusiveLock(proc, getTableName(proc));
             break;
           case READ:
-            avail = queue.tryAcquireTableSharedLock(proc, getTableName(proc));
+            waiting = queue.waitTableSharedLock(proc, getTableName(proc));
             break;
           default:
             throw new UnsupportedOperationException();
         }
-        if (!avail) {
-          addFront(proc);
-          LOG.debug("yield procId=" + proc);
-        }
       }
       return proc;
     }
@@ -266,10 +206,10 @@ public class TestMasterProcedureSchedulerConcurrency {
         case CREATE:
         case DELETE:
         case EDIT:
-          queue.releaseTableExclusiveLock(proc, getTableName(proc));
+          queue.wakeTableExclusiveLock(proc, getTableName(proc));
           break;
         case READ:
-          queue.releaseTableSharedLock(proc, getTableName(proc));
+          queue.wakeTableSharedLock(proc, getTableName(proc));
           break;
       }
     }
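
The hunk above swaps the tryAcquireTableExclusiveLock/releaseTableExclusiveLock polling loop (with its addFront re-queueing) for the new waitTableExclusiveLock/wakeTableExclusiveLock calls, so a caller that cannot take a table lock is suspended and later woken rather than spinning. The sketch below shows the general wait-then-wake pattern using only java.util.concurrent primitives; ExclusiveSlot and its acquire/release methods are invented for illustration and are not the MasterProcedureScheduler API.

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

/** Illustrative only: block until the resource is free instead of polling tryLock in a loop. */
public class ExclusiveSlot {

  private final ReentrantLock lock = new ReentrantLock();
  private final Condition released = lock.newCondition();
  private boolean held = false;

  /** Parks the calling thread until the slot is free, then takes it. */
  public void acquire() throws InterruptedException {
    lock.lock();
    try {
      while (held) {
        released.await();   // no busy re-queueing; the releaser wakes us
      }
      held = true;
    } finally {
      lock.unlock();
    }
  }

  /** Frees the slot and wakes one waiter. */
  public void release() {
    lock.lock();
    try {
      held = false;
      released.signal();
    } finally {
      lock.unlock();
    }
  }

  public static void main(String[] args) throws Exception {
    ExclusiveSlot slot = new ExclusiveSlot();
    slot.acquire();
    Thread worker = new Thread(() -> {
      try {
        slot.acquire();               // blocks until main releases
        System.out.println("worker got the slot");
        slot.release();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    worker.start();
    Thread.sleep(100);
    slot.release();                   // wakes the worker
    worker.join();
  }
}

The design point matches the patch: the releaser, not the waiter, decides when a blocked party runs again, which removes the yield/re-queue churn visible in the deleted lines.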


[27/50] [abbrv] hbase git commit: HBASE-17488 WALEdit should be lazily instantiated (ChiaPing Tsai)

Posted by el...@apache.org.
HBASE-17488 WALEdit should be lazily instantiated (ChiaPing Tsai)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2285c57a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2285c57a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2285c57a

Branch: refs/heads/HBASE-16961
Commit: 2285c57a352aa4607636205a8b99ecb8ea1ff51e
Parents: 2ee3c73
Author: Michael Stack <st...@apache.org>
Authored: Fri Jan 20 09:37:48 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Fri Jan 20 09:37:48 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/HRegion.java       | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/2285c57a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0b93cb1..5fc53d8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3234,11 +3234,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         if (fromCP != null) {
           cellCount += fromCP.size();
         }
-        for (List<Cell> cells : familyMaps[i].values()) {
-          cellCount += cells.size();
+        if (getEffectiveDurability(mutation.getDurability()) != Durability.SKIP_WAL) {
+          for (List<Cell> cells : familyMaps[i].values()) {
+            cellCount += cells.size();
+          }
         }
       }
-      walEdit = new WALEdit(cellCount, replay);
       lock(this.updatesLock.readLock(), numReadyToWrite);
       locked = true;
 
@@ -3260,6 +3261,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
             if (cpMutations == null) {
               continue;
             }
+            Mutation mutation = batchOp.getMutation(i);
+            boolean skipWal = getEffectiveDurability(mutation.getDurability()) == Durability.SKIP_WAL;
             // Else Coprocessor added more Mutations corresponding to the Mutation at this index.
             for (int j = 0; j < cpMutations.length; j++) {
               Mutation cpMutation = cpMutations[j];
@@ -3272,12 +3275,22 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
               // Returned mutations from coprocessor correspond to the Mutation at index i. We can
               // directly add the cells from those mutations to the familyMaps of this mutation.
               mergeFamilyMaps(familyMaps[i], cpFamilyMap); // will get added to the memstore later
+
+              // The durability of returned mutation is replaced by the corresponding mutation.
+              // If the corresponding mutation contains the SKIP_WAL, we shouldn't count the
+              // cells of returned mutation.
+              if (!skipWal) {
+                for (List<Cell> cells : cpFamilyMap.values()) {
+                  cellCount += cells.size();
+                }
+              }
             }
           }
         }
       }
 
       // STEP 3. Build WAL edit
+      walEdit = new WALEdit(cellCount, replay);
       Durability durability = Durability.USE_DEFAULT;
       for (int i = firstIndex; i < lastIndexExclusive; i++) {
         // Skip puts that were determined to be invalid during preprocessing
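
The change defers constructing the WALEdit until the cell count is final, and cells belonging to mutations whose effective durability is SKIP_WAL (including coprocessor-added mutations, which take the durability of the mutation they correspond to) are no longer counted toward it. A generic sketch of that count-first, allocate-later shape follows; Record, the skip flag and buildBatch() are invented names, not HRegion internals.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/** Illustrative only: size the container from the items that will actually be written,
 *  and construct it only once the final count is known. */
public class LazyBatchSketch {

  static class Record {
    final String value;
    final boolean skip;   // analogous to an effective durability of SKIP_WAL

    Record(String value, boolean skip) {
      this.value = value;
      this.skip = skip;
    }
  }

  static List<String> buildBatch(List<Record> records) {
    // Pass 1: count only what will actually be written.
    int count = 0;
    for (Record r : records) {
      if (!r.skip) {
        count++;
      }
    }
    // Allocate once, with the exact capacity, only after counting.
    List<String> batch = new ArrayList<>(count);
    for (Record r : records) {
      if (!r.skip) {
        batch.add(r.value);
      }
    }
    return batch;
  }

  public static void main(String[] args) {
    List<Record> records = Arrays.asList(
        new Record("a", false), new Record("b", true), new Record("c", false));
    System.out.println(buildBatch(records));   // [a, c]
  }
}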


[11/50] [abbrv] hbase git commit: HBASE-17470 Remove merge region code from region server (Stephen Yuan Jiang)

Posted by el...@apache.org.
HBASE-17470 Remove merge region code from region server (Stephen Yuan Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/805d39fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/805d39fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/805d39fc

Branch: refs/heads/HBASE-16961
Commit: 805d39fca6b6be004d2c554cc5d4f76bf48bc01a
Parents: 287f95a
Author: Stephen Yuan Jiang <sy...@gmail.com>
Authored: Tue Jan 17 15:39:51 2017 -0800
Committer: Stephen Yuan Jiang <sy...@gmail.com>
Committed: Tue Jan 17 15:39:51 2017 -0800

----------------------------------------------------------------------
 .../hbase/client/ConnectionImplementation.java  |    7 -
 .../hbase/shaded/protobuf/ProtobufUtil.java     |   63 -
 .../hbase/shaded/protobuf/RequestConverter.java |   18 -
 .../shaded/protobuf/generated/AdminProtos.java  | 1759 +-------
 .../generated/MasterProcedureProtos.java        | 1864 +-------
 .../shaded/protobuf/generated/MasterProtos.java | 4160 ++++++------------
 .../src/main/protobuf/Admin.proto               |   19 -
 .../src/main/protobuf/Master.proto              |   19 -
 .../src/main/protobuf/MasterProcedure.proto     |   15 -
 .../BaseMasterAndRegionObserver.java            |   12 +
 .../hbase/coprocessor/BaseMasterObserver.java   |   12 +
 .../hbase/coprocessor/MasterObserver.java       |    8 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   50 -
 .../hbase/master/MasterCoprocessorHost.java     |   22 -
 .../hadoop/hbase/master/MasterRpcServices.java  |   46 -
 .../hadoop/hbase/master/MasterServices.java     |   17 -
 .../hadoop/hbase/master/ServerManager.java      |   31 -
 .../DispatchMergingRegionsProcedure.java        |  579 ---
 .../hbase/regionserver/CompactSplitThread.java  |   14 -
 .../hadoop/hbase/regionserver/HRegion.java      |  106 -
 .../hbase/regionserver/RSRpcServices.java       |   42 -
 .../hbase/regionserver/RegionMergeRequest.java  |  153 -
 .../RegionMergeTransactionFactory.java          |   76 -
 .../RegionMergeTransactionImpl.java             |  742 ----
 .../org/apache/hadoop/hbase/util/HMerge.java    |  348 --
 .../org/apache/hadoop/hbase/util/Merge.java     |  264 --
 .../apache/hadoop/hbase/client/TestAdmin1.java  |   50 +-
 .../coprocessor/TestRegionServerObserver.java   |  223 -
 .../hbase/master/MockNoopMasterServices.java    |   10 -
 .../hadoop/hbase/master/MockRegionServer.java   |    9 -
 .../TestDispatchMergingRegionsProcedure.java    |  264 --
 .../hadoop/hbase/regionserver/TestHRegion.java  |   49 -
 .../TestRegionMergeTransaction.java             |  485 --
 .../hadoop/hbase/util/TestMergeTable.java       |  172 -
 .../apache/hadoop/hbase/util/TestMergeTool.java |  301 --
 35 files changed, 1650 insertions(+), 10359 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index cfed9f6..adbc7f9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -1312,13 +1312,6 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       }
 
       @Override
-      public MasterProtos.DispatchMergingRegionsResponse dispatchMergingRegions(
-          RpcController controller, MasterProtos.DispatchMergingRegionsRequest request)
-          throws ServiceException {
-        return stub.dispatchMergingRegions(controller, request);
-      }
-
-      @Override
       public MasterProtos.MergeTableRegionsResponse mergeTableRegions(
           RpcController controller, MasterProtos.MergeTableRegionsRequest request)
           throws ServiceException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 585a5f8..d862d5f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -121,7 +121,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerIn
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetServerInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetStoreFileResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.MergeRegionsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ServerInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest;
@@ -1950,46 +1949,6 @@ public final class ProtobufUtil {
     }
   }
 
-  /**
-   * A helper to merge regions using admin protocol. Send request to
-   * regionserver.
-   * @param admin
-   * @param region_a
-   * @param region_b
-   * @param forcible true if do a compulsory merge, otherwise we will only merge
-   *          two adjacent regions
-   * @param user effective user
-   * @throws IOException
-   */
-  public static void mergeRegions(final RpcController controller,
-      final AdminService.BlockingInterface admin,
-      final HRegionInfo region_a, final HRegionInfo region_b,
-      final boolean forcible, final User user) throws IOException {
-    final MergeRegionsRequest request = ProtobufUtil.buildMergeRegionsRequest(
-        region_a.getRegionName(), region_b.getRegionName(),forcible);
-    if (user != null) {
-      try {
-        user.runAs(new PrivilegedExceptionAction<Void>() {
-          @Override
-          public Void run() throws Exception {
-            admin.mergeRegions(controller, request);
-            return null;
-          }
-        });
-      } catch (InterruptedException ie) {
-        InterruptedIOException iioe = new InterruptedIOException();
-        iioe.initCause(ie);
-        throw iioe;
-      }
-    } else {
-      try {
-        admin.mergeRegions(controller, request);
-      } catch (ServiceException se) {
-        throw ProtobufUtil.getRemoteException(se);
-      }
-    }
-  }
-
 // End helpers for Admin
 
   /*
@@ -3225,28 +3184,6 @@ public final class ProtobufUtil {
    }
 
   /**
-   * Create a MergeRegionsRequest for the given regions
-   * @param regionA name of region a
-   * @param regionB name of region b
-   * @param forcible true if it is a compulsory merge
-   * @return a MergeRegionsRequest
-   */
-  public static MergeRegionsRequest buildMergeRegionsRequest(
-      final byte[] regionA, final byte[] regionB, final boolean forcible) {
-    MergeRegionsRequest.Builder builder = MergeRegionsRequest.newBuilder();
-    RegionSpecifier regionASpecifier = RequestConverter.buildRegionSpecifier(
-        RegionSpecifierType.REGION_NAME, regionA);
-    RegionSpecifier regionBSpecifier = RequestConverter.buildRegionSpecifier(
-        RegionSpecifierType.REGION_NAME, regionB);
-    builder.setRegionA(regionASpecifier);
-    builder.setRegionB(regionBSpecifier);
-    builder.setForcible(forcible);
-    // send the master's wall clock time as well, so that the RS can refer to it
-    builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
-    return builder.build();
-  }
-
-  /**
    * Get a ServerName from the passed in data bytes.
    * @param data Data with a serialize server name in it; can handle the old style
    * servername where servername was host and port.  Works too with data that

http://git-wip-us.apache.org/repos/asf/hbase/blob/805d39fc/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 4acb525..fd08d98 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -84,7 +84,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTabl
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
@@ -1089,23 +1088,6 @@ public final class RequestConverter {
     return builder.build();
   }
 
-  public static DispatchMergingRegionsRequest buildDispatchMergingRegionsRequest(
-      final byte[] encodedNameOfRegionA,
-      final byte[] encodedNameOfRegionB,
-      final boolean forcible,
-      final long nonceGroup,
-      final long nonce) throws DeserializationException {
-    DispatchMergingRegionsRequest.Builder builder = DispatchMergingRegionsRequest.newBuilder();
-    builder.setRegionA(buildRegionSpecifier(
-        RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfRegionA));
-    builder.setRegionB(buildRegionSpecifier(
-        RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfRegionB));
-    builder.setForcible(forcible);
-    builder.setNonceGroup(nonceGroup);
-    builder.setNonce(nonce);
-    return builder.build();
-  }
-
   public static MergeTableRegionsRequest buildMergeTableRegionsRequest(
       final byte[][] encodedNameOfdaughaterRegions,
       final boolean forcible,