Posted to commits@hbase.apache.org by sy...@apache.org on 2016/06/11 04:55:47 UTC

[01/50] hbase git commit: HBASE-15918 Clean up excludes/includes files after use in hbase-personality.sh to avoid ASF license error. (Apekshit)

Repository: hbase
Updated Branches:
  refs/heads/hbase-12439 a11091c49 -> bd45cf347


HBASE-15918 Clean up excludes/includes files after use in hbase-personality.sh to avoid ASF license error. (Apekshit)

Change-Id: I3a47113ef9f598f90357b7cb580cbdc56f3c46f4

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75c23605
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75c23605
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75c23605

Branch: refs/heads/hbase-12439
Commit: 75c23605430266da0f30eef04b97ebd4b30c60b8
Parents: f0a1e22
Author: Apekshit <ap...@gmail.com>
Authored: Mon May 30 20:22:29 2016 -0700
Committer: stack <st...@apache.org>
Committed: Mon May 30 21:30:19 2016 -0700

----------------------------------------------------------------------
 dev-support/hbase-personality.sh | 2 ++
 1 file changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/75c23605/dev-support/hbase-personality.sh
----------------------------------------------------------------------
diff --git a/dev-support/hbase-personality.sh b/dev-support/hbase-personality.sh
index 6797090..3b6ebad 100755
--- a/dev-support/hbase-personality.sh
+++ b/dev-support/hbase-personality.sh
@@ -104,6 +104,7 @@ function personality_modules
           if [[ -n "${excludes}" ]]; then
             extra="${extra} -Dtest.exclude.pattern=${excludes}"
           fi
+          rm excludes
         else
           echo "Wget error $? in fetching excludes file from url" \
                "${EXCLUDE_TESTS_URL}. Ignoring and proceeding."
@@ -115,6 +116,7 @@ function personality_modules
           if [[ -n "${includes}" ]]; then
             extra="${extra} -Dtest=${includes}"
           fi
+          rm includes
         else
           echo "Wget error $? in fetching includes file from url" \
                "${INCLUDE_TESTS_URL}. Ignoring and proceeding."


[34/50] hbase git commit: Revert "HBASE-15967 Metric for active ipc Readers and make default fraction of cpu count". Revert mistaken commit. This reverts commit 1125215aad3f5b149f3458ba7019c5920f6dca66.

Posted by sy...@apache.org.
Revert "HBASE-15967 Metric for active ipc Readers and make default fraction of cpu count"
Revert mistaken commit
This reverts commit 1125215aad3f5b149f3458ba7019c5920f6dca66.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6d5a2593
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6d5a2593
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6d5a2593

Branch: refs/heads/hbase-12439
Commit: 6d5a25935e5ce983e14eff576a699ed1948566d2
Parents: 2da090f
Author: stack <st...@apache.org>
Authored: Tue Jun 7 16:41:01 2016 -0700
Committer: stack <st...@apache.org>
Committed: Tue Jun 7 16:41:01 2016 -0700

----------------------------------------------------------------------
 .../hbase/ipc/MetricsHBaseServerSource.java     | 11 +++-------
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java | 19 ----------------
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 23 +++++---------------
 3 files changed, 8 insertions(+), 45 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/6d5a2593/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index 43515cd..ce57e0f 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -86,13 +86,6 @@ public interface MetricsHBaseServerSource extends BaseSource {
   String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " +
       "rest of the requests will have to be retried.";
 
-  String RUNNING_READERS = "runningReaders";
-  String RUNNING_READERS_DESCRIPTION =
-      "Count of Reader threads currently busy parsing requests to hand off to the scheduler";
-
-  void incrRunningReaders();
-  void decrRunningReaders();
-
   void authorizationSuccess();
 
   void authorizationFailure();
@@ -129,4 +122,6 @@ public interface MetricsHBaseServerSource extends BaseSource {
   void processedCall(int processingTime);
 
   void queuedAndProcessedCall(int totalTime);
-}
\ No newline at end of file
+
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d5a2593/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
index 24cc0fb..c72641d 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
@@ -57,12 +57,6 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
   private MetricHistogram requestSize;
   private MetricHistogram responseSize;
 
-  /**
-   * The count of readers currently working parsing a request as opposed to being blocked on the
-   * selector waiting on requests to come in.
-   */
-  private final MutableFastCounter runningReaders;
-
   public MetricsHBaseServerSourceImpl(String metricsName,
                                       String metricsDescription,
                                       String metricsContext,
@@ -92,9 +86,6 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
     this.exceptionsMultiTooLarge = this.getMetricsRegistry()
         .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L);
 
-    this.runningReaders = this.getMetricsRegistry()
-        .newCounter(RUNNING_READERS, RUNNING_READERS_DESCRIPTION, 0L);
-
     this.authenticationSuccesses = this.getMetricsRegistry().newCounter(
         AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L);
     this.authenticationFailures = this.getMetricsRegistry().newCounter(AUTHENTICATION_FAILURES_NAME,
@@ -118,16 +109,6 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
   }
 
   @Override
-  public void incrRunningReaders() {
-    this.runningReaders.incr(+1);
-  }
-
-  @Override
-  public void decrRunningReaders() {
-    this.runningReaders.incr(-1);
-  }
-
-  @Override
   public void authorizationSuccess() {
     authorizationSuccesses.incr();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6d5a2593/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index c9d2639..aca3fdd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -625,8 +625,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
     public Listener(final String name) throws IOException {
       super(name);
-      // The backlog of requests that we will have the serversocket carry. It is not enough
-      // just setting this config. You need to set the backlog in the kernel too.
+      // The backlog of requests that we will have the serversocket carry.
       int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
       // Create a new server socket and set to non blocking mode
       acceptChannel = ServerSocketChannel.open();
@@ -691,12 +690,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
               iter.remove();
               if (key.isValid()) {
                 if (key.isReadable()) {
-                  metrics.getMetricsSource().incrRunningReaders();
-                  try {
-                    doRead(key);
-                  } finally {
-                    metrics.getMetricsSource().decrRunningReaders();
-                  }
+                  doRead(key);
                 }
               }
               key = null;
@@ -740,9 +734,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
             iter.remove();
             try {
               if (key.isValid()) {
-                if (key.isAcceptable()) {
+                if (key.isAcceptable())
                   doAccept(key);
-                }
               }
             } catch (IOException ignored) {
               if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored);
@@ -837,8 +830,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       try {
         count = c.readAndProcess();
       } catch (InterruptedException ieo) {
-        LOG.info(Thread.currentThread().getName() +
-            ": readAndProcess caught InterruptedException", ieo);
+        LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
         throw ieo;
       } catch (Exception e) {
         if (LOG.isDebugEnabled()) {
@@ -1167,7 +1159,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     private ByteBuffer dataLengthBuffer;
     protected final ConcurrentLinkedDeque<Call> responseQueue = new ConcurrentLinkedDeque<Call>();
     private final Lock responseWriteLock = new ReentrantLock();
-    // EXPENSIVE: Counters cost lots of CPU. Remove. Used just to see if idle or not. Use boolean.
     private Counter rpcCount = new Counter(); // number of outstanding rpcs
     private long lastContact;
     private InetAddress addr;
@@ -2009,11 +2000,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     // See declaration above for documentation on what this size is.
     this.maxQueueSizeInBytes =
       this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
-    // Have the Reader thread count default to 1/4 of the processors. This seems to do pretty
-    // well. See the metric hbase.regionserver.ipc.runningReaders to see if you need to change it.
-    int processors = Runtime.getRuntime().availableProcessors();
-    this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size",
-        Math.max(8, processors/ 4));
+    this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10);
     this.purgeTimeout = conf.getLong("hbase.ipc.client.call.purge.timeout",
       2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
     this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME);
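
For reference, the two things this revert removes can be restated in a few lines. A condensed sketch using the names in the diff above, not a drop-in patch: the try/finally keeps the reader gauge accurate on every exit path, and the reverted default sized the reader pool from the machine instead of a fixed 10.

  // Reverted gauge of busy Reader threads:
  metrics.getMetricsSource().incrRunningReaders();
  try {
    doRead(key);   // parse the request and hand it off to the scheduler
  } finally {
    metrics.getMetricsSource().decrRunningReaders();   // decremented even if doRead() throws
  }

  // Reverted CPU-fraction default for the reader pool:
  int processors = Runtime.getRuntime().availableProcessors();
  this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size",
      Math.max(8, processors / 4));   // at least 8, else a quarter of the cores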


[35/50] hbase git commit: Revert "HBASE-15948 Port "HADOOP-9956 RPC listener inefficiently assigns connections to readers"". Revert mistaken commit... This reverts commit e0b70c00e74aeaac33570508e3732a53daea839e.

Posted by sy...@apache.org.
Revert "HBASE-15948 Port "HADOOP-9956 RPC listener inefficiently assigns connections to readers""
Revert mistaken commit...
This reverts commit e0b70c00e74aeaac33570508e3732a53daea839e.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e66ecd7d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e66ecd7d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e66ecd7d

Branch: refs/heads/hbase-12439
Commit: e66ecd7db68d6ef57084543d08f7774c82f22f45
Parents: 6d5a259
Author: stack <st...@apache.org>
Authored: Tue Jun 7 16:41:30 2016 -0700
Committer: stack <st...@apache.org>
Committed: Tue Jun 7 16:41:30 2016 -0700

----------------------------------------------------------------------
 .../hbase/ipc/MetricsHBaseServerSource.java     |  10 +-
 .../ipc/MetricsHBaseServerWrapperImpl.java      |   6 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 408 ++++++++-----------
 .../regionserver/SimpleRpcSchedulerFactory.java |   2 +-
 .../hadoop/hbase/ipc/AbstractTestIPC.java       |   2 +-
 5 files changed, 187 insertions(+), 241 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/e66ecd7d/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index ce57e0f..bb89789 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -52,16 +52,14 @@ public interface MetricsHBaseServerSource extends BaseSource {
   String TOTAL_CALL_TIME_NAME = "totalCallTime";
   String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time.";
   String QUEUE_SIZE_NAME = "queueSize";
-  String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " +
-    "parsed and is waiting to run or is currently being executed.";
+  String QUEUE_SIZE_DESC = "Number of bytes in the call queues.";
   String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue";
-  String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " +
-    "parsed requests waiting in scheduler to be executed";
+  String GENERAL_QUEUE_DESC = "Number of calls in the general call queue.";
   String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue";
   String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue";
   String REPLICATION_QUEUE_DESC =
-      "Number of calls in the replication call queue waiting to be run";
-  String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run";
+      "Number of calls in the replication call queue.";
+  String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue.";
   String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections";
   String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections.";
   String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler";

http://git-wip-us.apache.org/repos/asf/hbase/blob/e66ecd7d/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
index 4f53709..9979c75 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
@@ -36,7 +36,7 @@ public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper
     if (!isServerStarted()) {
       return 0;
     }
-    return server.callQueueSizeInBytes.get();
+    return server.callQueueSize.get();
   }
 
   @Override
@@ -65,10 +65,10 @@ public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper
 
   @Override
   public int getNumOpenConnections() {
-    if (!isServerStarted()) {
+    if (!isServerStarted() || this.server.connectionList == null) {
       return 0;
     }
-    return server.getNumOpenConnections();
+    return server.connectionList.size();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/e66ecd7d/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index aca3fdd..483ce86 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -48,16 +48,15 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Random;
 import java.util.Set;
-import java.util.Timer;
-import java.util.TimerTask;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedDeque;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
@@ -114,7 +113,6 @@ import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
@@ -185,6 +183,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    */
   static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10;
 
+  /**
+   * The maximum size that we can hold in the RPC queue
+   */
+  private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024;
+
   private final IPCUtil ipcUtil;
 
   private static final String AUTH_FAILED_FOR = "Auth failed for ";
@@ -207,30 +210,22 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   protected int port;                             // port we listen on
   protected InetSocketAddress address;            // inet address we listen on
   private int readThreads;                        // number of read threads
+  protected int maxIdleTime;                      // the maximum idle time after
+                                                  // which a client may be
+                                                  // disconnected
+  protected int thresholdIdleConnections;         // the number of idle
+                                                  // connections after which we
+                                                  // will start cleaning up idle
+                                                  // connections
+  int maxConnectionsToNuke;                       // the max number of
+                                                  // connections to nuke
+                                                  // during a cleanup
+
   protected MetricsHBaseServer metrics;
 
   protected final Configuration conf;
 
-  /**
-   * Maximum size in bytes of the currently queued and running Calls. If a new Call puts us over
-   * this size, then we will reject the call (after parsing it though). It will go back to the
-   * client and client will retry. Set this size with "hbase.ipc.server.max.callqueue.size". The
-   * call queue size gets incremented after we parse a call and before we add it to the queue of
-   * calls for the scheduler to use. It get decremented after we have 'run' the Call. The current
-   * size is kept in {@link #callQueueSizeInBytes}.
-   * @see {@link #callQueueSizeInBytes}
-   * @see {@link #DEFAULT_MAX_CALLQUEUE_SIZE}
-   * @see {@link #callQueueSizeInBytes}
-   */
-  private final long maxQueueSizeInBytes;
-  private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024;
-
-  /**
-   * This is a running count of the size in bytes of all outstanding calls whether currently
-   * executing or queued waiting to be run.
-   */
-  protected final Counter callQueueSizeInBytes = new Counter();
-
+  private int maxQueueSize;
   protected int socketSendBufferSize;
   protected final boolean tcpNoDelay;   // if T then disable Nagle's Algorithm
   protected final boolean tcpKeepAlive; // if T then use keepalives
@@ -249,11 +244,19 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    */
   volatile boolean started = false;
 
-  // maintains the set of client connections and handles idle timeouts
-  private ConnectionManager connectionManager;
+  /**
+   * This is a running count of the size of all outstanding calls by size.
+   */
+  protected final Counter callQueueSize = new Counter();
+
+  protected final List<Connection> connectionList =
+    Collections.synchronizedList(new LinkedList<Connection>());
+  //maintain a list
+  //of client connections
   private Listener listener = null;
   protected Responder responder = null;
   protected AuthenticationTokenSecretManager authTokenSecretMgr = null;
+  protected int numConnections = 0;
 
   protected HBaseRPCErrorHandler errorHandler = null;
 
@@ -620,13 +623,18 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     private Selector selector = null; //the selector that we use for the server
     private Reader[] readers = null;
     private int currentReader = 0;
+    private Random rand = new Random();
+    private long lastCleanupRunTime = 0; //the last time when a cleanup connec-
+                                         //-tion (for idle connections) ran
+    private long cleanupInterval = 10000; //the minimum interval between
+                                          //two cleanup runs
+    private int backlogLength;
 
     private ExecutorService readPool;
 
     public Listener(final String name) throws IOException {
       super(name);
-      // The backlog of requests that we will have the serversocket carry.
-      int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
+      backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
       // Create a new server socket and set to non blocking mode
       acceptChannel = ServerSocketChannel.open();
       acceptChannel.configureBlocking(false);
@@ -636,11 +644,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
       address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
       // create a selector;
-      selector = Selector.open();
+      selector= Selector.open();
 
       readers = new Reader[readThreads];
-      // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it
-      // has an advantage in that it is easy to shutdown the pool.
       readPool = Executors.newFixedThreadPool(readThreads,
         new ThreadFactoryBuilder().setNameFormat(
           "RpcServer.reader=%d,bindAddress=" + bindAddress.getHostName() +
@@ -661,12 +667,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
 
     private class Reader implements Runnable {
+      private volatile boolean adding = false;
       private final Selector readSelector;
 
       Reader() throws IOException {
         this.readSelector = Selector.open();
       }
-
       @Override
       public void run() {
         try {
@@ -680,10 +686,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         }
       }
 
-      private void doRunLoop() {
+      private synchronized void doRunLoop() {
         while (running) {
           try {
             readSelector.select();
+            while (adding) {
+              this.wait(1000);
+            }
+
             Iterator<SelectionKey> iter = readSelector.selectedKeys().iterator();
             while (iter.hasNext()) {
               SelectionKey key = iter.next();
@@ -693,12 +703,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
                   doRead(key);
                 }
               }
-              key = null;
             }
           } catch (InterruptedException e) {
-            if (running) {                      // unexpected -- log it
-              LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
-            }
+            LOG.debug("Interrupted while sleeping");
             return;
           } catch (IOException ex) {
             LOG.info(getName() + ": IOException in Reader", ex);
@@ -707,14 +714,76 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       }
 
       /**
-       * Updating the readSelector while it's being used is not thread-safe,
-       * so the connection must be queued.  The reader will drain the queue
-       * and update its readSelector before performing the next select
+       * This gets reader into the state that waits for the new channel
+       * to be registered with readSelector. If it was waiting in select()
+       * the thread will be woken up, otherwise whenever select() is called
+       * it will return even if there is nothing to read and wait
+       * in while(adding) for finishAdd call
        */
-      public void addConnection(Connection conn) throws IOException {
-        conn.channel.register(readSelector, SelectionKey.OP_READ, conn);
+      public void startAdd() {
+        adding = true;
         readSelector.wakeup();
       }
+
+      public synchronized SelectionKey registerChannel(SocketChannel channel)
+        throws IOException {
+        return channel.register(readSelector, SelectionKey.OP_READ);
+      }
+
+      public synchronized void finishAdd() {
+        adding = false;
+        this.notify();
+      }
+    }
+
+    /** cleanup connections from connectionList. Choose a random range
+     * to scan and also have a limit on the number of the connections
+     * that will be cleanedup per run. The criteria for cleanup is the time
+     * for which the connection was idle. If 'force' is true then all
+     * connections will be looked at for the cleanup.
+     * @param force all connections will be looked at for cleanup
+     */
+    private void cleanupConnections(boolean force) {
+      if (force || numConnections > thresholdIdleConnections) {
+        long currentTime = System.currentTimeMillis();
+        if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) {
+          return;
+        }
+        int start = 0;
+        int end = numConnections - 1;
+        if (!force) {
+          start = rand.nextInt() % numConnections;
+          end = rand.nextInt() % numConnections;
+          int temp;
+          if (end < start) {
+            temp = start;
+            start = end;
+            end = temp;
+          }
+        }
+        int i = start;
+        int numNuked = 0;
+        while (i <= end) {
+          Connection c;
+          synchronized (connectionList) {
+            try {
+              c = connectionList.get(i);
+            } catch (Exception e) {return;}
+          }
+          if (c.timedOut(currentTime)) {
+            if (LOG.isDebugEnabled())
+              LOG.debug(getName() + ": disconnecting client " + c.getHostAddress());
+            closeConnection(c);
+            numNuked++;
+            end--;
+            //noinspection UnusedAssignment
+            c = null;
+            if (!force && numNuked == maxConnectionsToNuke) break;
+          }
+          else i++;
+        }
+        lastCleanupRunTime = System.currentTimeMillis();
+      }
     }
 
     @Override
@@ -723,7 +792,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         "it will have per impact")
     public void run() {
       LOG.info(getName() + ": starting");
-      connectionManager.startIdleScan();
       while (running) {
         SelectionKey key = null;
         try {
@@ -747,7 +815,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
             if (errorHandler.checkOOME(e)) {
               LOG.info(getName() + ": exiting on OutOfMemoryError");
               closeCurrentConnection(key, e);
-              connectionManager.closeIdle(true);
+              cleanupConnections(true);
               return;
             }
           } else {
@@ -756,18 +824,22 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
             // some thread(s) a chance to finish
             LOG.warn(getName() + ": OutOfMemoryError in server select", e);
             closeCurrentConnection(key, e);
-            connectionManager.closeIdle(true);
+            cleanupConnections(true);
             try {
               Thread.sleep(60000);
             } catch (InterruptedException ex) {
               LOG.debug("Interrupted while sleeping");
+              return;
             }
           }
         } catch (Exception e) {
           closeCurrentConnection(key, e);
         }
+        cleanupConnections(false);
       }
+
       LOG.info(getName() + ": stopping");
+
       synchronized (this) {
         try {
           acceptChannel.close();
@@ -779,9 +851,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         selector= null;
         acceptChannel= null;
 
-        // close all connections
-        connectionManager.stopIdleScan();
-        connectionManager.closeAll();
+        // clean up all connections
+        while (!connectionList.isEmpty()) {
+          closeConnection(connectionList.remove(0));
+        }
       }
     }
 
@@ -789,6 +862,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       if (key != null) {
         Connection c = (Connection)key.attachment();
         if (c != null) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() +
+                (e != null ? " on error " + e.getMessage() : ""));
+          }
           closeConnection(c);
           key.attach(null);
         }
@@ -799,24 +876,37 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return address;
     }
 
-    void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfMemoryError {
+    void doAccept(SelectionKey key) throws IOException, OutOfMemoryError {
+      Connection c;
       ServerSocketChannel server = (ServerSocketChannel) key.channel();
+
       SocketChannel channel;
       while ((channel = server.accept()) != null) {
-        channel.configureBlocking(false);
-        channel.socket().setTcpNoDelay(tcpNoDelay);
-        channel.socket().setKeepAlive(tcpKeepAlive);
+        try {
+          channel.configureBlocking(false);
+          channel.socket().setTcpNoDelay(tcpNoDelay);
+          channel.socket().setKeepAlive(tcpKeepAlive);
+        } catch (IOException ioe) {
+          channel.close();
+          throw ioe;
+        }
+
         Reader reader = getReader();
-        Connection c = connectionManager.register(channel);
-        // If the connectionManager can't take it, close the connection.
-        if (c == null) {
-          if (channel.isOpen()) {
-            IOUtils.cleanup(null, channel);
+        try {
+          reader.startAdd();
+          SelectionKey readKey = reader.registerChannel(channel);
+          c = getConnection(channel, System.currentTimeMillis());
+          readKey.attach(c);
+          synchronized (connectionList) {
+            connectionList.add(numConnections, c);
+            numConnections++;
           }
-          continue;
+          if (LOG.isDebugEnabled())
+            LOG.debug(getName() + ": connection from " + c.toString() +
+                "; # active connections: " + numConnections);
+        } finally {
+          reader.finishAdd();
         }
-        key.attach(c);  // so closeCurrentConnection can get the object
-        reader.addConnection(c);
       }
     }
 
@@ -829,8 +919,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       c.setLastContact(System.currentTimeMillis());
       try {
         count = c.readAndProcess();
+
+        if (count > 0) {
+          c.setLastContact(System.currentTimeMillis());
+        }
+
       } catch (InterruptedException ieo) {
-        LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
         throw ieo;
       } catch (Exception e) {
         if (LOG.isDebugEnabled()) {
@@ -839,10 +933,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         count = -1; //so that the (count < 0) block is executed
       }
       if (count < 0) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(getName() + ": DISCONNECTING client " + c.toString() +
+              " because read count=" + count +
+              ". Number of active connections: " + numConnections);
+        }
         closeConnection(c);
-        c = null;
-      } else {
-        c.setLastContact(System.currentTimeMillis());
       }
     }
 
@@ -1259,10 +1355,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return null;
     }
 
-    public long getLastContact() {
-      return lastContact;
-    }
-
     /* Return true if the connection has no outstanding rpc */
     private boolean isIdle() {
       return rpcCount.get() == 0;
@@ -1278,6 +1370,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       rpcCount.increment();
     }
 
+    protected boolean timedOut(long currentTime) {
+      return isIdle() && currentTime - lastContact > maxIdleTime;
+    }
+
     private UserGroupInformation getAuthorizedUgi(String authorizedId)
         throws IOException {
       UserGroupInformation authorizedUgi;
@@ -1787,7 +1883,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       }
       // Enforcing the call queue size, this triggers a retry in the client
       // This is a bit late to be doing this check - we have already read in the total request.
-      if ((totalRequestSize + callQueueSizeInBytes.get()) > maxQueueSizeInBytes) {
+      if ((totalRequestSize + callQueueSize.get()) > maxQueueSize) {
         final Call callTooBig =
           new Call(id, this.service, null, null, null, null, this,
             responder, totalRequestSize, null, null, 0);
@@ -1858,7 +1954,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
               totalRequestSize, traceInfo, this.addr, timeout);
 
       if (!scheduler.dispatch(new CallRunner(RpcServer.this, call))) {
-        callQueueSizeInBytes.add(-1 * call.getSize());
+        callQueueSize.add(-1 * call.getSize());
 
         ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
         metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
@@ -1997,10 +2093,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     this.bindAddress = bindAddress;
     this.conf = conf;
     this.socketSendBufferSize = 0;
-    // See declaration above for documentation on what this size is.
-    this.maxQueueSizeInBytes =
-      this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
+    this.maxQueueSize =
+      this.conf.getInt("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
     this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10);
+    this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 1000);
+    this.maxConnectionsToNuke = conf.getInt("hbase.ipc.client.kill.max", 10);
+    this.thresholdIdleConnections = conf.getInt("hbase.ipc.client.idlethreshold", 4000);
     this.purgeTimeout = conf.getLong("hbase.ipc.client.call.purge.timeout",
       2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
     this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME);
@@ -2022,7 +2120,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
     // Create the responder here
     responder = new Responder();
-    connectionManager = new ConnectionManager();
     this.authorize = conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
     this.userProvider = UserProvider.instantiate(conf);
     this.isSecurityEnabled = userProvider.isHBaseSecurityEnabled();
@@ -2080,7 +2177,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   }
 
   protected void closeConnection(Connection connection) {
-    connectionManager.close(connection);
+    synchronized (connectionList) {
+      if (connectionList.remove(connection)) {
+        numConnections--;
+      }
+    }
+    connection.close();
   }
 
   Configuration getConf() {
@@ -2338,7 +2440,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
   @Override
   public void addCallSize(final long diff) {
-    this.callQueueSizeInBytes.add(diff);
+    this.callQueueSize.add(diff);
   }
 
   /**
@@ -2476,14 +2578,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   }
 
   /**
-   * The number of open RPC conections
-   * @return the number of open rpc connections
-   */
-  public int getNumOpenConnections() {
-    return connectionManager.size();
-  }
-
-  /**
    * Returns the username for any user associated with the current RPC
    * request or <code>null</code> if no user is set.
    */
@@ -2601,150 +2695,4 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   public RpcScheduler getScheduler() {
     return scheduler;
   }
-
-  private class ConnectionManager {
-    final private AtomicInteger count = new AtomicInteger();
-    final private Set<Connection> connections;
-
-    final private Timer idleScanTimer;
-    final private int idleScanThreshold;
-    final private int idleScanInterval;
-    final private int maxIdleTime;
-    final private int maxIdleToClose;
-
-    ConnectionManager() {
-      this.idleScanTimer = new Timer("RpcServer idle connection scanner for port " + port, true);
-      this.idleScanThreshold = conf.getInt("hbase.ipc.client.idlethreshold", 4000);
-      this.idleScanInterval =
-          conf.getInt("hbase.ipc.client.connection.idle-scan-interval.ms", 10000);
-      this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 10000);
-      this.maxIdleToClose = conf.getInt("hbase.ipc.client.kill.max", 10);
-      int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
-          HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
-      int maxConnectionQueueSize =
-          handlerCount * conf.getInt("hbase.ipc.server.handler.queue.size", 100);
-      // create a set with concurrency -and- a thread-safe iterator, add 2
-      // for listener and idle closer threads
-      this.connections = Collections.newSetFromMap(
-          new ConcurrentHashMap<Connection,Boolean>(
-              maxConnectionQueueSize, 0.75f, readThreads+2));
-    }
-
-    private boolean add(Connection connection) {
-      boolean added = connections.add(connection);
-      if (added) {
-        count.getAndIncrement();
-      }
-      return added;
-    }
-
-    private boolean remove(Connection connection) {
-      boolean removed = connections.remove(connection);
-      if (removed) {
-        count.getAndDecrement();
-      }
-      return removed;
-    }
-
-    int size() {
-      return count.get();
-    }
-
-    Connection[] toArray() {
-      return connections.toArray(new Connection[0]);
-    }
-
-    Connection register(SocketChannel channel) {
-      Connection connection = new Connection(channel, System.currentTimeMillis());
-      add(connection);
-      if (LOG.isDebugEnabled()) {
-        // Use metric names
-        LOG.debug("Server connection from " + connection +
-            "; numOpenConnections=" + size() +
-            ",  queueSize(bytes)=" + callQueueSizeInBytes.get() +
-            ", numCallsInGeneralQueue=" + scheduler.getGeneralQueueLength() +
-            ", numCallsInPriorityQueue=" + scheduler.getPriorityQueueLength());
-      }
-      return connection;
-    }
-
-    boolean close(Connection connection) {
-      boolean exists = remove(connection);
-      if (exists) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(Thread.currentThread().getName() +
-              ": disconnecting client " + connection +
-              ". Number of active connections: "+ size());
-        }
-        // only close if actually removed to avoid double-closing due
-        // to possible races
-        connection.close();
-      }
-      return exists;
-    }
-
-    // synch'ed to avoid explicit invocation upon OOM from colliding with
-    // timer task firing
-    synchronized void closeIdle(boolean scanAll) {
-      long minLastContact = System.currentTimeMillis() - maxIdleTime;
-      // concurrent iterator might miss new connections added
-      // during the iteration, but that's ok because they won't
-      // be idle yet anyway and will be caught on next scan
-      int closed = 0;
-      for (Connection connection : connections) {
-        // stop if connections dropped below threshold unless scanning all
-        if (!scanAll && size() < idleScanThreshold) {
-          break;
-        }
-        // stop if not scanning all and max connections are closed
-        if (connection.isIdle() &&
-            connection.getLastContact() < minLastContact &&
-            close(connection) &&
-            !scanAll && (++closed == maxIdleToClose)) {
-          break;
-        }
-      }
-    }
-
-    void closeAll() {
-      // use a copy of the connections to be absolutely sure the concurrent
-      // iterator doesn't miss a connection
-      for (Connection connection : toArray()) {
-        close(connection);
-      }
-    }
-
-    void startIdleScan() {
-      scheduleIdleScanTask();
-    }
-
-    void stopIdleScan() {
-      idleScanTimer.cancel();
-    }
-
-    private void scheduleIdleScanTask() {
-      if (!running) {
-        return;
-      }
-      TimerTask idleScanTask = new TimerTask(){
-        @Override
-        public void run() {
-          if (!running) {
-            return;
-          }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(Thread.currentThread().getName()+": task running");
-          }
-          try {
-            closeIdle(false);
-          } finally {
-            // explicitly reschedule so next execution occurs relative
-            // to the end of this scan, not the beginning
-            scheduleIdleScanTask();
-          }
-        }
-      };
-      idleScanTimer.schedule(idleScanTask, idleScanInterval);
-    }
-  }
 }
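
One detail of the ConnectionManager that this revert deletes is worth noting: the idle scan reschedules itself from inside the TimerTask, so each interval is measured from the end of one scan to the start of the next, and a slow scan can never overlap the following one. A condensed sketch of that pattern, using the names from the removed code above:

  private void scheduleIdleScanTask() {
    if (!running) {
      return;
    }
    idleScanTimer.schedule(new TimerTask() {
      @Override
      public void run() {
        try {
          closeIdle(false);   // close up to maxIdleToClose idle connections
        } finally {
          scheduleIdleScanTask();   // next run is relative to the end of this one
        }
      }
    }, idleScanInterval);
  }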

http://git-wip-us.apache.org/repos/asf/hbase/blob/e66ecd7d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
index 743c5bb..1f496b4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
@@ -41,7 +41,7 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
   @Override
   public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
     int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
-        HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
+		HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
 
     return new SimpleRpcScheduler(
       conf,

http://git-wip-us.apache.org/repos/asf/hbase/blob/e66ecd7d/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
index 45cec78..ceb945b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
@@ -263,7 +263,7 @@ public abstract class AbstractTestIPC {
       fail("Expected an exception to have been thrown!");
     } catch (Exception e) {
       LOG.info("Caught expected exception: " + e.toString());
-      assertTrue(e.toString(), StringUtils.stringifyException(e).contains("Injected fault"));
+      assertTrue(StringUtils.stringifyException(e).contains("Injected fault"));
     } finally {
       rpcServer.stop();
     }
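
The trickiest piece this revert restores is the Listener-to-Reader handoff. A channel cannot be registered with a selector that another thread is blocked in select() on, so the old code first flips the Reader into a short wait via the volatile adding flag, registers the channel, then releases the Reader. A condensed sketch of the handshake, using the names in the diff above (error handling elided; wait() is legal because doRunLoop() is synchronized):

  // Listener thread, per accepted channel:
  reader.startAdd();                       // adding = true, then readSelector.wakeup()
  try {
    SelectionKey readKey = reader.registerChannel(channel);
    readKey.attach(connection);            // the Reader finds it on its next select()
  } finally {
    reader.finishAdd();                    // adding = false, notify the waiting Reader
  }

  // Reader.doRunLoop(), just after select() returns:
  while (adding) {
    this.wait(1000);                       // park until the Listener finishes registering
  }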


[11/50] hbase git commit: HBASE-15944 Spark test flooding mvn output. Redirect test logs to file. This doesn't fix the problem fully, as I still see a few logs being dumped in stdout. But it cleans up the majority of the earlier dump. (Apekshit)

Posted by sy...@apache.org.
HBASE-15944 Spark test flooding mvn output. Redirect test logs to file. This doesn't fix the problem fully, as I still see a few logs being dumped in stdout. But it cleans up the majority of the earlier dump. (Apekshit)

Change-Id: I6893301d154078a7cfb6b9af2eedc744deafb8d7

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4ffea771
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4ffea771
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4ffea771

Branch: refs/heads/hbase-12439
Commit: 4ffea7711acc33dc3c0a75479b510d0683c11523
Parents: cfe868d
Author: Apekshit <ap...@gmail.com>
Authored: Thu Jun 2 01:43:35 2016 -0700
Committer: stack <st...@apache.org>
Committed: Thu Jun 2 08:44:08 2016 -0700

----------------------------------------------------------------------
 hbase-spark/src/test/resources/log4j.properties | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/4ffea771/hbase-spark/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hbase-spark/src/test/resources/log4j.properties b/hbase-spark/src/test/resources/log4j.properties
index 4eeeb2c..6d9415b 100644
--- a/hbase-spark/src/test/resources/log4j.properties
+++ b/hbase-spark/src/test/resources/log4j.properties
@@ -15,7 +15,7 @@
 # limitations under the License.
 
 # Define some default values that can be overridden by system properties
-hbase.root.logger=INFO,console
+hbase.root.logger=INFO,FA
 hbase.log.dir=.
 hbase.log.file=hbase.log
 
@@ -50,6 +50,14 @@ log4j.appender.console.target=System.err
 log4j.appender.console.layout=org.apache.log4j.PatternLayout
 log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
 
+#File Appender
+log4j.appender.FA=org.apache.log4j.FileAppender
+log4j.appender.FA.append=false
+log4j.appender.FA.file=target/log-output.txt
+log4j.appender.FA.layout=org.apache.log4j.PatternLayout
+log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
+log4j.appender.FA.Threshold = INFO
+
 # Custom Logging levels
 
 #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
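
As a sanity check on what the redirect buys, a minimal sketch, assuming the log4j.properties above is on the test classpath; the class here is hypothetical, for illustration only:

  import org.apache.log4j.Logger;

  public class LogRedirectCheck {
    private static final Logger LOG = Logger.getLogger(LogRedirectCheck.class);

    public static void main(String[] args) {
      // With hbase.root.logger=INFO,FA this line lands in target/log-output.txt
      // rather than on the console, so it no longer floods the mvn output.
      LOG.info("written to the FA file appender");
      // Below the FA Threshold of INFO, so dropped entirely.
      LOG.debug("suppressed");
    }
  }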


[09/50] hbase git commit: HBASE-15858 Some region server group shell commands don't work

Posted by sy...@apache.org.
HBASE-15858 Some region server group shell commands don't work


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a0f49c98
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a0f49c98
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a0f49c98

Branch: refs/heads/hbase-12439
Commit: a0f49c988419d48f6c655f46ac78f8199c643b50
Parents: 53eb27b
Author: tedyu <yu...@gmail.com>
Authored: Wed Jun 1 21:13:33 2016 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Wed Jun 1 21:13:33 2016 -0700

----------------------------------------------------------------------
 hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb  |  2 +-
 .../src/main/ruby/shell/commands/get_server_rsgroup.rb   |  2 +-
 hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb    | 11 +++++++----
 3 files changed, 9 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a0f49c98/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
index 6772aa1..ce4be71 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_rsgroup.rb
@@ -34,7 +34,7 @@ EOF
       def command(group_name)
         now = Time.now
         formatter.header(['GROUP INFORMATION'])
-        group_admin.get_rsgroup(group_name) do |s|
+        rsgroup_admin.get_rsgroup(group_name) do |s|
           formatter.row([s])
         end
         formatter.footer(now)

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0f49c98/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
index 322f6bb..a689a7c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_server_rsgroup.rb
@@ -31,7 +31,7 @@ EOF
 
       def command(server)
         now = Time.now
-        group_name = rsgroup_admin.getGroupOfServer(server).getName
+        group_name = rsgroup_admin.getRSGroupOfServer(server).getName
         formatter.row([group_name])
         formatter.footer(now, 1)
       end

http://git-wip-us.apache.org/repos/asf/hbase/blob/a0f49c98/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
index d892775..1040ed8 100644
--- a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
+++ b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
@@ -49,12 +49,15 @@ module Hbase
       assert_not_nil(group)
       assert_equal(0, group.getServers.count)
 
-      hostport =
-          @rsgroup_admin.getRSGroupInfo('default').getServers.iterator.next.toString
+      hostport = @rsgroup_admin.getRSGroupInfo('default').getServers.iterator.next
+      @shell.command('get_rsgroup', 'default')
+      hostPortStr = hostport.toString
+      @shell.command('get_server_rsgroup', [hostPortStr])
       @shell.command('move_rsgroup_servers',
                      group_name,
-                     [hostport])
+                     [hostPortStr])
       assert_equal(1, @rsgroup_admin.getRSGroupInfo(group_name).getServers.count)
+      assert_equal(group_name, @rsgroup_admin.getRSGroupOfServer(hostport).getName)
 
       @shell.command('move_rsgroup_tables',
                      group_name,
@@ -65,7 +68,7 @@ module Hbase
       @hbase.rsgroup_admin(@formatter).get_rsgroup(group_name) do |line|
         case count
         when 1
-          assert_equal(hostport, line)
+          assert_equal(hostPortStr, line)
         when 3
           assert_equal(table_name, line)
         end


[46/50] hbase git commit: HBASE-15990 The priority value of subsequent coprocessors in the Coprocessor.Priority.SYSTEM list is not incremented by one (ChiaPing Tsai)

Posted by sy...@apache.org.
HBASE-15990 The priority value of subsequent coprocessors in the Coprocessor.Priority.SYSTEM list is not incremented by one (ChiaPing Tsai)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/55a04b78
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/55a04b78
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/55a04b78

Branch: refs/heads/hbase-12439
Commit: 55a04b78102a3a919c6e2e86fcdf98dd1d9a24e4
Parents: 9012a0b
Author: tedyu <yu...@gmail.com>
Authored: Thu Jun 9 10:26:10 2016 -0700
Committer: Ted <yu...@gmail.com>
Committed: Thu Jun 9 10:26:10 2016 -0700

----------------------------------------------------------------------
 .../hbase/coprocessor/CoprocessorHost.java      |  5 ++--
 .../hbase/coprocessor/TestCoprocessorHost.java  | 26 ++++++++++++++------
 2 files changed, 21 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/55a04b78/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
index da0e8b1..e937569 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/CoprocessorHost.java
@@ -158,9 +158,10 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
         implClass = cl.loadClass(className);
         // Add coprocessors as we go to guard against case where a coprocessor is specified twice
         // in the configuration
-        this.coprocessors.add(loadInstance(implClass, Coprocessor.PRIORITY_SYSTEM, conf));
+        this.coprocessors.add(loadInstance(implClass, priority, conf));
         LOG.info("System coprocessor " + className + " was loaded " +
-            "successfully with priority (" + priority++ + ").");
+            "successfully with priority (" + priority + ").");
+        ++priority;
       } catch (Throwable t) {
         // We always abort if system coprocessors cannot be loaded
         abortServer(className, t);

http://git-wip-us.apache.org/repos/asf/hbase/blob/55a04b78/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java
index 58cd0fb..66b5c60 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorHost.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
@@ -51,9 +53,8 @@ public class TestCoprocessorHost {
       return this.aborted;
     }
   }
-
   @Test
-  public void testDoubleLoading() {
+  public void testDoubleLoadingAndPriorityValue() {
     final Configuration conf = HBaseConfiguration.create();
     CoprocessorHost<CoprocessorEnvironment> host =
         new CoprocessorHost<CoprocessorEnvironment>(new TestAbortable()) {
@@ -61,7 +62,7 @@ public class TestCoprocessorHost {
 
       @Override
       public CoprocessorEnvironment createEnvironment(Class<?> implClass,
-          final Coprocessor instance, int priority, int sequence, Configuration conf) {
+          final Coprocessor instance, final int priority, int sequence, Configuration conf) {
         return new CoprocessorEnvironment() {
           final Coprocessor envInstance = instance;
 
@@ -82,7 +83,7 @@ public class TestCoprocessorHost {
 
           @Override
           public int getPriority() {
-            return 0;
+            return priority;
           }
 
           @Override
@@ -114,10 +115,19 @@ public class TestCoprocessorHost {
     };
     final String key = "KEY";
     final String coprocessor = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver";
-    // Try and load coprocessor three times.
-    conf.setStrings(key, coprocessor, coprocessor, coprocessor);
+    // Try and load a coprocessor three times
+    conf.setStrings(key, coprocessor, coprocessor, coprocessor, SimpleRegionObserverV2.class.getName());
     host.loadSystemCoprocessors(conf, key);
-    // Only one coprocessor loaded
-    Assert.assertEquals(1, host.coprocessors.size());
+    // Two coprocessors(SimpleRegionObserver and SimpleRegionObserverV2) loaded
+    Assert.assertEquals(2, host.coprocessors.size());
+    // Check the priority value
+    CoprocessorEnvironment simpleEnv = host.findCoprocessorEnvironment(SimpleRegionObserver.class.getName());
+    CoprocessorEnvironment simpleEnv_v2 = host.findCoprocessorEnvironment(SimpleRegionObserverV2.class.getName());
+    assertNotNull(simpleEnv);
+    assertNotNull(simpleEnv_v2);
+    assertEquals(Coprocessor.PRIORITY_SYSTEM, simpleEnv.getPriority());
+    assertEquals(Coprocessor.PRIORITY_SYSTEM + 1, simpleEnv_v2.getPriority());
+  }
+  public static class SimpleRegionObserverV2 extends SimpleRegionObserver {
   }
 }
\ No newline at end of file


[41/50] hbase git commit: HBASE-15994 Allow selection of RpcSchedulers Adds logging by the RpcExecutors of their run configs Changes the default RpcSchedulerFactory from SimpleRpcSchedulerFactory.class to RpcSchedulerFactoryImpl.class. RpcSchedulerFactor

Posted by sy...@apache.org.
HBASE-15994 Allow selection of RpcSchedulers. Adds logging by the RpcExecutors of their run configs. Changes the default RpcSchedulerFactory from SimpleRpcSchedulerFactory.class to RpcSchedulerFactoryImpl.class. RpcSchedulerFactoryImpl defaults to using SimpleRpcSchedulerFactory and the SimpleRpcScheduler, as has been the default up to now, unless you set "hbase.region.server.rpc.scheduler.class" to org.apache.hadoop.hbase.ipc.FifoRpcScheduler

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3ac4a57f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3ac4a57f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3ac4a57f

Branch: refs/heads/hbase-12439
Commit: 3ac4a57fd205e1909c874cabd7fda9fb176f3f0f
Parents: a7172d5
Author: stack <st...@apache.org>
Authored: Wed Jun 8 09:53:51 2016 -0700
Committer: stack <st...@apache.org>
Committed: Wed Jun 8 20:20:50 2016 -0700

----------------------------------------------------------------------
 .../hbase/ipc/BalancedQueueRpcExecutor.java     |  4 ++
 .../hadoop/hbase/ipc/FifoRpcScheduler.java      |  5 ++
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java    | 54 ++++++++-------
 .../regionserver/FifoRpcSchedulerFactory.java   | 47 +++++++++++++
 .../hbase/regionserver/RpcSchedulerFactory.java |  4 +-
 .../regionserver/SimpleRpcSchedulerFactory.java |  6 +-
 .../regionserver/TestRpcSchedulerFactory.java   | 71 ++++++++++++++++++++
 7 files changed, 161 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
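
For orientation, here is a minimal sketch of selecting the FIFO scheduler through the factory this patch adds. The config key is quoted from the commit message above; the startup wiring that reads that key is not part of this diff, so constructing the factory directly stands in for that step. The null arguments are fine because FifoRpcSchedulerFactory only reads the Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
import org.apache.hadoop.hbase.ipc.RpcScheduler;
import org.apache.hadoop.hbase.regionserver.FifoRpcSchedulerFactory;
import org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory;

public class SchedulerSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Key quoted from the commit message; the value names the scheduler class.
    conf.set("hbase.region.server.rpc.scheduler.class",
        FifoRpcScheduler.class.getName());
    // The factory added below builds the FIFO scheduler; it ignores the
    // priority function and abortable arguments, hence the nulls.
    RpcSchedulerFactory factory = new FifoRpcSchedulerFactory();
    RpcScheduler scheduler = factory.create(conf, null, null);
    System.out.println("Scheduler: " + scheduler.getClass().getSimpleName());
  }
}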


http://git-wip-us.apache.org/repos/asf/hbase/blob/3ac4a57f/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
index e4205eb..3505221 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
@@ -22,6 +22,8 @@ import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -36,6 +38,7 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
 @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX })
 @InterfaceStability.Evolving
 public class BalancedQueueRpcExecutor extends RpcExecutor {
+  private static final Log LOG = LogFactory.getLog(BalancedQueueRpcExecutor.class);
 
   protected final List<BlockingQueue<CallRunner>> queues;
   private final QueueBalancer balancer;
@@ -62,6 +65,7 @@ public class BalancedQueueRpcExecutor extends RpcExecutor {
     queues = new ArrayList<BlockingQueue<CallRunner>>(numQueues);
     this.balancer = getBalancer(numQueues);
     initializeQueues(numQueues, queueClass, initargs);
+    LOG.debug(name + " queues=" + numQueues + " handlerCount=" + handlerCount);
   }
 
   protected void initializeQueues(final int numQueues,

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ac4a57f/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
index ee36f3f..70d903a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DaemonThreadFactory;
 
@@ -32,6 +34,7 @@ import java.util.concurrent.atomic.AtomicInteger;
  * This can be used for HMaster, where no prioritization is needed.
  */
 public class FifoRpcScheduler extends RpcScheduler {
+  private static final Log LOG = LogFactory.getLog(FifoRpcScheduler.class);
   private final int handlerCount;
   private final int maxQueueLength;
   private final AtomicInteger queueSize = new AtomicInteger(0);
@@ -41,6 +44,8 @@ public class FifoRpcScheduler extends RpcScheduler {
     this.handlerCount = handlerCount;
     this.maxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH,
         handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
+    LOG.info("Using " + this.getClass().getSimpleName() + " as user call queue; handlerCount=" +
+        handlerCount + "; maxQueueLength=" + maxQueueLength);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ac4a57f/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
index 431aeeb..d9d61c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
@@ -34,8 +34,11 @@ import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.util.BoundedPriorityBlockingQueue;
 
 /**
- * A scheduler that maintains isolated handler pools for general,
- * high-priority, and replication requests.
+ * The default scheduler. Configurable. Maintains isolated handler pools for general ('default'),
+ * high-priority ('priority'), and replication ('replication') requests. Default behavior is to
+ * balance the requests across handlers. Add configs to enable balancing by read vs writes, etc.
+ * See below article for explanation of options.
+ * @see <a href="http://blog.cloudera.com/blog/2014/12/new-in-cdh-5-2-improvements-for-running-multiple-workloads-on-a-single-hbase-cluster/">Overview on Request Queuing</a>
  */
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
@@ -49,7 +52,8 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs
   public static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY =
       "hbase.ipc.server.callqueue.handler.factor";
 
-  /** If set to 'deadline', uses a priority queue and deprioritize long-running scans */
+  /** If set to 'deadline', the default, uses a priority queue and deprioritizes long-running scans
+   */
   public static final String CALL_QUEUE_TYPE_CONF_KEY = "hbase.ipc.server.callqueue.type";
   public static final String CALL_QUEUE_TYPE_CODEL_CONF_VALUE = "codel";
   public static final String CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE = "deadline";
@@ -190,54 +194,58 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs
 
     float callQueuesHandlersFactor = conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0);
     int numCallQueues = Math.max(1, (int)Math.round(handlerCount * callQueuesHandlersFactor));
-
-    LOG.info("Using " + callQueueType + " as user call queue, count=" + numCallQueues);
-
+    LOG.info("Using " + callQueueType + " as user call queue; numCallQueues=" + numCallQueues +
+        "; callQReadShare=" + callqReadShare + ", callQScanShare=" + callqScanShare);
     if (numCallQueues > 1 && callqReadShare > 0) {
       // multiple read/write queues
-      if (callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE)) {
+      if (isDeadlineQueueType(callQueueType)) {
         CallPriorityComparator callPriority = new CallPriorityComparator(conf, this.priority);
-        callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount, numCallQueues,
+        callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount, numCallQueues,
             callqReadShare, callqScanShare, maxQueueLength, conf, abortable,
             BoundedPriorityBlockingQueue.class, callPriority);
       } else if (callQueueType.equals(CALL_QUEUE_TYPE_CODEL_CONF_VALUE)) {
         Object[] callQueueInitArgs = {maxQueueLength, codelTargetDelay, codelInterval,
           codelLifoThreshold, numGeneralCallsDropped, numLifoModeSwitches};
-        callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount,
+        callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount,
           numCallQueues, callqReadShare, callqScanShare,
           AdaptiveLifoCoDelCallQueue.class, callQueueInitArgs,
           AdaptiveLifoCoDelCallQueue.class, callQueueInitArgs);
       } else {
-        callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount, numCallQueues,
+        callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount, numCallQueues,
           callqReadShare, callqScanShare, maxQueueLength, conf, abortable);
       }
     } else {
       // multiple queues
-      if (callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE)) {
+      if (isDeadlineQueueType(callQueueType)) {
         CallPriorityComparator callPriority = new CallPriorityComparator(conf, this.priority);
-        callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount, numCallQueues,
-          conf, abortable, BoundedPriorityBlockingQueue.class, maxQueueLength, callPriority);
+        callExecutor =
+          new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount, numCallQueues,
+            conf, abortable, BoundedPriorityBlockingQueue.class, maxQueueLength, callPriority);
       } else if (callQueueType.equals(CALL_QUEUE_TYPE_CODEL_CONF_VALUE)) {
-        callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount, numCallQueues,
-          conf, abortable, AdaptiveLifoCoDelCallQueue.class, maxQueueLength,
-          codelTargetDelay, codelInterval, codelLifoThreshold,
-          numGeneralCallsDropped, numLifoModeSwitches);
+        callExecutor =
+          new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount, numCallQueues,
+            conf, abortable, AdaptiveLifoCoDelCallQueue.class, maxQueueLength,
+            codelTargetDelay, codelInterval, codelLifoThreshold,
+            numGeneralCallsDropped, numLifoModeSwitches);
       } else {
-        callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount,
+        callExecutor = new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount,
             numCallQueues, maxQueueLength, conf, abortable);
       }
     }
-
     // Create 2 queues to help priorityExecutor be more scalable.
     this.priorityExecutor = priorityHandlerCount > 0 ?
-        new BalancedQueueRpcExecutor("Priority", priorityHandlerCount, 2, maxPriorityQueueLength) :
-        null;
-
+      new BalancedQueueRpcExecutor("BalancedQ.priority", priorityHandlerCount, 2,
+          maxPriorityQueueLength):
+      null;
    this.replicationExecutor =
-     replicationHandlerCount > 0 ? new BalancedQueueRpcExecutor("Replication",
+     replicationHandlerCount > 0 ? new BalancedQueueRpcExecutor("BalancedQ.replication",
        replicationHandlerCount, 1, maxQueueLength, conf, abortable) : null;
   }
 
+  private static boolean isDeadlineQueueType(final String callQueueType) {
+    return callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE);
+  }
+
   public SimpleRpcScheduler(
 	      Configuration conf,
 	      int handlerCount,
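
As a rough usage sketch of the knobs the reworked class comment describes, assuming a standalone Configuration (this mirrors the new TestRpcSchedulerFactory further down; the handler factor and the read/scan shares are the same constants that test sets):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ipc.RpcScheduler;
import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler;
import org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory;

public class QueueSplitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Roughly one call queue per two handlers ...
    conf.setFloat(SimpleRpcScheduler.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.5f);
    // ... with half the queues serving reads, and half of those scans.
    conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_READ_SHARE_CONF_KEY, 0.5);
    conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_SCAN_SHARE_CONF_KEY, 0.5);
    // 'deadline' keeps the priority queue that deprioritizes long-running scans.
    conf.set(SimpleRpcScheduler.CALL_QUEUE_TYPE_CONF_KEY,
        SimpleRpcScheduler.CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE);
    RpcScheduler scheduler = new SimpleRpcSchedulerFactory().create(conf, null, null);
    System.out.println("Scheduler: " + scheduler.getClass().getSimpleName());
  }
}

SimpleRpcScheduler then logs the resulting numCallQueues and the read/scan shares at INFO, per the hunk above, which is the quickest way to confirm what split a given config actually produced.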

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ac4a57f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
new file mode 100644
index 0000000..f4b51ba
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
+import org.apache.hadoop.hbase.ipc.PriorityFunction;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+
+/**
+ * Factory to use when you want to use the {@link FifoRpcScheduler}
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class FifoRpcSchedulerFactory implements RpcSchedulerFactory {
+  @Override
+  public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
+    int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
+      HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
+    return new FifoRpcScheduler(conf, handlerCount);
+  }
+
+  @Deprecated
+  @Override
+  public RpcScheduler create(Configuration conf, PriorityFunction priority) {
+    return create(conf, priority, null);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ac4a57f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
index f554781..7bc59da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.ipc.RpcScheduler;
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
 public interface RpcSchedulerFactory {
-
   /**
    * Constructs a {@link org.apache.hadoop.hbase.ipc.RpcScheduler}.
    */
@@ -39,5 +38,4 @@ public interface RpcSchedulerFactory {
 
   @Deprecated
   RpcScheduler create(Configuration conf, PriorityFunction priority);
-
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ac4a57f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
index 743c5bb..92462c8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
@@ -27,11 +27,11 @@ import org.apache.hadoop.hbase.ipc.PriorityFunction;
 import org.apache.hadoop.hbase.ipc.RpcScheduler;
 import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler;
 
-/** Constructs a {@link SimpleRpcScheduler}. */
+/** Constructs a {@link SimpleRpcScheduler}.
+ */
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
 public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
-
   @Override
   @Deprecated
   public RpcScheduler create(Configuration conf, PriorityFunction priority) {
@@ -42,7 +42,6 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
   public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
     int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
         HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
-
     return new SimpleRpcScheduler(
       conf,
       handlerCount,
@@ -54,5 +53,4 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
       server,
       HConstants.QOS_THRESHOLD);
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3ac4a57f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java
new file mode 100644
index 0000000..9366c54
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
+
+/**
+ * A silly test that does nothing but make sure an rpcscheduler factory makes what it says
+ * it is going to make.
+ */
+@Category(SmallTests.class)
+public class TestRpcSchedulerFactory {
+  @Rule public TestName testName = new TestName();
+  @ClassRule public static TestRule timeout =
+      CategoryBasedTimeout.forClass(TestRpcSchedulerFactory.class);
+  private Configuration conf;
+
+  @Before
+  public void setUp() throws Exception {
+    this.conf = HBaseConfiguration.create();
+  }
+
+  @Test
+  public void testRWQ() {
+    // Set some configs just to see how it changes the scheduler. Can't assert the settings had
+    // an effect. Just eyeball the log.
+    this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_READ_SHARE_CONF_KEY, 0.5);
+    this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.5);
+    this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_SCAN_SHARE_CONF_KEY, 0.5);
+    RpcSchedulerFactory factory = new SimpleRpcSchedulerFactory();
+    RpcScheduler rpcScheduler = factory.create(this.conf, null, null);
+    assertTrue(rpcScheduler.getClass().equals(SimpleRpcScheduler.class));
+  }
+
+  @Test
+  public void testFifo() {
+    RpcSchedulerFactory factory = new FifoRpcSchedulerFactory();
+    RpcScheduler rpcScheduler = factory.create(this.conf, null, null);
+    assertTrue(rpcScheduler.getClass().equals(FifoRpcScheduler.class));
+  }
+}
\ No newline at end of file


[26/50] hbase git commit: HBASE-15954 REST server should log requests with TRACE instead of DEBUG

Posted by sy...@apache.org.
HBASE-15954 REST server should log requests with TRACE instead of DEBUG


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3d7840a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3d7840a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3d7840a1

Branch: refs/heads/hbase-12439
Commit: 3d7840a173aab97fb72409fa8c0f161fd7ad0e8f
Parents: b21c56e
Author: Enis Soztutar <en...@apache.org>
Authored: Mon Jun 6 10:58:37 2016 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Mon Jun 6 10:58:37 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/rest/MultiRowResource.java     |  4 +-
 .../hbase/rest/NamespacesInstanceResource.java  | 24 ++++-----
 .../hadoop/hbase/rest/NamespacesResource.java   |  4 +-
 .../hbase/rest/ProtobufStreamingUtil.java       | 10 ++--
 .../apache/hadoop/hbase/rest/RESTServer.java    | 12 +++--
 .../apache/hadoop/hbase/rest/RESTServlet.java   |  5 +-
 .../hadoop/hbase/rest/RegionsResource.java      |  4 +-
 .../apache/hadoop/hbase/rest/RootResource.java  |  4 +-
 .../apache/hadoop/hbase/rest/RowResource.java   | 54 ++++++++++----------
 .../hbase/rest/ScannerInstanceResource.java     | 32 +++++++-----
 .../hadoop/hbase/rest/ScannerResource.java      | 17 +++---
 .../hadoop/hbase/rest/SchemaResource.java       | 22 ++++----
 .../rest/StorageClusterStatusResource.java      |  4 +-
 .../rest/StorageClusterVersionResource.java     |  4 +-
 .../apache/hadoop/hbase/rest/TableResource.java | 26 ++++++----
 .../hadoop/hbase/rest/VersionResource.java      | 10 ++--
 .../apache/hadoop/hbase/rest/client/Client.java | 44 ++++++++--------
 .../hadoop/hbase/rest/filter/AuthFilter.java    |  4 +-
 .../rest/filter/RestCsrfPreventionFilter.java   | 15 +++---
 .../consumer/ProtobufMessageBodyConsumer.java   |  6 +--
 .../hadoop/hbase/util/ConnectionCache.java      |  6 ++-
 21 files changed, 169 insertions(+), 142 deletions(-)
----------------------------------------------------------------------
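
The whole change applies one idiom: wrap each request log in a level check so the message string is only built when that level is enabled. A minimal sketch of the pattern as used throughout these resources (class name and message are illustrative only):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class GuardedTraceSketch {
  private static final Log LOG = LogFactory.getLog(GuardedTraceSketch.class);

  void logRequest(String method, String path) {
    // Guard first: concatenating "GET /table/row" costs allocations even
    // when TRACE is off, so only pay for it when the level is enabled.
    if (LOG.isTraceEnabled()) {
      LOG.trace(method + " " + path);
    }
  }
}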


http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
index c88ac91..8ff3ef6 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java
@@ -86,7 +86,9 @@ public class MultiRowResource extends ResourceBase implements Constants {
           }
           model.addRow(rowModel);
         } else {
-          LOG.trace("The row : " + rk + " not found in the table.");
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("The row : " + rk + " not found in the table.");
+          }
         }
       }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
index 8f64738..c832905 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java
@@ -91,8 +91,8 @@ public class NamespacesInstanceResource extends ResourceBase {
     MIMETYPE_PROTOBUF_IETF})
   public Response get(final @Context ServletContext context,
       final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
 
@@ -135,8 +135,8 @@ public class NamespacesInstanceResource extends ResourceBase {
   @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
   public Response put(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("PUT " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("PUT " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     return processUpdate(model, true, uriInfo);
@@ -151,8 +151,8 @@ public class NamespacesInstanceResource extends ResourceBase {
   @PUT
   public Response putNoBody(final byte[] message,
       final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("PUT " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("PUT " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     try{
@@ -176,8 +176,8 @@ public class NamespacesInstanceResource extends ResourceBase {
   public Response post(final NamespacesInstanceModel model,
       final @Context UriInfo uriInfo) {
 
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("POST " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("POST " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     return processUpdate(model, false, uriInfo);
@@ -192,8 +192,8 @@ public class NamespacesInstanceResource extends ResourceBase {
   @POST
   public Response postNoBody(final byte[] message,
       final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("POST " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("POST " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     try{
@@ -287,8 +287,8 @@ public class NamespacesInstanceResource extends ResourceBase {
   @DELETE
   public Response deleteNoBody(final byte[] message,
       final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("DELETE " + uriInfo.getAbsolutePath());
     }
     if (servlet.isReadOnly()) {
       servlet.getMetrics().incrementFailedDeleteRequests(1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
index 0548fe8..1304fe0 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java
@@ -64,8 +64,8 @@ public class NamespacesResource extends ResourceBase {
   @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
   public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
index 93bb940..cb0f4c8 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingUtil.java
@@ -49,8 +49,10 @@ public class ProtobufStreamingUtil implements StreamingOutput {
     this.contentType = type;
     this.limit = limit;
     this.fetchSize = fetchSize;
-    LOG.debug("Created ScanStreamingUtil with content type = " + this.contentType + " user limit : "
-        + this.limit + " scan fetch size : " + this.fetchSize);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Created ScanStreamingUtil with content type = " + this.contentType
+        + " user limit : " + this.limit + " scan fetch size : " + this.fetchSize);
+    }
   }
 
   @Override
@@ -82,7 +84,9 @@ public class ProtobufStreamingUtil implements StreamingOutput {
     outStream.write(Bytes.toBytes((short)objectBytes.length));
     outStream.write(objectBytes);
     outStream.flush();
-    LOG.trace("Wrote " + model.getRows().size() + " rows to stream successfully.");
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Wrote " + model.getRows().size() + " rows to stream successfully.");
+    }
   }
 
   private CellSetModel createModelFromResults(Result[] results) {
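
The hunk above also documents the wire framing this class emits: each protobuf-encoded CellSetModel chunk is written as a two-byte length prefix (Bytes.toBytes on a short is big-endian) followed by the chunk bytes. A hypothetical reader for that framing, not part of the patch, might look like this:

import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

public class ChunkReaderSketch {
  /** Returns the next length-prefixed chunk, or null at a clean end of stream. */
  static byte[] readChunk(DataInputStream in) throws IOException {
    int length;
    try {
      // The writer casts the length to short; mask to read it back unsigned.
      // readShort() is big-endian, matching Bytes.toBytes(short).
      length = in.readShort() & 0xFFFF;
    } catch (EOFException eof) {
      return null; // no more chunks
    }
    byte[] chunk = new byte[length];
    in.readFully(chunk); // EOF mid-chunk correctly throws here
    return chunk;
  }
}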

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
index b26de54..cb37fb5 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java
@@ -168,20 +168,26 @@ public class RESTServer implements Constants {
     if (commandLine != null && commandLine.hasOption("port")) {
       String val = commandLine.getOptionValue("port");
       servlet.getConfiguration().setInt("hbase.rest.port", Integer.parseInt(val));
-      LOG.debug("port set to " + val);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("port set to " + val);
+      }
     }
 
     // check if server should only process GET requests, if so override the conf
     if (commandLine != null && commandLine.hasOption("readonly")) {
       servlet.getConfiguration().setBoolean("hbase.rest.readonly", true);
-      LOG.debug("readonly set to true");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("readonly set to true");
+      }
     }
 
     // check for user-defined info server port setting, if so override the conf
     if (commandLine != null && commandLine.hasOption("infoport")) {
       String val = commandLine.getOptionValue("infoport");
       servlet.getConfiguration().setInt("hbase.rest.info.port", Integer.parseInt(val));
-      LOG.debug("Web UI port set to " + val);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Web UI port set to " + val);
+      }
     }
 
     @SuppressWarnings("unchecked")

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
index 4da5c67..411ced8 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.rest;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
@@ -30,14 +32,13 @@ import org.apache.hadoop.hbase.util.ConnectionCache;
 import org.apache.hadoop.hbase.util.JvmPauseMonitor;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
-import org.apache.log4j.Logger;
 
 /**
  * Singleton class encapsulating global REST servlet state and functions.
  */
 @InterfaceAudience.Private
 public class RESTServlet implements Constants {
-  private static final Logger LOG = Logger.getLogger(RESTServlet.class);
+  private static final Log LOG = LogFactory.getLog(RESTServlet.class);
   private static RESTServlet INSTANCE;
   private final Configuration conf;
   private final MetricsREST metrics;

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
index 48721bb..f803b26 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java
@@ -71,8 +71,8 @@ public class RegionsResource extends ResourceBase {
   @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
   public Response get(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
index c08bb8b..fc4c548 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java
@@ -72,8 +72,8 @@ public class RootResource extends ResourceBase {
   @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
   public Response get(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index 15828ce..de84625 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -85,8 +85,8 @@ public class RowResource extends ResourceBase {
   @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
   public Response get(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     MultivaluedMap<String, String> params = uriInfo.getQueryParameters();
@@ -130,8 +130,8 @@ public class RowResource extends ResourceBase {
   @GET
   @Produces(MIMETYPE_BINARY)
   public Response getBinary(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
     }
     servlet.getMetrics().incrementRequests(1);
     // doesn't make sense to use a non specific coordinate as this can only
@@ -221,8 +221,8 @@ public class RowResource extends ResourceBase {
           put.addImmutable(parts[0], parts[1], cell.getTimestamp(), cell.getValue());
         }
         puts.add(put);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("PUT " + put.toString());
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("PUT " + put.toString());
         }
       }
       table = servlet.getTable(tableResource.getName());
@@ -289,8 +289,8 @@ public class RowResource extends ResourceBase {
       put.addImmutable(parts[0], parts[1], timestamp, message);
       table = servlet.getTable(tableResource.getName());
       table.put(put);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("PUT " + put.toString());
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("PUT " + put.toString());
       }
       servlet.getMetrics().incrementSucessfulPutRequests(1);
       return Response.ok().build();
@@ -301,7 +301,7 @@ public class RowResource extends ResourceBase {
       if (table != null) try {
         table.close();
       } catch (IOException ioe) {
-        LOG.debug(ioe);
+        LOG.debug("Exception received while closing the table", ioe);
       }
     }
   }
@@ -311,8 +311,8 @@ public class RowResource extends ResourceBase {
     MIMETYPE_PROTOBUF_IETF})
   public Response put(final CellSetModel model,
       final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("PUT " + uriInfo.getAbsolutePath()
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("PUT " + uriInfo.getAbsolutePath()
         + " " + uriInfo.getQueryParameters());
     }
     return update(model, true);
@@ -322,8 +322,8 @@ public class RowResource extends ResourceBase {
   @Consumes(MIMETYPE_BINARY)
   public Response putBinary(final byte[] message,
       final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY);
     }
     return updateBinary(message, headers, true);
   }
@@ -333,8 +333,8 @@ public class RowResource extends ResourceBase {
     MIMETYPE_PROTOBUF_IETF})
   public Response post(final CellSetModel model,
       final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("POST " + uriInfo.getAbsolutePath()
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("POST " + uriInfo.getAbsolutePath()
         + " " + uriInfo.getQueryParameters());
     }
     return update(model, false);
@@ -344,16 +344,16 @@ public class RowResource extends ResourceBase {
   @Consumes(MIMETYPE_BINARY)
   public Response postBinary(final byte[] message,
       final @Context UriInfo uriInfo, final @Context HttpHeaders headers) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY);
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY);
     }
     return updateBinary(message, headers, false);
   }
 
   @DELETE
   public Response delete(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("DELETE " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     if (servlet.isReadOnly()) {
@@ -397,8 +397,8 @@ public class RowResource extends ResourceBase {
       table = servlet.getTable(tableResource.getName());
       table.delete(delete);
       servlet.getMetrics().incrementSucessfulDeleteRequests(1);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("DELETE " + delete.toString());
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("DELETE " + delete.toString());
       }
     } catch (Exception e) {
       servlet.getMetrics().incrementFailedDeleteRequests(1);
@@ -407,7 +407,7 @@ public class RowResource extends ResourceBase {
       if (table != null) try {
         table.close();
       } catch (IOException ioe) {
-        LOG.debug(ioe);
+        LOG.debug("Exception received while closing the table", ioe);
       }
     }
     return Response.ok().build();
@@ -499,8 +499,8 @@ public class RowResource extends ResourceBase {
           .build();
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("CHECK-AND-PUT " + put.toString() + ", returns " + retValue);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("CHECK-AND-PUT " + put.toString() + ", returns " + retValue);
       }
       if (!retValue) {
         servlet.getMetrics().incrementFailedPutRequests(1);
@@ -517,7 +517,7 @@ public class RowResource extends ResourceBase {
     } finally {
       if (table != null) try {
         table.close();
-      } catch (IOException ioe) { 
+      } catch (IOException ioe) {
         LOG.debug("Exception received while closing the table", ioe);
       }
     }
@@ -627,8 +627,8 @@ public class RowResource extends ResourceBase {
           .build();
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("CHECK-AND-DELETE " + delete.toString() + ", returns "
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns "
           + retValue);
       }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
index ffb2fae..2469faa 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java
@@ -62,7 +62,7 @@ public class ScannerInstanceResource extends ResourceBase {
 
   public ScannerInstanceResource() throws IOException { }
 
-  public ScannerInstanceResource(String table, String id, 
+  public ScannerInstanceResource(String table, String id,
       ResultGenerator generator, int batch) throws IOException {
     this.id = id;
     this.generator = generator;
@@ -72,10 +72,10 @@ public class ScannerInstanceResource extends ResourceBase {
   @GET
   @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
-  public Response get(final @Context UriInfo uriInfo, 
+  public Response get(final @Context UriInfo uriInfo,
       @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     if (generator == null) {
@@ -108,7 +108,9 @@ public class ScannerInstanceResource extends ResourceBase {
           .build();
       }
       if (value == null) {
-        LOG.info("generator exhausted");
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("generator exhausted");
+        }
         // respond with 204 (No Content) if an empty cell set would be
         // returned
         if (count == limit) {
@@ -123,7 +125,7 @@ public class ScannerInstanceResource extends ResourceBase {
       if (!Bytes.equals(CellUtil.cloneRow(value), rowKey)) {
         // if maxRows was given as a query param, stop if we would exceed the
         // specified number of rows
-        if (maxRows > 0) { 
+        if (maxRows > 0) {
           if (--maxRows == 0) {
             generator.putBack(value);
             break;
@@ -134,7 +136,7 @@ public class ScannerInstanceResource extends ResourceBase {
         rowModel = new RowModel(rowKey);
       }
       rowModel.addCell(
-        new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), 
+        new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value),
           value.getTimestamp(), CellUtil.cloneValue(value)));
     } while (--count > 0);
     model.addRow(rowModel);
@@ -147,21 +149,23 @@ public class ScannerInstanceResource extends ResourceBase {
   @GET
   @Produces(MIMETYPE_BINARY)
   public Response getBinary(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath() + " as " +
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " +
         MIMETYPE_BINARY);
     }
     servlet.getMetrics().incrementRequests(1);
     try {
       Cell value = generator.next();
       if (value == null) {
-        LOG.info("generator exhausted");
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("generator exhausted");
+        }
         return Response.noContent().build();
       }
       ResponseBuilder response = Response.ok(CellUtil.cloneValue(value));
       response.cacheControl(cacheControl);
-      response.header("X-Row", Base64.encodeBytes(CellUtil.cloneRow(value)));      
-      response.header("X-Column", 
+      response.header("X-Row", Base64.encodeBytes(CellUtil.cloneRow(value)));
+      response.header("X-Column",
         Base64.encodeBytes(
           KeyValue.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))));
       response.header("X-Timestamp", value.getTimestamp());
@@ -182,8 +186,8 @@ public class ScannerInstanceResource extends ResourceBase {
 
   @DELETE
   public Response delete(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("DELETE " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     if (servlet.isReadOnly()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
index 844ea3b..71723d8 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java
@@ -31,7 +31,6 @@ import javax.ws.rs.PUT;
 import javax.ws.rs.Path;
 import javax.ws.rs.PathParam;
 import javax.ws.rs.core.Context;
-import javax.ws.rs.core.MultivaluedMap;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.UriBuilder;
 import javax.ws.rs.core.UriInfo;
@@ -91,7 +90,7 @@ public class ScannerResource extends ResourceBase {
       spec = new RowSpec(model.getStartRow(), endRow, model.getColumns(), model.getStartTime(),
           model.getEndTime(), model.getMaxVersions());
     }
-    
+
     try {
       Filter filter = ScannerResultGenerator.buildFilterFromModel(model);
       String tableName = tableResource.getName();
@@ -102,8 +101,8 @@ public class ScannerResource extends ResourceBase {
       ScannerInstanceResource instance =
         new ScannerInstanceResource(tableName, id, gen, model.getBatch());
       scanners.put(id, instance);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("new scanner: " + id);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("new scanner: " + id);
       }
       UriBuilder builder = uriInfo.getAbsolutePathBuilder();
       URI uri = builder.path(id).build();
@@ -129,10 +128,10 @@ public class ScannerResource extends ResourceBase {
   @PUT
   @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
-  public Response put(final ScannerModel model, 
+  public Response put(final ScannerModel model,
       final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("PUT " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("PUT " + uriInfo.getAbsolutePath());
     }
     return update(model, true, uriInfo);
   }
@@ -142,8 +141,8 @@ public class ScannerResource extends ResourceBase {
     MIMETYPE_PROTOBUF_IETF})
   public Response post(final ScannerModel model,
       final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("POST " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("POST " + uriInfo.getAbsolutePath());
     }
     return update(model, false, uriInfo);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
index c0e7153..dc34f09 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java
@@ -86,8 +86,8 @@ public class SchemaResource extends ResourceBase {
   @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
   public Response get(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     try {
@@ -99,7 +99,7 @@ public class SchemaResource extends ResourceBase {
     } catch (Exception e) {
       servlet.getMetrics().incrementFailedGetRequests(1);
       return processException(e);
-    } 
+    }
   }
 
   private Response replace(final TableName name, final TableSchemaModel model,
@@ -198,10 +198,10 @@ public class SchemaResource extends ResourceBase {
   @PUT
   @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
-  public Response put(final TableSchemaModel model, 
+  public Response put(final TableSchemaModel model,
       final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("PUT " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("PUT " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     return update(model, true, uriInfo);
@@ -210,10 +210,10 @@ public class SchemaResource extends ResourceBase {
   @POST
   @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
-  public Response post(final TableSchemaModel model, 
+  public Response post(final TableSchemaModel model,
       final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("PUT " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("PUT " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     return update(model, false, uriInfo);
@@ -223,8 +223,8 @@ public class SchemaResource extends ResourceBase {
       justification="Expected")
   @DELETE
   public Response delete(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("DELETE " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("DELETE " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     if (servlet.isReadOnly()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
index a7e52bd..27977c3 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java
@@ -63,8 +63,8 @@ public class StorageClusterStatusResource extends ResourceBase {
   @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
   public Response get(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
index 85e81f8..b9fb5d4 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java
@@ -58,8 +58,8 @@ public class StorageClusterVersionResource extends ResourceBase {
   @GET
   @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON})
   public Response get(final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
index 45e9125..70a4538 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java
@@ -133,7 +133,7 @@ public class TableResource extends ResourceBase {
       @DefaultValue("-1") @QueryParam(Constants.SCAN_BATCH_SIZE) int batchSize,
       @DefaultValue("0") @QueryParam(Constants.SCAN_START_TIME) long startTime,
       @DefaultValue(Long.MAX_VALUE + "") @QueryParam(Constants.SCAN_END_TIME) long endTime,
-      @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks, 
+      @DefaultValue("true") @QueryParam(Constants.SCAN_BATCH_SIZE) boolean cacheBlocks,
       @DefaultValue("") @QueryParam(Constants.SCAN_FILTER) String filters) {
     try {
       Filter filter = null;
@@ -146,10 +146,12 @@ public class TableResource extends ResourceBase {
           tableScan.setStartRow(prefixBytes);
         }
       }
-      LOG.debug("Query parameters  : Table Name = > " + this.table + " Start Row => " + startRow
-          + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime
-          + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => "
-          + maxVersions + " Batch Size => " + batchSize);
+      if (LOG.isTraceEnabled()) {
+        LOG.trace("Query parameters  : Table Name = > " + this.table + " Start Row => " + startRow
+            + " End Row => " + endRow + " Columns => " + column + " Start Time => " + startTime
+            + " End Time => " + endTime + " Cache Blocks => " + cacheBlocks + " Max Versions => "
+            + maxVersions + " Batch Size => " + batchSize);
+      }
       Table hTable = RESTServlet.getInstance().getTable(this.table);
       tableScan.setBatch(batchSize);
       tableScan.setMaxVersions(maxVersions);
@@ -162,15 +164,21 @@ public class TableResource extends ResourceBase {
         String[] familysplit = csplit.trim().split(":");
         if (familysplit.length == 2) {
           if (familysplit[1].length() > 0) {
-            LOG.debug("Scan family and column : " + familysplit[0] + "  " + familysplit[1]);
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("Scan family and column : " + familysplit[0] + "  " + familysplit[1]);
+            }
             tableScan.addColumn(Bytes.toBytes(familysplit[0]), Bytes.toBytes(familysplit[1]));
           } else {
             tableScan.addFamily(Bytes.toBytes(familysplit[0]));
-            LOG.debug("Scan family : " + familysplit[0] + " and empty qualifier.");
+            if (LOG.isTraceEnabled()) {
+              LOG.trace("Scan family : " + familysplit[0] + " and empty qualifier.");
+            }
             tableScan.addColumn(Bytes.toBytes(familysplit[0]), null);
           }
-        } else if (StringUtils.isNotEmpty(familysplit[0])){
-          LOG.debug("Scan family : " + familysplit[0]);
+        } else if (StringUtils.isNotEmpty(familysplit[0])) {
+          if (LOG.isTraceEnabled()) {
+            LOG.trace("Scan family : " + familysplit[0]);
+          }
           tableScan.addFamily(Bytes.toBytes(familysplit[0]));
         }
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
index ae93825..172246c 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java
@@ -68,15 +68,15 @@ public class VersionResource extends ResourceBase {
    * Build a response for a version request.
    * @param context servlet context
    * @param uriInfo (JAX-RS context variable) request URL
-   * @return a response for a version request 
+   * @return a response for a version request
    */
   @GET
   @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF,
     MIMETYPE_PROTOBUF_IETF})
-  public Response get(final @Context ServletContext context, 
+  public Response get(final @Context ServletContext context,
       final @Context UriInfo uriInfo) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("GET " + uriInfo.getAbsolutePath());
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("GET " + uriInfo.getAbsolutePath());
     }
     servlet.getMetrics().incrementRequests(1);
     ResponseBuilder response = Response.ok(new VersionModel(context));
@@ -89,7 +89,7 @@ public class VersionResource extends ResourceBase {
    * Dispatch to StorageClusterVersionResource
    */
   @Path("cluster")
-  public StorageClusterVersionResource getClusterVersionResource() 
+  public StorageClusterVersionResource getClusterVersionResource()
       throws IOException {
     return new StorageClusterVersionResource();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
index 142c276..e26de63 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java
@@ -101,10 +101,10 @@ public class Client {
   }
 
   /**
-   * Shut down the client. Close any open persistent connections. 
+   * Shut down the client. Close any open persistent connections.
    */
   public void shutdown() {
-    MultiThreadedHttpConnectionManager manager = 
+    MultiThreadedHttpConnectionManager manager =
       (MultiThreadedHttpConnectionManager) httpClient.getHttpConnectionManager();
     manager.shutdown();
   }
@@ -151,7 +151,7 @@ public class Client {
    * one of the members of the supplied cluster definition and iterate through
    * the list until a transaction can be successfully completed. The
    * definition of success here is a complete HTTP transaction, irrespective
-   * of result code.  
+   * of result code.
    * @param cluster the cluster definition
    * @param method the transaction method
    * @param headers HTTP header values to send
@@ -209,8 +209,8 @@ public class Client {
     long startTime = System.currentTimeMillis();
     int code = httpClient.executeMethod(method);
     long endTime = System.currentTimeMillis();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(method.getName() + " " + uri + " " + code + " " +
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(method.getName() + " " + uri + " " + code + " " +
         method.getStatusText() + " in " + (endTime - startTime) + " ms");
     }
     return code;
@@ -250,7 +250,7 @@ public class Client {
   }
 
   /**
-   * Send a HEAD request 
+   * Send a HEAD request
    * @param path the path or URI
    * @return a Response object with response detail
    * @throws IOException
@@ -260,14 +260,14 @@ public class Client {
   }
 
   /**
-   * Send a HEAD request 
+   * Send a HEAD request
    * @param cluster the cluster definition
    * @param path the path or URI
    * @param headers the HTTP headers to include in the request
    * @return a Response object with response detail
    * @throws IOException
    */
-  public Response head(Cluster cluster, String path, Header[] headers) 
+  public Response head(Cluster cluster, String path, Header[] headers)
       throws IOException {
     HeadMethod method = new HeadMethod();
     try {
@@ -280,7 +280,7 @@ public class Client {
   }
 
   /**
-   * Send a GET request 
+   * Send a GET request
    * @param path the path or URI
    * @return a Response object with response detail
    * @throws IOException
@@ -290,7 +290,7 @@ public class Client {
   }
 
   /**
-   * Send a GET request 
+   * Send a GET request
    * @param cluster the cluster definition
    * @param path the path or URI
    * @return a Response object with response detail
@@ -301,7 +301,7 @@ public class Client {
   }
 
   /**
-   * Send a GET request 
+   * Send a GET request
    * @param path the path or URI
    * @param accept Accept header value
    * @return a Response object with response detail
@@ -312,7 +312,7 @@ public class Client {
   }
 
   /**
-   * Send a GET request 
+   * Send a GET request
    * @param cluster the cluster definition
    * @param path the path or URI
    * @param accept Accept header value
@@ -329,7 +329,7 @@ public class Client {
   /**
    * Send a GET request
    * @param path the path or URI
-   * @param headers the HTTP headers to include in the request, 
+   * @param headers the HTTP headers to include in the request,
    * <tt>Accept</tt> must be supplied
    * @return a Response object with response detail
    * @throws IOException
@@ -346,7 +346,7 @@ public class Client {
    * @return a Response object with response detail
    * @throws IOException
    */
-  public Response get(Cluster c, String path, Header[] headers) 
+  public Response get(Cluster c, String path, Header[] headers)
       throws IOException {
     GetMethod method = new GetMethod();
     try {
@@ -396,7 +396,7 @@ public class Client {
    * @return a Response object with response detail
    * @throws IOException for error
    */
-  public Response put(Cluster cluster, String path, String contentType, 
+  public Response put(Cluster cluster, String path, String contentType,
       byte[] content) throws IOException {
     Header[] headers = new Header[1];
     headers[0] = new Header("Content-Type", contentType);
@@ -413,7 +413,7 @@ public class Client {
    * @return a Response object with response detail
    * @throws IOException for error
    */
-  public Response put(Cluster cluster, String path, String contentType, 
+  public Response put(Cluster cluster, String path, String contentType,
       byte[] content, Header extraHdr) throws IOException {
     int cnt = extraHdr == null ? 1 : 2;
     Header[] headers = new Header[cnt];
@@ -433,7 +433,7 @@ public class Client {
    * @return a Response object with response detail
    * @throws IOException
    */
-  public Response put(String path, Header[] headers, byte[] content) 
+  public Response put(String path, Header[] headers, byte[] content)
       throws IOException {
     return put(cluster, path, headers, content);
   }
@@ -448,7 +448,7 @@ public class Client {
    * @return a Response object with response detail
    * @throws IOException
    */
-  public Response put(Cluster cluster, String path, Header[] headers, 
+  public Response put(Cluster cluster, String path, Header[] headers,
       byte[] content) throws IOException {
     PutMethod method = new PutMethod();
     try {
@@ -498,7 +498,7 @@ public class Client {
    * @return a Response object with response detail
    * @throws IOException for error
    */
-  public Response post(Cluster cluster, String path, String contentType, 
+  public Response post(Cluster cluster, String path, String contentType,
       byte[] content) throws IOException {
     Header[] headers = new Header[1];
     headers[0] = new Header("Content-Type", contentType);
@@ -515,7 +515,7 @@ public class Client {
    * @return a Response object with response detail
    * @throws IOException for error
    */
-  public Response post(Cluster cluster, String path, String contentType, 
+  public Response post(Cluster cluster, String path, String contentType,
       byte[] content, Header extraHdr) throws IOException {
     int cnt = extraHdr == null ? 1 : 2;
     Header[] headers = new Header[cnt];
@@ -535,7 +535,7 @@ public class Client {
    * @return a Response object with response detail
    * @throws IOException
    */
-  public Response post(String path, Header[] headers, byte[] content) 
+  public Response post(String path, Header[] headers, byte[] content)
       throws IOException {
     return post(cluster, path, headers, content);
   }
@@ -550,7 +550,7 @@ public class Client {
    * @return a Response object with response detail
    * @throws IOException
    */
-  public Response post(Cluster cluster, String path, Header[] headers, 
+  public Response post(Cluster cluster, String path, Header[] headers,
       byte[] content) throws IOException {
     PostMethod method = new PostMethod();
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
index e5208af..f051bc8 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java
@@ -72,7 +72,9 @@ public class AuthFilter extends AuthenticationFilter {
             throw new ServletException("Failed to retrieve server principal", ie);
           }
         }
-        LOG.debug("Setting property " + name + "=" + value);
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Setting property " + name + "=" + value);
+        }
         name = name.substring(REST_PREFIX_LEN);
         props.setProperty(name, value);
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
index 30eea95..dbb1447 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java
@@ -34,13 +34,12 @@ import javax.servlet.ServletResponse;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 /**
  * This filter provides protection against cross site request forgery (CSRF)
  * attacks for REST APIs. Enabling this filter on an endpoint results in the
@@ -52,8 +51,8 @@ import org.slf4j.LoggerFactory;
 @InterfaceStability.Evolving
 public class RestCsrfPreventionFilter implements Filter {
 
-  private static final Logger LOG =
-      LoggerFactory.getLogger(RestCsrfPreventionFilter.class);
+  private static final Log LOG =
+      LogFactory.getLog(RestCsrfPreventionFilter.class);
 
   public static final String HEADER_USER_AGENT = "User-Agent";
   public static final String BROWSER_USER_AGENT_PARAM =
@@ -87,9 +86,9 @@ public class RestCsrfPreventionFilter implements Filter {
       agents = BROWSER_USER_AGENTS_DEFAULT;
     }
     parseBrowserUserAgents(agents);
-    LOG.info("Adding cross-site request forgery (CSRF) protection, "
-        + "headerName = {}, methodsToIgnore = {}, browserUserAgents = {}",
-        headerName, methodsToIgnore, browserUserAgents);
+    LOG.info(String.format("Adding cross-site request forgery (CSRF) protection, "
+        + "headerName = %s, methodsToIgnore = %s, browserUserAgents = %s",
+        headerName, methodsToIgnore, browserUserAgents));
   }
 
   void parseBrowserUserAgents(String userAgents) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
index ec39db0..073c038 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/provider/consumer/ProtobufMessageBodyConsumer.java
@@ -45,7 +45,7 @@ import org.apache.hadoop.hbase.rest.ProtobufMessageHandler;
 @Provider
 @Consumes({Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF})
 @InterfaceAudience.Private
-public class ProtobufMessageBodyConsumer 
+public class ProtobufMessageBodyConsumer
     implements MessageBodyReader<ProtobufMessageHandler> {
   private static final Log LOG =
     LogFactory.getLog(ProtobufMessageBodyConsumer.class);
@@ -73,8 +73,8 @@ public class ProtobufMessageBodyConsumer
           baos.write(buffer, 0, read);
         }
       } while (read > 0);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(getClass() + ": read " + baos.size() + " bytes from " +
+      if (LOG.isTraceEnabled()) {
+        LOG.trace(getClass() + ": read " + baos.size() + " bytes from " +
           inputStream);
       }
       obj = obj.getObjectFromMessage(baos.toByteArray());

http://git-wip-us.apache.org/repos/asf/hbase/blob/3d7840a1/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
index e8a32d9..a860f20 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ConnectionCache.java
@@ -23,6 +23,7 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.locks.Lock;
 
+import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ScheduledChore;
@@ -37,7 +38,7 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.log4j.Logger;
+import org.apache.commons.logging.LogFactory;
 
 /**
  * A utility to store user specific HConnections in memory.
@@ -47,7 +48,7 @@ import org.apache.log4j.Logger;
  */
 @InterfaceAudience.Private
 public class ConnectionCache {
-  private static final Logger LOG = Logger.getLogger(ConnectionCache.class);
+  private static final Log LOG = LogFactory.getLog(ConnectionCache.class);
 
   private final Map<String, ConnectionInfo>
    connections = new ConcurrentHashMap<String, ConnectionInfo>();
@@ -60,6 +61,7 @@ public class ConnectionCache {
 
   private final ThreadLocal<String> effectiveUserNames =
       new ThreadLocal<String>() {
+    @Override
     protected String initialValue() {
       return realUserName;
     }


[18/50] hbase git commit: HBASE-15927 Remove HMaster.assignRegion()

Posted by sy...@apache.org.
HBASE-15927 Remove HMaster.assignRegion()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f0c159b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f0c159b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f0c159b5

Branch: refs/heads/hbase-12439
Commit: f0c159b5fed732cdc08bd2fd924ae81677d66a30
Parents: 0cbce07
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Fri Jun 3 12:36:25 2016 -0700
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Fri Jun 3 12:36:25 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/master/HMaster.java |  4 --
 .../hadoop/hbase/HBaseTestingUtility.java       | 15 +++++
 .../hbase/client/TestMetaWithReplicas.java      |  2 +-
 .../master/TestAssignmentManagerOnCluster.java  | 61 ++++++++------------
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   | 20 +++----
 .../hadoop/hbase/util/TestHBaseFsckTwoRS.java   | 23 +++-----
 6 files changed, 58 insertions(+), 67 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/f0c159b5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index f8d0003..d368ffb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2378,10 +2378,6 @@ public class HMaster extends HRegionServer implements MasterServices {
     return this.initializationBeforeMetaAssignment;
   }
 
-  public void assignRegion(HRegionInfo hri) {
-    assignmentManager.assign(hri);
-  }
-
   /**
    * Compute the average load across all region servers.
    * Currently, this uses a very naive computation - just uses the number of

http://git-wip-us.apache.org/repos/asf/hbase/blob/f0c159b5/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 7b35815..bfa14cb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -3200,6 +3200,21 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
   }
 
   /**
+   * Uses the assignment manager directly to assign the region,
+   * and waits until the specified region has completed assignment.
+   * @param regionInfo the region to assign
+   * @throws IOException
+   * @throws InterruptedException
+   * @return true if the region is assigned, false otherwise.
+   */
+  public boolean assignRegion(final HRegionInfo regionInfo)
+      throws IOException, InterruptedException {
+    final AssignmentManager am = getHBaseCluster().getMaster().getAssignmentManager();
+    am.assign(regionInfo);
+    return am.waitForAssignment(regionInfo);
+  }
+
+  /**
    * Wait until all regions for a table in hbase:meta have a non-empty
    * info:server, up to a configurable timeout value (default is 60 seconds)
    * This means all regions have been deployed,
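
A minimal usage sketch of the new helper, mirroring the call sites updated in the test diffs below (the descriptor, meta table and row keys are the ones those tests already build):

    HRegionInfo hri = new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
    MetaTableAccessor.addRegionToMeta(meta, hri); // region must exist in hbase:meta first
    assertTrue(TEST_UTIL.assignRegion(hri));      // assigns and waits for completion in one call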

http://git-wip-us.apache.org/repos/asf/hbase/blob/f0c159b5/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
index 5e302d2..8e87ceb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMetaWithReplicas.java
@@ -448,7 +448,7 @@ public class TestMetaWithReplicas {
     // create in-memory state otherwise master won't assign
     TEST_UTIL.getMiniHBaseCluster().getMaster().getAssignmentManager()
              .getRegionStates().createRegionState(h);
-    TEST_UTIL.getMiniHBaseCluster().getMaster().assignRegion(h);
+    TEST_UTIL.assignRegion(h);
     HBaseFsckRepair.waitUntilAssigned(TEST_UTIL.getHBaseAdmin(), h);
     // check that problem exists
     HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f0c159b5/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
index 95ef6d5..7d3d2e9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestAssignmentManagerOnCluster.java
@@ -191,9 +191,8 @@ public class TestAssignmentManagerOnCluster {
       MetaTableAccessor.addRegionToMeta(meta, hri);
 
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-      master.assignRegion(hri);
       AssignmentManager am = master.getAssignmentManager();
-      am.waitForAssignment(hri);
+      TEST_UTIL.assignRegion(hri);
 
       RegionStates regionStates = am.getRegionStates();
       ServerName serverName = regionStates.getRegionServerOfRegion(hri);
@@ -248,7 +247,7 @@ public class TestAssignmentManagerOnCluster {
       final AssignmentManager am = master.getAssignmentManager();
       RegionPlan plan = new RegionPlan(hri, null, deadServer);
       am.addPlan(hri.getEncodedName(), plan);
-      master.assignRegion(hri);
+      TEST_UTIL.assignRegion(hri);
 
       TEST_UTIL.waitFor(60000, new Waiter.Predicate<Exception>() {
         @Override
@@ -446,9 +445,8 @@ public class TestAssignmentManagerOnCluster {
       MetaTableAccessor.addRegionToMeta(meta, hri);
 
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-      master.assignRegion(hri);
       AssignmentManager am = master.getAssignmentManager();
-      assertTrue(am.waitForAssignment(hri));
+      assertTrue(TEST_UTIL.assignRegion(hri));
 
       ServerName sn = am.getRegionStates().getRegionServerOfRegion(hri);
       TEST_UTIL.assertRegionOnServer(hri, sn, 6000);
@@ -495,9 +493,8 @@ public class TestAssignmentManagerOnCluster {
       MetaTableAccessor.addRegionToMeta(meta, hri);
 
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-      master.assignRegion(hri);
       AssignmentManager am = master.getAssignmentManager();
-      assertTrue(am.waitForAssignment(hri));
+      assertTrue(TEST_UTIL.assignRegion(hri));
       ServerName sn = am.getRegionStates().getRegionServerOfRegion(hri);
       TEST_UTIL.assertRegionOnServer(hri, sn, 6000);
 
@@ -543,9 +540,8 @@ public class TestAssignmentManagerOnCluster {
       MyLoadBalancer.controledRegion = hri;
 
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-      master.assignRegion(hri);
       AssignmentManager am = master.getAssignmentManager();
-      assertFalse(am.waitForAssignment(hri));
+      assertFalse(TEST_UTIL.assignRegion(hri));
 
       RegionState state = am.getRegionStates().getRegionState(hri);
       assertEquals(RegionState.State.FAILED_OPEN, state.getState());
@@ -553,8 +549,7 @@ public class TestAssignmentManagerOnCluster {
       assertNull(state.getServerName());
 
       MyLoadBalancer.controledRegion = null;
-      master.assignRegion(hri);
-      assertTrue(am.waitForAssignment(hri));
+      assertTrue(TEST_UTIL.assignRegion(hri));
 
       ServerName serverName = master.getAssignmentManager().
         getRegionStates().getRegionServerOfRegion(hri);
@@ -581,17 +576,15 @@ public class TestAssignmentManagerOnCluster {
         desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
       MetaTableAccessor.addRegionToMeta(meta, hri);
 
-      MyLoadBalancer.controledRegion = hri;
-
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
       AssignmentManager am = master.getAssignmentManager();
+
       // round-robin assignment but balancer cannot find a plan
       // assignment should fail
-      am.assign(Arrays.asList(hri));
-
+      MyLoadBalancer.controledRegion = hri;
       // if bulk assignment cannot update region state to online
       // or failed_open this waits until timeout
-      assertFalse(am.waitForAssignment(hri));
+      assertFalse(TEST_UTIL.assignRegion(hri));
       RegionState state = am.getRegionStates().getRegionState(hri);
       assertEquals(RegionState.State.FAILED_OPEN, state.getState());
       // Failed to open since no plan, so it's on no server
@@ -599,8 +592,7 @@ public class TestAssignmentManagerOnCluster {
 
       // try again with valid plan
       MyLoadBalancer.controledRegion = null;
-      am.assign(Arrays.asList(hri));
-      assertTrue(am.waitForAssignment(hri));
+      assertTrue(TEST_UTIL.assignRegion(hri));
 
       ServerName serverName = master.getAssignmentManager().
         getRegionStates().getRegionServerOfRegion(hri);
@@ -689,9 +681,8 @@ public class TestAssignmentManagerOnCluster {
       fs.create(regionDir, true);
 
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-      master.assignRegion(hri);
       AssignmentManager am = master.getAssignmentManager();
-      assertFalse(am.waitForAssignment(hri));
+      assertFalse(TEST_UTIL.assignRegion(hri));
 
       RegionState state = am.getRegionStates().getRegionState(hri);
       assertEquals(RegionState.State.FAILED_OPEN, state.getState());
@@ -702,8 +693,7 @@ public class TestAssignmentManagerOnCluster {
 
       // remove the blocking file, so that region can be opened
       fs.delete(regionDir, true);
-      master.assignRegion(hri);
-      assertTrue(am.waitForAssignment(hri));
+      assertTrue(TEST_UTIL.assignRegion(hri));
 
       ServerName serverName = master.getAssignmentManager().
         getRegionStates().getRegionServerOfRegion(hri);
@@ -768,9 +758,8 @@ public class TestAssignmentManagerOnCluster {
       MetaTableAccessor.addRegionToMeta(meta, hri);
 
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-      master.assignRegion(hri);
       AssignmentManager am = master.getAssignmentManager();
-      assertTrue(am.waitForAssignment(hri));
+      assertTrue(TEST_UTIL.assignRegion(hri));
       ServerName sn = am.getRegionStates().getRegionServerOfRegion(hri);
       TEST_UTIL.assertRegionOnServer(hri, sn, 6000);
 
@@ -816,8 +805,9 @@ public class TestAssignmentManagerOnCluster {
       MyRegionObserver.postOpenEnabled.set(true);
       MyRegionObserver.postOpenCalled = false;
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
+      AssignmentManager am = master.getAssignmentManager();
       // Region will be opened, but it won't complete
-      master.assignRegion(hri);
+      am.assign(hri);
       long end = EnvironmentEdgeManager.currentTime() + 20000;
       // Wait till postOpen is called
       while (!MyRegionObserver.postOpenCalled ) {
@@ -826,7 +816,6 @@ public class TestAssignmentManagerOnCluster {
         Thread.sleep(300);
       }
 
-      AssignmentManager am = master.getAssignmentManager();
       // Now let's unassign it, it should do nothing
       am.unassign(hri);
       RegionState state = am.getRegionStates().getRegionState(hri);
@@ -887,12 +876,14 @@ public class TestAssignmentManagerOnCluster {
 
       // Assign the region
       master = (MyMaster)cluster.getMaster();
-      master.assignRegion(hri);
+      AssignmentManager am = master.getAssignmentManager();
+
+      am.assign(hri);
 
       // Hold SSH before killing the hosting server
       master.enableSSH(false);
 
-      AssignmentManager am = master.getAssignmentManager();
+
       RegionStates regionStates = am.getRegionStates();
       ServerName metaServer = regionStates.getRegionServerOfRegion(
         HRegionInfo.FIRST_META_REGIONINFO);
@@ -1015,10 +1006,9 @@ public class TestAssignmentManagerOnCluster {
 
       // Assign the region
       master = (MyMaster)cluster.getMaster();
-      master.assignRegion(hri);
       AssignmentManager am = master.getAssignmentManager();
       RegionStates regionStates = am.getRegionStates();
-      assertTrue(am.waitForAssignment(hri));
+      assertTrue(TEST_UTIL.assignRegion(hri));
 
       // Disable the table
       admin.disableTable(table);
@@ -1056,9 +1046,9 @@ public class TestAssignmentManagerOnCluster {
 
       // Assign the region
       master = (MyMaster)cluster.getMaster();
-      master.assignRegion(hri);
-
       AssignmentManager am = master.getAssignmentManager();
+      am.assign(hri);
+
       RegionStates regionStates = am.getRegionStates();
       ServerName metaServer = regionStates.getRegionServerOfRegion(
         HRegionInfo.FIRST_META_REGIONINFO);
@@ -1129,9 +1119,9 @@ public class TestAssignmentManagerOnCluster {
 
       // Assign the region
       master = (MyMaster)cluster.getMaster();
-      master.assignRegion(hri);
-
       AssignmentManager am = master.getAssignmentManager();
+      am.assign(hri);
+
       RegionStates regionStates = am.getRegionStates();
       ServerName metaServer = regionStates.getRegionServerOfRegion(
         HRegionInfo.FIRST_META_REGIONINFO);
@@ -1196,9 +1186,8 @@ public class TestAssignmentManagerOnCluster {
           new HRegionInfo(desc.getTableName(), Bytes.toBytes("A"), Bytes.toBytes("Z"));
       MetaTableAccessor.addRegionToMeta(meta, hri);
       HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-      master.assignRegion(hri);
       AssignmentManager am = master.getAssignmentManager();
-      am.waitForAssignment(hri);
+      TEST_UTIL.assignRegion(hri);
       RegionStates regionStates = am.getRegionStates();
       ServerName serverName = regionStates.getRegionServerOfRegion(hri);
       // Assert that the region is actually open on the server

http://git-wip-us.apache.org/repos/asf/hbase/blob/f0c159b5/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index 866a12d..2140b39 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -310,9 +310,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
 
       HRegionInfo hriOverlap =
           createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B"));
-      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap);
-      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
-          .waitForAssignment(hriOverlap);
+      TEST_UTIL.assignRegion(hriOverlap);
+
       ServerName server = regionStates.getRegionServerOfRegion(hriOverlap);
       TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT);
 
@@ -350,9 +349,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       // Mess it up by creating an overlap in the metadata
       HRegionInfo hriOverlap =
           createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B2"));
-      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap);
-      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
-          .waitForAssignment(hriOverlap);
+      TEST_UTIL.assignRegion(hriOverlap);
+
       ServerName server = regionStates.getRegionServerOfRegion(hriOverlap);
       TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT);
 
@@ -1225,9 +1223,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
 
       HRegionInfo hriOverlap =
           createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B"));
-      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap);
-      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
-          .waitForAssignment(hriOverlap);
+      TEST_UTIL.assignRegion(hriOverlap);
+
       ServerName server = regionStates.getRegionServerOfRegion(hriOverlap);
       TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT);
 
@@ -1351,9 +1348,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
       // Now let's mess it up, by adding a region with a duplicate startkey
       HRegionInfo hriDupe =
           createRegion(tbl.getTableDescriptor(), Bytes.toBytes("B"), Bytes.toBytes("B"));
-      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe);
-      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
-          .waitForAssignment(hriDupe);
+      TEST_UTIL.assignRegion(hriDupe);
+
       ServerName server = regionStates.getRegionServerOfRegion(hriDupe);
       TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/f0c159b5/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
index 7f023e0..17ac778 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckTwoRS.java
@@ -136,9 +136,8 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
       // Now let's mess it up, by adding a region with a duplicate startkey
       HRegionInfo hriDupe =
           createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("A2"));
-      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe);
-      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
-          .waitForAssignment(hriDupe);
+      TEST_UTIL.assignRegion(hriDupe);
+
       ServerName server = regionStates.getRegionServerOfRegion(hriDupe);
       TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT);
 
@@ -176,10 +175,8 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
       // Now let's mess it up, by adding a region with a duplicate startkey
       HRegionInfo hriDupe =
           createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("B"));
+      TEST_UTIL.assignRegion(hriDupe);
 
-      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriDupe);
-      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
-          .waitForAssignment(hriDupe);
       ServerName server = regionStates.getRegionServerOfRegion(hriDupe);
       TEST_UTIL.assertRegionOnServer(hriDupe, server, REGION_ONLINE_TIMEOUT);
 
@@ -228,9 +225,8 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
       // Mess it up by creating an overlap in the metadata
       HRegionInfo hriOverlap =
           createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A2"), Bytes.toBytes("B"));
-      TEST_UTIL.getHBaseCluster().getMaster().assignRegion(hriOverlap);
-      TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
-          .waitForAssignment(hriOverlap);
+      TEST_UTIL.assignRegion(hriOverlap);
+
       ServerName server = regionStates.getRegionServerOfRegion(hriOverlap);
       TEST_UTIL.assertRegionOnServer(hriOverlap, server, REGION_ONLINE_TIMEOUT);
 
@@ -317,12 +313,11 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
       HMaster master = cluster.getMaster();
       HRegionInfo hriOverlap1 =
           createRegion(tbl.getTableDescriptor(), Bytes.toBytes("A"), Bytes.toBytes("AB"));
-      master.assignRegion(hriOverlap1);
-      master.getAssignmentManager().waitForAssignment(hriOverlap1);
+      TEST_UTIL.assignRegion(hriOverlap1);
+
       HRegionInfo hriOverlap2 =
           createRegion(tbl.getTableDescriptor(), Bytes.toBytes("AB"), Bytes.toBytes("B"));
-      master.assignRegion(hriOverlap2);
-      master.getAssignmentManager().waitForAssignment(hriOverlap2);
+      TEST_UTIL.assignRegion(hriOverlap2);
 
       HBaseFsck hbck = doFsck(conf, false);
       assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {HBaseFsck.ErrorReporter.ERROR_CODE.DUPE_STARTKEYS,
@@ -458,7 +453,7 @@ public class TestHBaseFsckTwoRS extends BaseTestHBaseFsck {
     scanner.close();
     meta.close();
   }
-  
+
   /**
    * This creates and fixes a bad table with a missing region -- hole in meta and data present but
    * .regioninfo missing (an orphan hdfs region)in the fs. At last we check every row was present


[22/50] hbase git commit: HBASE-15883 Adding WAL files and tracking offsets in HBase.

Posted by sy...@apache.org.
HBASE-15883 Adding WAL files and tracking offsets in HBase.

Implemented ReplicationQueuesHBaseImpl, which tracks WAL offsets and replication queues in an HBase table.
Only the basic tracking methods are written so far; claimQueue() and the HFileRef methods are not implemented yet.
Wrote a basic unit test for ReplicationQueuesHBaseImpl that exercises the implemented methods on a single region server.

Signed-off-by: Elliott Clark <el...@fb.com>
Signed-off-by: Elliott Clark <ec...@apache.org>
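
For orientation, a minimal sketch of how the pluggable factory below is driven (the config key, factory and queue methods are the ones added in this patch; the Abortable, server name, peer id and WAL name are placeholders, and exception handling is omitted):

    Configuration conf = HBaseConfiguration.create();
    // Opt into the HBase-backed storage; ReplicationQueuesZKImpl remains the default.
    conf.setClass("hbase.region.replica.replication.ReplicationQueuesType",
        ReplicationQueuesHBaseImpl.class, ReplicationQueues.class);
    ReplicationQueues rq =
        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, abortable));
    rq.init(serverName);                          // this region server's name
    rq.addLog("peer1", "wal.1465000000000");      // creates the row: owner, queue id, offset 0
    rq.setLogPosition("peer1", "wal.1465000000000", 1024L);
    long offset = rq.getLogPosition("peer1", "wal.1465000000000");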


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/21e98271
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/21e98271
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/21e98271

Branch: refs/heads/hbase-12439
Commit: 21e98271c32f0d44106515a72b2c92d518c03668
Parents: 9a53d8b
Author: Joseph Hwang <jz...@fb.com>
Authored: Thu May 19 17:14:33 2016 -0700
Committer: Elliott Clark <ec...@apache.org>
Committed: Fri Jun 3 15:23:10 2016 -0700

----------------------------------------------------------------------
 .../hbase/replication/ReplicationFactory.java   |  11 +-
 .../hbase/replication/ReplicationQueues.java    |   8 +-
 .../replication/ReplicationQueuesArguments.java |  66 +++
 .../replication/ReplicationQueuesHBaseImpl.java | 491 +++++++++++++++++++
 .../replication/ReplicationQueuesZKImpl.java    |  13 +-
 .../replication/regionserver/Replication.java   |  12 +-
 .../regionserver/ReplicationSourceManager.java  |   5 +-
 .../replication/TestReplicationAdmin.java       |   3 +-
 .../hbase/master/cleaner/TestLogsCleaner.java   |   3 +-
 .../cleaner/TestReplicationHFileCleaner.java    |   4 +-
 .../replication/TestReplicationStateBasic.java  |   2 +-
 .../TestReplicationStateHBaseImpl.java          | 243 +++++++++
 .../replication/TestReplicationStateZKImpl.java |  13 +-
 .../TestReplicationSourceManager.java           |  36 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |   4 +-
 15 files changed, 871 insertions(+), 43 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 91e77ca..e264a4d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.replication;
 
+import org.apache.commons.lang.reflect.ConstructorUtils;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
@@ -30,9 +31,11 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 @InterfaceAudience.Private
 public class ReplicationFactory {
 
-  public static ReplicationQueues getReplicationQueues(final ZooKeeperWatcher zk,
-      Configuration conf, Abortable abortable) {
-    return new ReplicationQueuesZKImpl(zk, conf, abortable);
+  public static ReplicationQueues getReplicationQueues(ReplicationQueuesArguments args)
+      throws Exception {
+    Class<?> classToBuild = args.getConf().getClass("hbase.region.replica." +
+        "replication.ReplicationQueuesType", ReplicationQueuesZKImpl.class);
+    return (ReplicationQueues) ConstructorUtils.invokeConstructor(classToBuild, args);
   }
 
   public static ReplicationQueuesClient getReplicationQueuesClient(final ZooKeeperWatcher zk,
@@ -44,7 +47,7 @@ public class ReplicationFactory {
       Abortable abortable) {
     return getReplicationPeers(zk, conf, null, abortable);
   }
-  
+
   public static ReplicationPeers getReplicationPeers(final ZooKeeperWatcher zk, Configuration conf,
       final ReplicationQueuesClient queuesClient, Abortable abortable) {
     return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable);

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
index 0d47a88..db6da91 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
@@ -83,13 +83,13 @@ public interface ReplicationQueues {
   /**
    * Get a list of all WALs in the given queue.
    * @param queueId a String that identifies the queue
-   * @return a list of WALs, null if this region server is dead and has no outstanding queues
+   * @return a list of WALs, null if no such queue exists for this server
    */
   List<String> getLogsInQueue(String queueId);
 
   /**
    * Get a list of all queues for this region server.
-   * @return a list of queueIds, null if this region server is dead and has no outstanding queues
+   * @return a list of queueIds, or an empty list if this region server is dead and has no outstanding queues
    */
   List<String> getAllQueues();
 
@@ -110,10 +110,10 @@ public interface ReplicationQueues {
 
   /**
    * Checks if the provided znode is the same as this region server's
-   * @param znode to check
+   * @param regionserver the id of the region server
    * @return if this is this rs's znode
    */
-  boolean isThisOurZnode(String znode);
+  boolean isThisOurRegionServer(String regionserver);
 
   /**
    * Add a peer to hfile reference queue if peer does not exist.

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
new file mode 100644
index 0000000..4907b73
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
@@ -0,0 +1,66 @@
+/*
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+
+@InterfaceAudience.Private
+public class ReplicationQueuesArguments {
+
+  private ZooKeeperWatcher zk;
+  private Configuration conf;
+  private Abortable abort;
+
+  public ReplicationQueuesArguments(Configuration conf, Abortable abort) {
+    this.conf = conf;
+    this.abort = abort;
+  }
+
+  public ReplicationQueuesArguments(Configuration conf, Abortable abort, ZooKeeperWatcher zk) {
+    this(conf, abort);
+    setZk(zk);
+  }
+
+  public ZooKeeperWatcher getZk() {
+    return zk;
+  }
+
+  public void setZk(ZooKeeperWatcher zk) {
+    this.zk = zk;
+  }
+
+  public Configuration getConf() {
+    return conf;
+  }
+
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  public Abortable getAbort() {
+    return abort;
+  }
+
+  public void setAbort(Abortable abort) {
+    this.abort = abort;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
new file mode 100644
index 0000000..bbc9e32
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
@@ -0,0 +1,491 @@
+/*
+*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.hbase.replication;
+
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.FilterList;
+import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.regionserver.BloomType;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RetryCounter;
+import org.apache.hadoop.hbase.util.RetryCounterFactory;
+import sun.reflect.generics.reflectiveObjects.NotImplementedException;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.SortedMap;
+import java.util.SortedSet;
+
+@InterfaceAudience.Private
+public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
+
+  /** Name of the HBase Table used for tracking replication*/
+  public static final TableName REPLICATION_TABLE_NAME =
+    TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "replication");
+
+  // Column family and column names for the Replication Table
+  private static final byte[] CF = Bytes.toBytes("r");
+  private static final byte[] COL_OWNER = Bytes.toBytes("o");
+  private static final byte[] COL_QUEUE_ID = Bytes.toBytes("q");
+
+  // Column Descriptor for the Replication Table
+  private static final HColumnDescriptor REPLICATION_COL_DESCRIPTOR =
+    new HColumnDescriptor(CF).setMaxVersions(1)
+      .setInMemory(true)
+      .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
+        // TODO: Figure out which bloom filter to use
+      .setBloomFilterType(BloomType.NONE)
+      .setCacheDataInL1(true);
+
+  // Common byte values used in replication offset tracking
+  private static final byte[] INITIAL_OFFSET = Bytes.toBytes(0L);
+
+  /*
+   * Make sure that HBase table operations for replication have a high number of retries. This is
+   * because the server is aborted if any HBase table operation fails. Each RPC will be attempted
+   * 3600 times before exiting. This provides each operation with 2 hours of retries
+   * before the server is aborted.
+   */
+  private static final int CLIENT_RETRIES = 3600;
+  private static final int RPC_TIMEOUT = 2000;
+  private static final int OPERATION_TIMEOUT = CLIENT_RETRIES * RPC_TIMEOUT;
+
+  private final Configuration conf;
+  private final Admin admin;
+  private final Connection connection;
+  private final Table replicationTable;
+  private final Abortable abortable;
+  private String serverName = null;
+  private byte[] serverNameBytes = null;
+
+  public ReplicationQueuesHBaseImpl(ReplicationQueuesArguments args) throws IOException {
+    this(args.getConf(), args.getAbort());
+  }
+
+  public ReplicationQueuesHBaseImpl(Configuration conf, Abortable abort) throws IOException {
+    this.conf = new Configuration(conf);
+    // Modify the connection's config so that the Replication Table it returns has a much higher
+    // number of client retries
+    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, CLIENT_RETRIES);
+    this.connection = ConnectionFactory.createConnection(conf);
+    this.admin = connection.getAdmin();
+    this.abortable = abort;
+    replicationTable = createAndGetReplicationTable();
+    replicationTable.setRpcTimeout(RPC_TIMEOUT);
+    replicationTable.setOperationTimeout(OPERATION_TIMEOUT);
+  }
+
+  @Override
+  public void init(String serverName) throws ReplicationException {
+    this.serverName = serverName;
+    this.serverNameBytes = Bytes.toBytes(serverName);
+  }
+
+  @Override
+  public void removeQueue(String queueId) {
+    try {
+      byte[] rowKey = this.queueIdToRowKey(queueId);
+      // The rowkey will be null if the queue cannot be found in the Replication Table
+      if (rowKey == null) {
+        String errMsg = "Could not remove non-existent queue with queueId=" + queueId;
+        abortable.abort(errMsg, new ReplicationException(errMsg));
+        return;
+      }
+      Delete deleteQueue = new Delete(rowKey);
+      safeQueueUpdate(deleteQueue);
+    } catch (IOException e) {
+      abortable.abort("Could not remove queue with queueId=" + queueId, e);
+    }
+  }
+
+  @Override
+  public void addLog(String queueId, String filename) throws ReplicationException {
+    try {
+      // Check if the queue info (Owner, QueueId) is currently stored in the Replication Table
+      if (this.queueIdToRowKey(queueId) == null) {
+        // Each queue will have an Owner, QueueId, and a collection of [WAL:offset] key values.
+        Put putNewQueue = new Put(Bytes.toBytes(buildServerQueueName(queueId)));
+        putNewQueue.addColumn(CF, COL_OWNER, Bytes.toBytes(serverName));
+        putNewQueue.addColumn(CF, COL_QUEUE_ID, Bytes.toBytes(queueId));
+        putNewQueue.addColumn(CF, Bytes.toBytes(filename), INITIAL_OFFSET);
+        replicationTable.put(putNewQueue);
+      } else {
+        // Otherwise simply add the new log and offset as a new column
+        Put putNewLog = new Put(this.queueIdToRowKey(queueId));
+        putNewLog.addColumn(CF, Bytes.toBytes(filename), INITIAL_OFFSET);
+        safeQueueUpdate(putNewLog);
+      }
+    } catch (IOException e) {
+      abortable.abort("Could not add queue queueId=" + queueId + " filename=" + filename, e);
+    }
+  }
+
+  @Override
+  public void removeLog(String queueId, String filename) {
+    try {
+      byte[] rowKey = this.queueIdToRowKey(queueId);
+      if (rowKey == null) {
+        String errMsg = "Could not remove log from non-existent queueId=" + queueId + ", filename="
+          + filename;
+        abortable.abort(errMsg, new ReplicationException(errMsg));
+        return;
+      }
+      Delete delete = new Delete(rowKey);
+      delete.addColumns(CF, Bytes.toBytes(filename));
+      safeQueueUpdate(delete);
+    } catch (IOException e) {
+      abortable.abort("Could not remove log from queueId=" + queueId + ", filename=" + filename, e);
+    }
+  }
+
+  @Override
+  public void setLogPosition(String queueId, String filename, long position) {
+    try {
+      byte[] rowKey = this.queueIdToRowKey(queueId);
+      if (rowKey == null) {
+        String errMsg = "Could not set position of log from non-existent queueId=" + queueId +
+          ", filename=" + filename;
+        abortable.abort(errMsg, new ReplicationException(errMsg));
+        return;
+      }
+      // Check that the log exists. addLog() must have been called before setLogPosition().
+      Get checkLogExists = new Get(rowKey);
+      checkLogExists.addColumn(CF, Bytes.toBytes(filename));
+      if (!replicationTable.exists(checkLogExists)) {
+        String errMsg = "Could not set position of non-existent log from queueId=" + queueId +
+          ", filename=" + filename;
+        abortable.abort(errMsg, new ReplicationException(errMsg));
+        return;
+      }
+      // Update the log offset if it exists
+      Put walAndOffset = new Put(rowKey);
+      walAndOffset.addColumn(CF, Bytes.toBytes(filename), Bytes.toBytes(position));
+      safeQueueUpdate(walAndOffset);
+    } catch (IOException e) {
+      abortable.abort("Failed to write replication wal position (filename=" + filename +
+          ", position=" + position + ")", e);
+    }
+  }
+
+  @Override
+  public long getLogPosition(String queueId, String filename) throws ReplicationException {
+    try {
+      byte[] rowKey = this.queueIdToRowKey(queueId);
+      if (rowKey == null) {
+        throw new ReplicationException("Could not get position in log for non-existent queue " +
+            "queueId=" + queueId + ", filename=" + filename);
+      }
+      Get getOffset = new Get(rowKey);
+      getOffset.addColumn(CF, Bytes.toBytes(filename));
+      Result result = replicationTable.get(getOffset);
+      if (result.isEmpty()) {
+        throw new ReplicationException("Could not read empty result while getting log position " +
+            "queueId=" + queueId + ", filename=" + filename);
+      }
+      return Bytes.toLong(result.getValue(CF, Bytes.toBytes(filename)));
+    } catch (IOException e) {
+      throw new ReplicationException("Could not get position in log for queueId=" + queueId +
+          ", filename=" + filename);
+    }
+  }
+
+  @Override
+  public void removeAllQueues() {
+    List<String> myQueueIds = getAllQueues();
+    for (String queueId : myQueueIds) {
+      removeQueue(queueId);
+    }
+  }
+
+  @Override
+  public List<String> getLogsInQueue(String queueId) {
+    List<String> logs = new ArrayList<String>();
+    try {
+      byte[] rowKey = this.queueIdToRowKey(queueId);
+      if (rowKey == null) {
+        String errMsg = "Could not get logs from non-existent queueId=" + queueId;
+        abortable.abort(errMsg, new ReplicationException(errMsg));
+        return null;
+      }
+      Get getQueue = new Get(rowKey);
+      Result queue = replicationTable.get(getQueue);
+      if (queue.isEmpty()) {
+        return null;
+      }
+      Map<byte[], byte[]> familyMap = queue.getFamilyMap(CF);
+      for (byte[] cQualifier : familyMap.keySet()) {
+        if (Arrays.equals(cQualifier, COL_OWNER) || Arrays.equals(cQualifier, COL_QUEUE_ID)) {
+          continue;
+        }
+        logs.add(Bytes.toString(cQualifier));
+      }
+    } catch (IOException e) {
+      abortable.abort("Could not get logs from queue queueId=" + queueId, e);
+      return null;
+    }
+    return logs;
+  }
+
+  @Override
+  public List<String> getAllQueues() {
+    try {
+      return this.getQueuesBelongingToServer(serverName);
+    } catch (IOException e) {
+      abortable.abort("Could not get all replication queues", e);
+      return null;
+    }
+  }
+
+  @Override
+  public SortedMap<String, SortedSet<String>> claimQueues(String regionserver) {
+    // TODO
+    throw new NotImplementedException();
+  }
+
+  @Override
+  public List<String> getListOfReplicators() {
+    // TODO
+    throw new NotImplementedException();
+  }
+
+  @Override
+  public boolean isThisOurRegionServer(String regionserver) {
+    return this.serverName.equals(regionserver);
+  }
+
+  @Override
+  public void addPeerToHFileRefs(String peerId) throws ReplicationException {
+    // TODO
+    throw new NotImplementedException();
+  }
+
+  @Override
+  public void addHFileRefs(String peerId, List<String> files) throws ReplicationException {
+    // TODO
+    throw new NotImplementedException();
+  }
+
+  @Override
+  public void removeHFileRefs(String peerId, List<String> files) {
+    // TODO
+    throw new NotImplementedException();
+  }
+
+  /**
+   * Gets the Replication Table. If the Replication Table does not exist yet, builds it and
+   * blocks until it is available.
+   *
+   * @return the Replication Table
+   * @throws IOException if the Replication Table takes too long to build
+   */
+  private Table createAndGetReplicationTable() throws IOException {
+    if (!replicationTableExists()) {
+      createReplicationTable();
+    }
+    int maxRetries = conf.getInt("replication.queues.createtable.retries.number", 100);
+    RetryCounterFactory counterFactory = new RetryCounterFactory(maxRetries, 100);
+    RetryCounter retryCounter = counterFactory.create();
+    while (!replicationTableExists()) {
+      try {
+        retryCounter.sleepUntilNextRetry();
+        if (!retryCounter.shouldRetry()) {
+          throw new IOException("Unable to acquire the Replication Table");
+        }
+      } catch (InterruptedException e) {
+        return null;
+      }
+    }
+    return connection.getTable(REPLICATION_TABLE_NAME);
+  }
+
+  /**
+   * Checks whether the Replication Table exists yet
+   *
+   * @return whether the Replication Table exists
+   * @throws IOException
+   */
+  private boolean replicationTableExists() {
+    try {
+      return admin.tableExists(REPLICATION_TABLE_NAME);
+    } catch (IOException e) {
+      return false;
+    }
+  }
+
+  /**
+   * Create the replication table with the provided HColumnDescriptor REPLICATION_COL_DESCRIPTOR
+   * in ReplicationQueuesHBaseImpl
+   * @throws IOException
+   */
+  private void createReplicationTable() throws IOException {
+    HTableDescriptor replicationTableDescriptor = new HTableDescriptor(REPLICATION_TABLE_NAME);
+    replicationTableDescriptor.addFamily(REPLICATION_COL_DESCRIPTOR);
+    admin.createTable(replicationTableDescriptor);
+  }
+
+  /**
+   * Builds the unique identifier for a queue in the Replication table by appending the queueId to
+   * the servername
+   *
+   * @param queueId a String that identifies the queue
+   * @return unique identifier for a queue in the Replication table
+   */
+  private String buildServerQueueName(String queueId) {
+    return serverName + "-" + queueId;
+  }
+  
+  /**
+   * See safeQueueUpdate(RowMutations mutate)
+   * @param put Row mutation to perform on the queue
+   */
+  private void safeQueueUpdate(Put put) {
+    RowMutations mutations = new RowMutations(put.getRow());
+    try {
+      mutations.add(put);
+    } catch (IOException e){
+      abortable.abort("Failed to update Replication Table because of IOException", e);
+    }
+    safeQueueUpdate(mutations);
+  }
+
+  /**
+   * See safeQueueUpdate(RowMutations mutate)
+   * @param delete Row mutation to perform on the queue
+   */
+  private void safeQueueUpdate(Delete delete) {
+    RowMutations mutations = new RowMutations(delete.getRow());
+    try {
+      mutations.add(delete);
+    } catch (IOException e) {
+      abortable.abort("Failed to update Replication Table because of IOException", e);
+    }
+    safeQueueUpdate(mutations);
+  }
+
+  /**
+   * Attempt to mutate a given queue in the Replication Table with a checkAndPut on the OWNER column
+   * of the queue. Abort the server if this checkAndPut fails, which means that we have somehow
+   * lost ownership of the column or that an IOException occurred during the transaction.
+   *
+   * @param mutate Mutation to perform on a given queue
+   */
+  private void safeQueueUpdate(RowMutations mutate) {
+    try {
+      boolean updateSuccess = replicationTable.checkAndMutate(mutate.getRow(), CF, COL_OWNER,
+        CompareFilter.CompareOp.EQUAL, serverNameBytes, mutate);
+      if (!updateSuccess) {
+        String errMsg = "Failed to update Replication Table because we lost queue ownership";
+        abortable.abort(errMsg, new ReplicationException(errMsg));
+      }
+    } catch (IOException e) {
+      abortable.abort("Failed to update Replication Table because of IOException", e);
+    }
+  }
+
+  /**
+   * Get the QueueIds belonging to the named server from the ReplicationTable
+   *
+   * @param server name of the server
+   * @return a list of the QueueIds belonging to the server
+   * @throws IOException
+   */
+  private List<String> getQueuesBelongingToServer(String server) throws IOException {
+    List<String> queues = new ArrayList<String>();
+    Scan scan = new Scan();
+    SingleColumnValueFilter filterMyQueues = new SingleColumnValueFilter(CF, COL_OWNER,
+      CompareFilter.CompareOp.EQUAL, Bytes.toBytes(server));
+    scan.setFilter(filterMyQueues);
+    scan.addColumn(CF, COL_QUEUE_ID);
+    scan.addColumn(CF, COL_OWNER);
+    ResultScanner results = replicationTable.getScanner(scan);
+    for (Result result : results) {
+      queues.add(Bytes.toString(result.getValue(CF, COL_QUEUE_ID)));
+    }
+    results.close();
+    return queues;
+  }
+
+  /**
+   * Finds the row key of the HBase row corresponding to the provided queue. This lookup is
+   * needed because the row key is [original server name + "-" + queueId0], and the original
+   * server will make calls to getLog(), getQueue(), etc. with the argument queueId = queueId0.
+   * On the original server we can build the row key by concatenating servername + queueId0.
+   * Yet if the queue is claimed by another server, future calls to getLog(), getQueue(), etc.
+   * will be made with the argument queueId = queueId0 + "-" + pastOwner0 + "-" + pastOwner1 ...
+   * so we need a way to look up rows by their modified queueId's.
+   *
+   * TODO: Consider updating the queueId passed to getLog, getQueue()... inside of ReplicationSource
+   * TODO: and ReplicationSourceManager or the parsing of the passed in queueId's so that we don't
+   * TODO: have to scan the table for row keys for each update. See HBASE-15956.
+   *
+   * TODO: We can also cache queueId's if ReplicationQueuesHBaseImpl becomes a bottleneck. We
+   * TODO: currently perform scan's over all the rows looking for one with a matching QueueId.
+   *
+   * @param queueId string representation of the queue id
+   * @return the rowkey of the corresponding queue. This returns null if the corresponding queue
+   * cannot be found.
+   * @throws IOException
+   */
+  private byte[] queueIdToRowKey(String queueId) throws IOException {
+    Scan scan = new Scan();
+    scan.addColumn(CF, COL_QUEUE_ID);
+    scan.addColumn(CF, COL_OWNER);
+    scan.setMaxResultSize(1);
+    // Search for the queue that matches this queueId
+    SingleColumnValueFilter filterByQueueId = new SingleColumnValueFilter(CF, COL_QUEUE_ID,
+        CompareFilter.CompareOp.EQUAL, Bytes.toBytes(queueId));
+    // Make sure that we are the owners of the queue. QueueId's may overlap.
+    SingleColumnValueFilter filterByOwner = new SingleColumnValueFilter(CF, COL_OWNER,
+        CompareFilter.CompareOp.EQUAL, Bytes.toBytes(serverName));
+    // We only want the row key
+    FirstKeyOnlyFilter filterOutColumns = new FirstKeyOnlyFilter();
+    FilterList filterList = new FilterList(filterByQueueId, filterByOwner, filterOutColumns);
+    scan.setFilter(filterList);
+    ResultScanner results = replicationTable.getScanner(scan);
+    Result result = results.next();
+    results.close();
+    return (result == null) ? null : result.getRow();
+  }
+}
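
Two details in ReplicationQueuesHBaseImpl above are worth spelling out. First, the
retry budget: 3600 client retries at a 2000 ms RPC timeout yields an operation
timeout of 3600 * 2000 ms = 7,200,000 ms, i.e. the two hours the comment promises.
Second, the checkAndPut-style guard in safeQueueUpdate(RowMutations): every queue
mutation is made conditional on the OWNER column still matching this server's name,
so a server that has lost a queue to failover cannot clobber the new owner's state.
A minimal sketch of that guard against the HBase client API of this era follows;
the WAL column name is illustrative, and CF, COL_OWNER, serverNameBytes, rowKey and
replicationTable mirror the fields in the patch:

    // Sketch only: apply a RowMutations to a queue row while we still own it.
    Put put = new Put(rowKey);
    put.addColumn(CF, Bytes.toBytes("WALLogFile1.1"), Bytes.toBytes(0L));
    RowMutations mutations = new RowMutations(rowKey);
    mutations.add(put);  // may throw IOException
    boolean stillOwner = replicationTable.checkAndMutate(rowKey, CF, COL_OWNER,
        CompareFilter.CompareOp.EQUAL, serverNameBytes, mutations);
    if (!stillOwner) {
      // The OWNER column no longer matches this server: the queue was claimed
      // elsewhere (or an earlier IOException surfaced), so abort rather than
      // write to a queue another region server now owns.
    }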

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 2bb8ea8..32d0883 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -41,7 +41,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
 /**
- * This class provides an implementation of the ReplicationQueues interface using ZooKeeper. The
+ * This class provides an implementation of the ReplicationQueues
+ * interface using ZooKeeper. The
  * base znode that this class works at is the myQueuesZnode. The myQueuesZnode contains a list of
  * all outstanding WAL files on this region server that need to be replicated. The myQueuesZnode is
  * the regionserver name (a concatenation of the region server's hostname, client port and start
@@ -71,6 +72,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
 
   private static final Log LOG = LogFactory.getLog(ReplicationQueuesZKImpl.class);
 
+  public ReplicationQueuesZKImpl(ReplicationQueuesArguments args) {
+    this(args.getZk(), args.getConf(), args.getAbort());
+  }
+
   public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf,
       Abortable abortable) {
     super(zk, conf, abortable);
@@ -166,8 +171,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
   }
 
   @Override
-  public boolean isThisOurZnode(String znode) {
-    return ZKUtil.joinZNode(this.queuesZNode, znode).equals(this.myQueuesZnode);
+  public boolean isThisOurRegionServer(String regionserver) {
+    return ZKUtil.joinZNode(this.queuesZNode, regionserver).equals(this.myQueuesZnode);
   }
 
   @Override
@@ -223,7 +228,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
       this.abortable.abort("Failed to get a list of queues for region server: "
           + this.myQueuesZnode, e);
     }
-    return listOfQueues;
+    return listOfQueues == null ? new ArrayList<String>() : listOfQueues;
   }
 
   /**
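
The recurring mechanical change across the rest of this commit is the switch from
the positional (zkw, conf, abortable) factory signature to a single
ReplicationQueuesArguments holder, which lets ReplicationFactory build either the
ZooKeeper-backed or the HBase-backed queues from one entry point (and which is why
the tests below now wrap the call in try/catch: the factory can throw when
constructing the HBase-backed implementation). A hedged sketch of the new call
shape, with variable names as in the test diffs below:

    // conf is a Configuration, server doubles as the Abortable,
    // zkw is a ZooKeeperWatcher (it may be null for the HBase-backed impl).
    ReplicationQueues queues = ReplicationFactory.getReplicationQueues(
        new ReplicationQueuesArguments(conf, server, zkw));
    queues.init(server.getServerName().toString());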

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
index fa5e222..d55472d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/Replication.java
@@ -48,16 +48,17 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
 import org.apache.hadoop.hbase.regionserver.ReplicationSinkService;
 import org.apache.hadoop.hbase.regionserver.ReplicationSourceService;
-import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
-import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
 import org.apache.hadoop.hbase.replication.ReplicationTracker;
+import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
 import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
+import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.zookeeper.KeeperException;
 
@@ -127,7 +128,8 @@ public class Replication extends WALActionsListener.Base implements
     if (replication) {
       try {
         this.replicationQueues =
-            ReplicationFactory.getReplicationQueues(server.getZooKeeper(), this.conf, this.server);
+            ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, this.server,
+              server.getZooKeeper()));
         this.replicationQueues.init(this.server.getServerName().toString());
         this.replicationPeers =
             ReplicationFactory.getReplicationPeers(server.getZooKeeper(), this.conf, this.server);
@@ -135,7 +137,7 @@ public class Replication extends WALActionsListener.Base implements
         this.replicationTracker =
             ReplicationFactory.getReplicationTracker(server.getZooKeeper(), this.replicationPeers,
               this.conf, this.server, this.server);
-      } catch (ReplicationException e) {
+      } catch (Exception e) {
         throw new IOException("Failed replication handler create", e);
       }
       UUID clusterId = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index b585513..ed2eecc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -315,9 +315,6 @@ public class ReplicationSourceManager implements ReplicationListener {
    */
   public void join() {
     this.executor.shutdown();
-    if (this.sources.size() == 0) {
-      this.replicationQueues.removeAllQueues();
-    }
     for (ReplicationSourceInterface source : this.sources) {
       source.terminate("Region server is closing");
     }
@@ -624,7 +621,7 @@ public class ReplicationSourceManager implements ReplicationListener {
 
     @Override
     public void run() {
-      if (this.rq.isThisOurZnode(rsZnode)) {
+      if (this.rq.isThisOurRegionServer(rsZnode)) {
         return;
       }
       // Wait a bit before transferring the queues, we may be shutting down.

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index c3241c9..06a3c7e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeer;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -160,7 +161,7 @@ public class TestReplicationAdmin {
     Configuration conf = TEST_UTIL.getConfiguration();
     ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test HBaseAdmin", null);
     ReplicationQueues repQueues =
-        ReplicationFactory.getReplicationQueues(zkw, conf, null);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, null, zkw));
     repQueues.init("server1");
 
     // add queue for ID_ONE

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
index 47db32b..18950a2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestLogsCleaner.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
 import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
@@ -94,7 +95,7 @@ public class TestLogsCleaner {
     Replication.decorateMasterConfiguration(conf);
     Server server = new DummyServer();
     ReplicationQueues repQueues =
-        ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server, server.getZooKeeper()));
     repQueues.init(server.getServerName().toString());
     final Path oldLogDir = new Path(TEST_UTIL.getDataTestDir(),
         HConstants.HREGION_OLDLOGDIR_NAME);

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index d4f23c8..1778e73 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
 import org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
@@ -87,8 +88,7 @@ public class TestReplicationHFileCleaner {
     Replication.decorateMasterConfiguration(conf);
     rp = ReplicationFactory.getReplicationPeers(server.getZooKeeper(), conf, server);
     rp.init();
-
-    rq = ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
+    rq = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server, server.getZooKeeper()));
     rq.init(server.getServerName().toString());
     try {
       fs = FileSystem.get(conf);

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
index 144046f4..5ab26ab 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
@@ -121,7 +121,7 @@ public abstract class TestReplicationStateBasic {
     rq1.removeQueue("bogus");
     rq1.removeLog("bogus", "bogus");
     rq1.removeAllQueues();
-    assertNull(rq1.getAllQueues());
+    assertEquals(0, rq1.getAllQueues().size());
     assertEquals(0, rq1.getLogPosition("bogus", "bogus"));
     assertNull(rq1.getLogsInQueue("bogus"));
     assertEquals(0, rq1.claimQueues(ServerName.valueOf("bogus", 1234, -1L).toString()).size());

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java
new file mode 100644
index 0000000..8186213
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java
@@ -0,0 +1,243 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.replication;
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.CoordinatedStateManager;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import static junit.framework.TestCase.assertNull;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+@Category({ReplicationTests.class, MediumTests.class})
+public class TestReplicationStateHBaseImpl {
+
+  private static Configuration conf;
+  private static HBaseTestingUtility utility;
+  private static Connection connection;
+  private static ReplicationQueues rqH;
+
+  private final String server1 = ServerName.valueOf("hostname1.example.org", 1234, -1L).toString();
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    utility = new HBaseTestingUtility();
+    utility.startMiniCluster();
+    conf = utility.getConfiguration();
+    conf.setClass("hbase.region.replica.replication.ReplicationQueuesType",
+        ReplicationQueuesHBaseImpl.class, ReplicationQueues.class);
+    connection = ConnectionFactory.createConnection(conf);
+  }
+
+  @Test
+  public void checkNamingSchema() throws Exception {
+    rqH.init(server1);
+    assertTrue(rqH.isThisOurRegionServer(server1));
+    assertTrue(!rqH.isThisOurRegionServer(server1 + "a"));
+    assertTrue(!rqH.isThisOurRegionServer(null));
+  }
+
+  @Test
+  public void testReplicationStateHBase() {
+    DummyServer ds = new DummyServer(server1);
+    try {
+      rqH = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds, null));
+      rqH.init(server1);
+      // Check that the proper System Tables have been generated
+      Table replicationTable = connection.getTable(
+          ReplicationQueuesHBaseImpl.REPLICATION_TABLE_NAME);
+      assertTrue(replicationTable.getName().isSystemTable());
+
+    } catch (Exception e) {
+      e.printStackTrace();
+      fail("testReplicationStateHBaseConstruction received an Exception");
+    }
+    try {
+      // Test adding in WAL files
+      assertEquals(0, rqH.getAllQueues().size());
+      rqH.addLog("Queue1", "WALLogFile1.1");
+      assertEquals(1, rqH.getAllQueues().size());
+      rqH.addLog("Queue1", "WALLogFile1.2");
+      rqH.addLog("Queue1", "WALLogFile1.3");
+      rqH.addLog("Queue1", "WALLogFile1.4");
+      rqH.addLog("Queue2", "WALLogFile2.1");
+      rqH.addLog("Queue3", "WALLogFile3.1");
+      assertEquals(3, rqH.getAllQueues().size());
+      assertEquals(4, rqH.getLogsInQueue("Queue1").size());
+      assertEquals(1, rqH.getLogsInQueue("Queue2").size());
+      assertEquals(1, rqH.getLogsInQueue("Queue3").size());
+      // Make sure that abortCount is still 0
+      assertEquals(0, ds.getAbortCount());
+      // Make sure that getting a log from a non-existent queue triggers an abort
+      assertNull(rqH.getLogsInQueue("Queue4"));
+      assertEquals(1, ds.getAbortCount());
+    } catch (ReplicationException e) {
+      e.printStackTrace();
+      fail("testAddLog received a ReplicationException");
+    }
+    try {
+
+      // Test updating the log positions
+      assertEquals(0L, rqH.getLogPosition("Queue1", "WALLogFile1.1"));
+      rqH.setLogPosition("Queue1", "WALLogFile1.1", 123L);
+      assertEquals(123L, rqH.getLogPosition("Queue1", "WALLogFile1.1"));
+      rqH.setLogPosition("Queue1", "WALLogFile1.1", 123456789L);
+      assertEquals(123456789L, rqH.getLogPosition("Queue1", "WALLogFile1.1"));
+      rqH.setLogPosition("Queue2", "WALLogFile2.1", 242L);
+      assertEquals(242L, rqH.getLogPosition("Queue2", "WALLogFile2.1"));
+      rqH.setLogPosition("Queue3", "WALLogFile3.1", 243L);
+      assertEquals(243L, rqH.getLogPosition("Queue3", "WALLogFile3.1"));
+
+      // Test that setting log positions in non-existing logs will cause an abort
+      assertEquals(1, ds.getAbortCount());
+      rqH.setLogPosition("NotHereQueue", "WALLogFile3.1", 243L);
+      assertEquals(2, ds.getAbortCount());
+      rqH.setLogPosition("NotHereQueue", "NotHereFile", 243L);
+      assertEquals(3, ds.getAbortCount());
+      rqH.setLogPosition("Queue1", "NotHereFile", 243l);
+      assertEquals(4, ds.getAbortCount());
+
+      // Test reading log positions for non-existent queues and WAL's
+      try {
+        rqH.getLogPosition("Queue1", "NotHereWAL");
+        fail("Replication queue should have thrown a ReplicationException for reading from a " +
+            "non-existent WAL");
+      } catch (ReplicationException e) {
+      }
+      try {
+        rqH.getLogPosition("NotHereQueue", "NotHereWAL");
+        fail("Replication queue should have thrown a ReplicationException for reading from a " +
+            "non-existent queue");
+      } catch (ReplicationException e) {
+      }
+      // Test removing logs
+      rqH.removeLog("Queue1", "WALLogFile1.1");
+      assertEquals(3, rqH.getLogsInQueue("Queue1").size());
+      // Test removing queues
+      rqH.removeQueue("Queue2");
+      assertEquals(2, rqH.getAllQueues().size());
+      assertNull(rqH.getLogsInQueue("Queue2"));
+      // Test that getting logs from a non-existent queue aborts
+      assertEquals(5, ds.getAbortCount());
+      // Test removing all queues for a Region Server
+      rqH.removeAllQueues();
+      assertEquals(0, rqH.getAllQueues().size());
+      assertNull(rqH.getLogsInQueue("Queue1"));
+      // Test that getting logs from a non-existent queue aborts
+      assertEquals(6, ds.getAbortCount());
+    } catch (ReplicationException e) {
+      e.printStackTrace();
+      fail("testAddLog received a ReplicationException");
+    }
+  }
+
+  static class DummyServer implements Server {
+    private String serverName;
+    private boolean isAborted = false;
+    private boolean isStopped = false;
+    private int abortCount = 0;
+
+    public DummyServer(String serverName) {
+      this.serverName = serverName;
+    }
+
+    @Override
+    public Configuration getConfiguration() {
+      return conf;
+    }
+
+    @Override
+    public ZooKeeperWatcher getZooKeeper() {
+      return null;
+    }
+
+    @Override
+    public CoordinatedStateManager getCoordinatedStateManager() {
+      return null;
+    }
+
+    @Override
+    public ClusterConnection getConnection() {
+      return null;
+    }
+
+    @Override
+    public MetaTableLocator getMetaTableLocator() {
+      return null;
+    }
+
+    @Override
+    public ServerName getServerName() {
+      return ServerName.valueOf(this.serverName);
+    }
+
+    @Override
+    public void abort(String why, Throwable e) {
+      abortCount++;
+      this.isAborted = true;
+    }
+
+    @Override
+    public boolean isAborted() {
+      return this.isAborted;
+    }
+
+    @Override
+    public void stop(String why) {
+      this.isStopped = true;
+    }
+
+    @Override
+    public boolean isStopped() {
+      return this.isStopped;
+    }
+
+    @Override
+    public ChoreService getChoreService() {
+      return null;
+    }
+
+    @Override
+    public ClusterConnection getClusterConnection() {
+      return null;
+    }
+
+    public int getAbortCount() {
+      return abortCount;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
index 94dbb25..e731135 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hbase.replication;
 
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 
@@ -33,6 +34,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -91,9 +93,14 @@ public class TestReplicationStateZKImpl extends TestReplicationStateBasic {
     DummyServer ds1 = new DummyServer(server1);
     DummyServer ds2 = new DummyServer(server2);
     DummyServer ds3 = new DummyServer(server3);
-    rq1 = ReplicationFactory.getReplicationQueues(zkw, conf, ds1);
-    rq2 = ReplicationFactory.getReplicationQueues(zkw, conf, ds2);
-    rq3 = ReplicationFactory.getReplicationQueues(zkw, conf, ds3);
+    try {
+      rq1 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds1, zkw));
+      rq2 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds2, zkw));
+      rq3 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds3, zkw));
+    } catch (Exception e) {
+      // This should not occur, because getReplicationQueues() only throws for ReplicationQueuesHBaseImpl
+      fail("ReplicationFactory.getReplicationQueues() threw an IO Exception");
+    }
     rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, ds1);
     rp = ReplicationFactory.getReplicationPeers(zkw, conf, zkw);
     OUR_KEY = ZKConfig.getZooKeeperClusterKey(conf);

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index 9e950d2..d1db068 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -66,6 +66,7 @@ import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueueInfo;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
 import org.apache.hadoop.hbase.replication.ReplicationSourceDummy;
 import org.apache.hadoop.hbase.replication.ReplicationStateZKBase;
@@ -284,9 +285,11 @@ public class TestReplicationSourceManager {
     LOG.debug("testNodeFailoverWorkerCopyQueuesFromRSUsingMulti");
     conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true);
     final Server server = new DummyServer("hostname0.example.org");
+
+
     ReplicationQueues rq =
-        ReplicationFactory.getReplicationQueues(server.getZooKeeper(), server.getConfiguration(),
-          server);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server,
+          server.getZooKeeper()));
     rq.init(server.getServerName().toString());
     // populate some znodes in the peer znode
     files.add("log1");
@@ -326,8 +329,8 @@ public class TestReplicationSourceManager {
   public void testCleanupFailoverQueues() throws Exception {
     final Server server = new DummyServer("hostname1.example.org");
     ReplicationQueues rq =
-        ReplicationFactory.getReplicationQueues(server.getZooKeeper(), server.getConfiguration(),
-          server);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server,
+          server.getZooKeeper()));
     rq.init(server.getServerName().toString());
     // populate some znodes in the peer znode
     SortedSet<String> files = new TreeSet<String>();
@@ -341,7 +344,8 @@ public class TestReplicationSourceManager {
     }
     Server s1 = new DummyServer("dummyserver1.example.org");
     ReplicationQueues rq1 =
-        ReplicationFactory.getReplicationQueues(s1.getZooKeeper(), s1.getConfiguration(), s1);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1,
+          s1.getZooKeeper()));
     rq1.init(s1.getServerName().toString());
     ReplicationPeers rp1 =
         ReplicationFactory.getReplicationPeers(s1.getZooKeeper(), s1.getConfiguration(), s1);
@@ -365,7 +369,8 @@ public class TestReplicationSourceManager {
     conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true);
     final Server server = new DummyServer("ec2-54-234-230-108.compute-1.amazonaws.com");
     ReplicationQueues repQueues =
-        ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, server,
+          server.getZooKeeper()));
     repQueues.init(server.getServerName().toString());
     // populate some znodes in the peer znode
     files.add("log1");
@@ -381,16 +386,19 @@ public class TestReplicationSourceManager {
 
     // simulate three servers fail sequentially
     ReplicationQueues rq1 =
-        ReplicationFactory.getReplicationQueues(s1.getZooKeeper(), s1.getConfiguration(), s1);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1,
+          s1.getZooKeeper()));
     rq1.init(s1.getServerName().toString());
     SortedMap<String, SortedSet<String>> testMap =
         rq1.claimQueues(server.getServerName().getServerName());
     ReplicationQueues rq2 =
-        ReplicationFactory.getReplicationQueues(s2.getZooKeeper(), s2.getConfiguration(), s2);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s2.getConfiguration(), s2,
+          s2.getZooKeeper()));
     rq2.init(s2.getServerName().toString());
     testMap = rq2.claimQueues(s1.getServerName().getServerName());
     ReplicationQueues rq3 =
-        ReplicationFactory.getReplicationQueues(s3.getZooKeeper(), s3.getConfiguration(), s3);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s3.getConfiguration(), s3,
+          s3.getZooKeeper()));
     rq3.init(s3.getServerName().toString());
     testMap = rq3.claimQueues(s2.getServerName().getServerName());
 
@@ -412,7 +420,8 @@ public class TestReplicationSourceManager {
     conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true);
     final Server s0 = new DummyServer("cversion-change0.example.org");
     ReplicationQueues repQueues =
-        ReplicationFactory.getReplicationQueues(s0.getZooKeeper(), conf, s0);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, s0,
+          s0.getZooKeeper()));
     repQueues.init(s0.getServerName().toString());
     // populate some znodes in the peer znode
     files.add("log1");
@@ -423,7 +432,8 @@ public class TestReplicationSourceManager {
     // simulate queue transfer
     Server s1 = new DummyServer("cversion-change1.example.org");
     ReplicationQueues rq1 =
-        ReplicationFactory.getReplicationQueues(s1.getZooKeeper(), s1.getConfiguration(), s1);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1,
+          s1.getZooKeeper()));
     rq1.init(s1.getServerName().toString());
 
     ReplicationQueuesClient client =
@@ -522,8 +532,8 @@ public class TestReplicationSourceManager {
       this.deadRsZnode = znode;
       this.server = s;
       this.rq =
-          ReplicationFactory.getReplicationQueues(server.getZooKeeper(), server.getConfiguration(),
-            server);
+          ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(server.getConfiguration(), server,
+            server.getZooKeeper()));
       this.rq.init(this.server.getServerName().toString());
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/21e98271/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index 2140b39..84ef6da 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.regionserver.TestEndToEndSplitTransaction;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationQueues;
+import org.apache.hadoop.hbase.replication.ReplicationQueuesArguments;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
@@ -1543,7 +1544,8 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
     // create replicator
     ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "Test Hbase Fsck", connection);
     ReplicationQueues repQueues =
-        ReplicationFactory.getReplicationQueues(zkw, conf, connection);
+        ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, connection,
+          zkw));
     repQueues.init("server1");
     // queues for current peer, no errors
     repQueues.addLog("1", "file1");


[14/50] hbase git commit: HBASE-15727 Canary Tool for Zookeeper (churro morales)

Posted by sy...@apache.org.
HBASE-15727 Canary Tool for Zookeeper (churro morales)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7e5d5308
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7e5d5308
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7e5d5308

Branch: refs/heads/hbase-12439
Commit: 7e5d530870f146dfdee52e5a228ad84f0aefafd7
Parents: cd25880
Author: tedyu <yu...@gmail.com>
Authored: Thu Jun 2 10:15:08 2016 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Thu Jun 2 10:15:08 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/tool/Canary.java    | 142 ++++++++++++++++++-
 .../hadoop/hbase/tool/TestCanaryTool.java       |  24 +++-
 2 files changed, 160 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7e5d5308/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index ab9971d..360b0f5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -19,8 +19,14 @@
 
 package org.apache.hadoop.hbase.tool;
 
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT;
+import static org.apache.hadoop.hbase.HConstants.ZOOKEEPER_ZNODE_PARENT;
+
+import com.google.common.collect.Lists;
+
 import java.io.Closeable;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -32,12 +38,12 @@ import java.util.Map;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -78,20 +84,29 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.hbase.util.RegionSplitter;
+import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.ZooKeeper;
+import org.apache.zookeeper.client.ConnectStringParser;
+import org.apache.zookeeper.data.Stat;
 
 /**
  * HBase Canary Tool, which can be used to do
  * "canary monitoring" of a running HBase cluster.
  *
- * Here are two modes
+ * Here are three modes
  * 1. region mode - Foreach region tries to get one row per column family
  * and outputs some information about failure or latency.
  *
  * 2. regionserver mode - Foreach regionserver tries to get one row from one table
  * selected randomly and outputs some information about failure or latency.
+ *
+ * 3. zookeeper mode - for each zookeeper instance, selects a zNode and
+ * outputs some information about failure or latency.
  */
 public final class Canary implements Tool {
   // Sink interface used by the canary to outputs information
@@ -188,6 +203,55 @@ public final class Canary implements Tool {
     }
   }
 
+  public static class ZookeeperStdOutSink extends StdOutSink implements ExtendedSink {
+    @Override public void publishReadFailure(String zNode, String server) {
+      incReadFailureCount();
+      LOG.error(String.format("Read from zNode:%s on zookeeper instance:%s failed", zNode, server));
+    }
+
+    @Override public void publishReadTiming(String znode, String server, long msTime) {
+      LOG.info(String.format("Read from zNode:%s on zookeeper instance:%s in %dms",
+          znode, server, msTime));
+    }
+  }
+
+  static class ZookeeperTask implements Callable<Void> {
+    private final Connection connection;
+    private final String host;
+    private String znode;
+    private final int timeout;
+    private ZookeeperStdOutSink sink;
+
+    public ZookeeperTask(Connection connection, String host, String znode, int timeout,
+        ZookeeperStdOutSink sink) {
+      this.connection = connection;
+      this.host = host;
+      this.znode = znode;
+      this.timeout = timeout;
+      this.sink = sink;
+    }
+
+    @Override public Void call() throws Exception {
+      ZooKeeper zooKeeper = null;
+      try {
+        zooKeeper = new ZooKeeper(host, timeout, EmptyWatcher.instance);
+        Stat exists = zooKeeper.exists(znode, false);
+        StopWatch stopwatch = new StopWatch();
+        stopwatch.start();
+        zooKeeper.getData(znode, false, exists);
+        stopwatch.stop();
+        sink.publishReadTiming(znode, host, stopwatch.getTime());
+      } catch (KeeperException | InterruptedException e) {
+        sink.publishReadFailure(znode, host);
+      } finally {
+        if (zooKeeper != null) {
+          zooKeeper.close();
+        }
+      }
+      return null;
+    }
+  }
+
   /**
    * For each column family of the region tries to get one row and outputs the latency, or the
    * failure.
@@ -462,6 +526,7 @@ public final class Canary implements Tool {
   private long timeout = DEFAULT_TIMEOUT;
   private boolean failOnError = true;
   private boolean regionServerMode = false;
+  private boolean zookeeperMode = false;
   private boolean regionServerAllRegions = false;
   private boolean writeSniffing = false;
   private boolean treatFailureAsError = false;
@@ -522,6 +587,8 @@ public final class Canary implements Tool {
             System.err.println("-interval needs a numeric value argument.");
             printUsageAndExit();
           }
+        } else if (cmd.equals("-zookeeper")) {
+          this.zookeeperMode = true;
         } else if(cmd.equals("-regionserver")) {
           this.regionServerMode = true;
         } else if(cmd.equals("-allRegions")) {
@@ -578,6 +645,13 @@ public final class Canary implements Tool {
       System.err.println("-allRegions can only be specified in regionserver mode.");
       printUsageAndExit();
     }
+    if (this.zookeeperMode) {
+      if (this.regionServerMode || this.regionServerAllRegions || this.writeSniffing) {
+        System.err.println("-zookeeper is exclusive and cannot be combined with "
+            + "other modes.");
+        printUsageAndExit();
+      }
+    }
     return index;
   }
 
@@ -662,6 +736,8 @@ public final class Canary implements Tool {
     System.err.println("      which means to enable regionserver mode");
     System.err.println("   -allRegions    Tries all regions on a regionserver,");
     System.err.println("      only works in regionserver mode.");
+    System.err.println("   -zookeeper    Tries to grab zookeeper.znode.parent ");
+    System.err.println("      on each zookeeper instance");
     System.err.println("   -daemon        Continuous check at defined intervals.");
     System.err.println("   -interval <N>  Interval between checks (sec)");
     System.err.println("   -e             Use table/regionserver as regular expression");
@@ -700,6 +776,10 @@ public final class Canary implements Tool {
           new RegionServerMonitor(connection, monitorTargets, this.useRegExp,
               (ExtendedSink) this.sink, this.executor, this.regionServerAllRegions,
               this.treatFailureAsError);
+    } else if (this.zookeeperMode) {
+      monitor =
+          new ZookeeperMonitor(connection, monitorTargets, this.useRegExp,
+              (ZookeeperStdOutSink) this.sink, this.executor, this.treatFailureAsError);
     } else {
       monitor =
           new RegionMonitor(connection, monitorTargets, this.useRegExp, this.sink, this.executor,
@@ -1040,6 +1120,62 @@ public final class Canary implements Tool {
     }
     return executor.invokeAll(tasks);
   }
+
+  //  monitor for zookeeper mode
+  private static class ZookeeperMonitor extends Monitor {
+    private List<String> hosts;
+    private final String znode;
+    private final int timeout;
+
+    protected ZookeeperMonitor(Connection connection, String[] monitorTargets, boolean useRegExp,
+        ExtendedSink sink, ExecutorService executor, boolean treatFailureAsError)  {
+      super(connection, monitorTargets, useRegExp, sink, executor, treatFailureAsError);
+      Configuration configuration = connection.getConfiguration();
+      znode =
+          configuration.get(ZOOKEEPER_ZNODE_PARENT,
+              DEFAULT_ZOOKEEPER_ZNODE_PARENT);
+      timeout = configuration
+          .getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
+      ConnectStringParser parser =
+          new ConnectStringParser(ZKConfig.getZKQuorumServersString(configuration));
+      hosts = Lists.newArrayList();
+      for (InetSocketAddress server : parser.getServerAddresses()) {
+        hosts.add(server.toString());
+      }
+    }
+
+    @Override public void run() {
+      List<ZookeeperTask> tasks = Lists.newArrayList();
+      for (final String host : hosts) {
+        tasks.add(new ZookeeperTask(connection, host, znode, timeout, getSink()));
+      }
+      try {
+        for (Future<Void> future : this.executor.invokeAll(tasks)) {
+          try {
+            future.get();
+          } catch (ExecutionException e) {
+            LOG.error("Sniff zookeeper failed!", e);
+            this.errorCode = ERROR_EXIT_CODE;
+          }
+        }
+      } catch (InterruptedException e) {
+        this.errorCode = ERROR_EXIT_CODE;
+        Thread.currentThread().interrupt();
+        LOG.error("Sniff zookeeper interrupted!", e);
+      }
+      this.done = true;
+    }
+
+
+    private ZookeeperStdOutSink getSink() {
+      if (!(sink instanceof ZookeeperStdOutSink)) {
+        throw new RuntimeException("Can only write to zookeeper sink");
+      }
+      return ((ZookeeperStdOutSink) sink);
+    }
+  }
+
+
   // a monitor for regionserver mode
   private static class RegionServerMonitor extends Monitor {
 
@@ -1255,7 +1391,7 @@ public final class Canary implements Tool {
     new GenericOptionsParser(conf, args);
 
     int numThreads = conf.getInt("hbase.canary.threads.num", MAX_THREADS_NUM);
-    LOG.info("Number of exection threads " + numThreads);
+    LOG.info("Number of execution threads " + numThreads);
 
     ExecutorService executor = new ScheduledThreadPoolExecutor(numThreads);
 

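For context on what each per-host probe does: ZookeeperTask (added elsewhere in this patch) boils down to timing a read of the base znode against a single quorum member and handing the elapsed time to the sink. A minimal sketch of that probe, assuming a raw org.apache.zookeeper.ZooKeeper client; the probeZnode helper below is illustrative, not the actual task implementation:

    import org.apache.zookeeper.ZooKeeper;

    // Connect to a single quorum member and time a read of the base znode.
    static long probeZnode(String hostPort, String znode, int sessionTimeoutMs)
        throws Exception {
      ZooKeeper zk = new ZooKeeper(hostPort, sessionTimeoutMs, event -> { });
      try {
        long start = System.nanoTime();
        zk.getData(znode, false, null);   // throws if the znode cannot be read
        return (System.nanoTime() - start) / 1_000_000L;   // elapsed millis
      } finally {
        zk.close();
      }
    }

The new test below drives the mode end to end: it builds the canary as new Canary(executor, new Canary.ZookeeperStdOutSink()), runs it through ToolRunner with the -zookeeper flag, and verifies that publishReadTiming fired for the quorum member.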
http://git-wip-us.apache.org/repos/asf/hbase/blob/7e5d5308/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
index 755e5ba..fd67186 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.util.ToolRunner;
 import org.apache.log4j.Appender;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.spi.LoggingEvent;
+import com.google.common.collect.Iterables;
+import org.apache.hadoop.hbase.HConstants;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -40,13 +42,11 @@ import org.mockito.ArgumentMatcher;
 import org.mockito.Mock;
 import org.mockito.runners.MockitoJUnitRunner;
 
-
-import java.util.List;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 
-import static org.junit.Assert.*;
 import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.atLeastOnce;
 import static org.mockito.Mockito.spy;
@@ -79,6 +79,24 @@ public class TestCanaryTool {
   Appender mockAppender;
 
   @Test
+  public void testBasicZookeeperCanaryWorks() throws Exception {
+    Integer port =
+        Iterables.getOnlyElement(testingUtility.getZkCluster().getClientPortList(), null);
+    testingUtility.getConfiguration().set(HConstants.ZOOKEEPER_QUORUM,
+        "localhost:" + port + "/hbase");
+    ExecutorService executor = new ScheduledThreadPoolExecutor(2);
+    Canary.ZookeeperStdOutSink sink = spy(new Canary.ZookeeperStdOutSink());
+    Canary canary = new Canary(executor, sink);
+    String[] args = { "-t", "10000", "-zookeeper" };
+    ToolRunner.run(testingUtility.getConfiguration(), canary, args);
+
+    String baseZnode = testingUtility.getConfiguration()
+        .get(HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
+    verify(sink, atLeastOnce())
+        .publishReadTiming(eq(baseZnode), eq("localhost:" + port), anyLong());
+  }
+
+  @Test
   public void testBasicCanaryWorks() throws Exception {
     TableName tableName = TableName.valueOf("testTable");
     Table table = testingUtility.createTable(tableName, new byte[][] { FAMILY });


[30/50] hbase git commit: HBASE-15957 RpcClientImpl.close never ends in some circumstances

Posted by sy...@apache.org.
HBASE-15957 RpcClientImpl.close never ends in some circumstances

Signed-off-by: Enis Soztutar <en...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/da88b482
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/da88b482
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/da88b482

Branch: refs/heads/hbase-12439
Commit: da88b4824054f57fbcbc7795469ab2369a39b5ed
Parents: 376ad0d
Author: Sergey Soldatov <ss...@apache.org>
Authored: Sun Jun 5 23:46:03 2016 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Tue Jun 7 11:33:03 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/ipc/RpcClientImpl.java  |  5 ++-
 .../hbase/ipc/IntegrationTestRpcClient.java     | 35 ++++++++++++++++----
 2 files changed, 31 insertions(+), 9 deletions(-)
----------------------------------------------------------------------
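The hang came from the tail of close(): markClosed() can return false for a connection that was never actually started (the scenario the new test provokes), so the old guard skipped conn.close(), the connection was never removed from the pool, and the final wait loop spun forever. A minimal sketch of that wait, with a schematic 'connections' set standing in for the client's real bookkeeping:

    // Each Connection removes itself from 'connections' (and notifies) in close().
    // If any connection skips close(), the set never empties and this never returns.
    private static void waitForConnectionsToClose(java.util.Set<?> connections)
        throws InterruptedException {
      synchronized (connections) {
        while (!connections.isEmpty()) {
          connections.wait(100);
        }
      }
    }

Calling markClosed() and close() unconditionally makes the shutdown path idempotent, so the wait always terminates.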


http://git-wip-us.apache.org/repos/asf/hbase/blob/da88b482/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index d8c87e9..dc05af1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -1202,9 +1202,8 @@ public class RpcClientImpl extends AbstractRpcClient {
     }
     if (connsToClose != null) {
       for (Connection conn : connsToClose) {
-        if (conn.markClosed(new InterruptedIOException("RpcClient is closing"))) {
-          conn.close();
-        }
+        conn.markClosed(new InterruptedIOException("RpcClient is closing"));
+        conn.close();
       }
     }
     // wait until all connections are closed

http://git-wip-us.apache.org/repos/asf/hbase/blob/da88b482/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
index c28f3e6..6c0fbcc 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ipc/IntegrationTestRpcClient.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hbase.ipc;
 
+import static org.apache.hadoop.hbase.ipc.RpcClient.SPECIFIC_WRITE_THREAD;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -41,12 +42,6 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.codec.Codec;
-import org.apache.hadoop.hbase.ipc.AbstractRpcClient;
-import org.apache.hadoop.hbase.ipc.AsyncRpcClient;
-import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
-import org.apache.hadoop.hbase.ipc.RpcClientImpl;
-import org.apache.hadoop.hbase.ipc.RpcScheduler;
-import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos;
 import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoRequestProto;
 import org.apache.hadoop.hbase.ipc.protobuf.generated.TestProtos.EchoResponseProto;
@@ -290,6 +285,7 @@ public class IntegrationTestRpcClient {
   static class SimpleClient extends Thread {
     AbstractRpcClient rpcClient;
     AtomicBoolean running = new  AtomicBoolean(true);
+    AtomicBoolean sending = new AtomicBoolean(false);
     AtomicReference<Throwable> exception = new AtomicReference<>(null);
     Cluster cluster;
     String id;
@@ -319,6 +315,7 @@ public class IntegrationTestRpcClient {
           if (address == null) {
             throw new IOException("Listener channel is closed");
           }
+          sending.set(true);
           ret = (EchoResponseProto)
               rpcClient.callBlockingMethod(md, null, param, ret, user, address);
         } catch (Exception e) {
@@ -340,6 +337,9 @@ public class IntegrationTestRpcClient {
     void stopRunning() {
       running.set(false);
     }
+    boolean isSending() {
+      return sending.get();
+    }
 
     void rethrowException() throws Throwable {
       if (exception.get() != null) {
@@ -348,6 +348,29 @@ public class IntegrationTestRpcClient {
     }
   }
 
+  /*
+   * Test that connections which were never started are successfully removed from the
+   * connection pool when the rpc client is closing.
+   */
+  @Test (timeout = 30000)
+  public void testRpcWithWriteThread() throws IOException, InterruptedException {
+    LOG.info("Starting test");
+    Cluster cluster = new Cluster(1, 1);
+    cluster.startServer();
+    conf.setBoolean(SPECIFIC_WRITE_THREAD, true);
+    for (int i = 0; i < 1000; i++) {
+      AbstractRpcClient rpcClient = createRpcClient(conf, true);
+      SimpleClient client = new SimpleClient(cluster, rpcClient, "Client1");
+      client.start();
+      while(!client.isSending()) {
+        Thread.sleep(1);
+      }
+      client.stopRunning();
+      rpcClient.close();
+    }
+  }
+
+
   @Test (timeout = 900000)
   public void testRpcWithChaosMonkeyWithSyncClient() throws Throwable {
     for (int i = 0; i < numIterations; i++) {


[13/50] hbase git commit: HBASE-15933 Addendum - make merge decision when sizes of both regions are known

Posted by sy...@apache.org.
HBASE-15933 Addendum - make merge decision when sizes of both regions are known


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cd258800
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cd258800
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cd258800

Branch: refs/heads/hbase-12439
Commit: cd2588001cf31ad2fb2020f9e021c9b1be1b76fc
Parents: 9593a9f
Author: tedyu <yu...@gmail.com>
Authored: Thu Jun 2 09:30:36 2016 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Thu Jun 2 09:30:36 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java     | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------
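The guard matters because getRegionSize() can report a non-positive value when a region's size is not yet known (for example, when no RegionLoad has been received for it); without the check, an unknown size reads as zero and makes almost any neighbor pair look smaller than the average. A schematic of the corrected decision; the shouldMerge helper is illustrative, not the normalizer's actual code:

    // Merge only when both sizes are known; a non-positive size means "unknown".
    static boolean shouldMerge(long size1, long size2, double avgRegionSize) {
      boolean bothKnown = size1 > 0 && size2 > 0;
      return bothKnown && (size1 + size2 < avgRegionSize);
    }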


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd258800/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
index d209eb7..7a54d87 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
@@ -182,7 +182,7 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
         if (mergeEnabled) {
           HRegionInfo hri2 = tableRegions.get(candidateIdx+1);
           long regionSize2 = getRegionSize(hri2);
-          if (regionSize + regionSize2 < avgRegionSize) {
+          if (regionSize > 0 && regionSize2 > 0 && regionSize + regionSize2 < avgRegionSize) {
             LOG.info("Table " + table + ", small region size: " + regionSize
               + " plus its neighbor size: " + regionSize2
               + ", less than the avg size " + avgRegionSize + ", merging them");


[27/50] hbase git commit: HBASE-15965 - Testing by executing a command will cover the exact path users will trigger, so it's better than directly calling library functions in tests. Changing the tests to use @shell.command(:<command>, args) to execute them like a command coming from the shell

Posted by sy...@apache.org.
HBASE-15965
- Testing by executing a command will cover the exact path users will trigger, so it's better than directly calling library functions in tests. Changing the tests to use @shell.command(:<command>, args) to execute them like a command coming from the shell.

Norm change:
Commands should print the output the user would like to see, but, in the end, should also return the relevant value. This way:
- Tests can use the returned value to check that the functionality works.
- Tests can capture stdout to assert the particular kind of output the user should see.
- We do not print the return value in interactive mode, keeping the output clean. See the Shell.command() function.

Bugs found due to this change:
- Uncovered a bug in major_compact.rb with this approach. It was calling admin.majorCompact(), which doesn't exist, but our tests didn't catch it since they tested admin.major_compact() directly.
- Enabled TestReplicationShell. If it's bad, flaky infra will take care of it.

Change-Id: I5d8af16bf477a79a2f526a5bf11c245b02b7d276


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/15c03fd1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/15c03fd1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/15c03fd1

Branch: refs/heads/hbase-12439
Commit: 15c03fd1c97c271aca6dc30feab35ec0c9f8edbe
Parents: 3d7840a
Author: Apekshit Sharma <ap...@apache.org>
Authored: Mon Jun 6 13:35:06 2016 -0700
Committer: Apekshit Sharma <ap...@apache.org>
Committed: Mon Jun 6 17:50:22 2016 -0700

----------------------------------------------------------------------
 .../replication/ReplicationPeerConfig.java      |   6 +-
 hbase-shell/src/main/ruby/hbase/admin.rb        |  23 +-
 hbase-shell/src/main/ruby/hbase/table.rb        |   1 +
 hbase-shell/src/main/ruby/shell.rb              |  12 +-
 hbase-shell/src/main/ruby/shell/commands.rb     |   2 +
 .../main/ruby/shell/commands/balance_rsgroup.rb |   9 +-
 .../src/main/ruby/shell/commands/create.rb      |   2 +
 .../src/main/ruby/shell/commands/exists.rb      |   4 +-
 .../src/main/ruby/shell/commands/get_auths.rb   |   1 +
 .../main/ruby/shell/commands/get_peer_config.rb |   1 +
 .../src/main/ruby/shell/commands/is_enabled.rb  |   4 +-
 .../shell/commands/list_namespace_tables.rb     |   1 +
 .../ruby/shell/commands/list_peer_configs.rb    |   1 +
 .../src/main/ruby/shell/commands/list_peers.rb  |   1 +
 .../main/ruby/shell/commands/locate_region.rb   |   1 +
 .../main/ruby/shell/commands/major_compact.rb   |   2 +-
 .../ruby/shell/commands/show_peer_tableCFs.rb   |   4 +-
 .../src/main/ruby/shell/commands/truncate.rb    |   3 +-
 .../ruby/shell/commands/truncate_preserve.rb    |   3 +-
 .../hbase/client/TestReplicationShell.java      |   2 +-
 hbase-shell/src/test/ruby/hbase/admin_test.rb   | 229 +++++++++----------
 .../test/ruby/hbase/replication_admin_test.rb   | 110 ++++-----
 .../ruby/hbase/visibility_labels_admin_test.rb  |  20 +-
 hbase-shell/src/test/ruby/test_helper.rb        |  23 +-
 24 files changed, 254 insertions(+), 211 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 7799de6..1d2066c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -96,8 +96,10 @@ public class ReplicationPeerConfig {
   @Override
   public String toString() {
     StringBuilder builder = new StringBuilder("clusterKey=").append(clusterKey).append(",");
-    builder.append("replicationEndpointImpl=").append(replicationEndpointImpl).append(",")
-        .append("tableCFs=").append(tableCFsMap.toString());
+    builder.append("replicationEndpointImpl=").append(replicationEndpointImpl).append(",");
+    if (tableCFsMap != null) {
+      builder.append("tableCFs=").append(tableCFsMap.toString());
+    }
     return builder.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index f32376d..d66c1d6 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -458,16 +458,17 @@ module Hbase
 
     #----------------------------------------------------------------------------------------------
     # Truncates table (deletes all records by recreating the table)
-    def truncate(table_name_str, conf = @conf)
+    def truncate(table_name_str)
+      puts "Truncating '#{table_name_str}' table (it may take a while):"
       table_name = TableName.valueOf(table_name_str)
       table_description = @admin.getTableDescriptor(table_name)
       raise ArgumentError, "Table #{table_name_str} is not enabled. Enable it first." unless
           enabled?(table_name_str)
-      yield 'Disabling table...' if block_given?
+      puts 'Disabling table...'
       @admin.disableTable(table_name)
 
       begin
-        yield 'Truncating table...' if block_given?
+        puts 'Truncating table...'
         @admin.truncateTable(table_name, false)
       rescue => e
         # Handle the compatibility case, where the truncate method doesn't exists on the Master
@@ -475,10 +476,10 @@ module Hbase
         rootCause = e.cause
         if rootCause.kind_of?(org.apache.hadoop.hbase.DoNotRetryIOException) then
           # Handle the compatibility case, where the truncate method doesn't exists on the Master
-          yield 'Dropping table...' if block_given?
+          puts 'Dropping table...'
           @admin.deleteTable(table_name)
 
-          yield 'Creating table...' if block_given?
+          puts 'Creating table...'
           @admin.createTable(table_description)
         else
           raise e
@@ -488,9 +489,9 @@ module Hbase
 
     #----------------------------------------------------------------------------------------------
     # Truncates table while maintaining region boundaries (deletes all records by recreating the table)
-    def truncate_preserve(table_name_str, conf = @conf)
+    def truncate_preserve(table_name_str)
+      puts "Truncating '#{table_name_str}' table (it may take a while):"
       table_name = TableName.valueOf(table_name_str)
-      h_table = @connection.getTable(table_name)
       locator = @connection.getRegionLocator(table_name)
       begin
         splits = locator.getAllRegionLocations().
@@ -501,11 +502,11 @@ module Hbase
       end
 
       table_description = @admin.getTableDescriptor(table_name)
-      yield 'Disabling table...' if block_given?
+      puts 'Disabling table...'
       disable(table_name_str)
 
       begin
-        yield 'Truncating table...' if block_given?
+        puts 'Truncating table...'
         @admin.truncateTable(table_name, true)
       rescue => e
         # Handle the compatibility case, where the truncate method doesn't exists on the Master
@@ -513,10 +514,10 @@ module Hbase
         rootCause = e.cause
         if rootCause.kind_of?(org.apache.hadoop.hbase.DoNotRetryIOException) then
           # Handle the compatibility case, where the truncate method doesn't exists on the Master
-          yield 'Dropping table...' if block_given?
+          puts 'Dropping table...'
           @admin.deleteTable(table_name)
 
-          yield 'Creating table with region boundaries...' if block_given?
+          puts 'Creating table with region boundaries...'
           @admin.createTable(table_description, splits)
         else
           raise e

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/hbase/table.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb
index e64b4ee..a90760e 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -719,6 +719,7 @@ EOF
           map{|i| Bytes.toStringBinary(i.getRegionInfo().getStartKey)}.delete_if{|k| k == ""}
       locator.close()
       puts("Total number of splits = %s" % [splits.size + 1])
+      puts splits
       return splits
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 35626d9..d0cb577 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -134,9 +134,17 @@ module Shell
       ::Shell.commands[command.to_s].new(self)
     end
 
-    #call the method 'command' on the specified command
+    # call the method 'command' on the specified command
+    # If interactive is enabled, then we suppress the return value. The command should have
+    # printed relevant output.
+    # Return value is only useful in non-interactive mode, e.g. in tests.
     def command(command, *args)
-      internal_command(command, :command, *args)
+      ret = internal_command(command, :command, *args)
+      if self.interactive
+        return nil
+      else
+        return ret
+      end
     end
 
     # call a specific internal method in the command instance

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index f86904c..98fcf60 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -17,6 +17,8 @@
 # limitations under the License.
 #
 
+require 'shell/formatter'
+
 module Shell
   module Commands
     class Command

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb b/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb
index bee139f..c925f28 100644
--- a/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/balance_rsgroup.rb
@@ -30,7 +30,14 @@ EOF
       end
 
       def command(group_name)
-        rsgroup_admin.balance_rs_group(group_name)
+        # Returns true if balancer was run, otherwise false.
+        ret = rsgroup_admin.balance_rs_group(group_name)
+        if ret
+          puts "Ran the balancer."
+        else
+          puts "Couldn't run the balancer."
+        end
+        ret
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/create.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/create.rb b/hbase-shell/src/main/ruby/shell/commands/create.rb
index c237ca9..ee14455 100644
--- a/hbase-shell/src/main/ruby/shell/commands/create.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/create.rb
@@ -64,6 +64,8 @@ EOF
       def command(table, *args)
         admin.create(table, *args)
         @end_time = Time.now
+        puts "Created table " + table.to_s
+
         #and then return the table just created
         table(table)
       end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/exists.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/exists.rb b/hbase-shell/src/main/ruby/shell/commands/exists.rb
index 7a64813..4eb13a4 100644
--- a/hbase-shell/src/main/ruby/shell/commands/exists.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/exists.rb
@@ -29,9 +29,11 @@ EOF
       end
 
       def command(table)
+        exists = admin.exists?(table.to_s)
         formatter.row([
-            "Table #{table} " + (admin.exists?(table.to_s) ? "does exist" : "does not exist")
+            "Table #{table} " + (exists ? "does exist" : "does not exist")
           ])
+        exists
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/get_auths.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_auths.rb b/hbase-shell/src/main/ruby/shell/commands/get_auths.rb
index 04b486b..4ea1b2e 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_auths.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_auths.rb
@@ -35,6 +35,7 @@ EOF
         list.each do |auths|
           formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(auths.toByteArray)])
         end
+        list
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb b/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
index 3da6bdf..6417980 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
@@ -28,6 +28,7 @@ module Shell
         peer_config = replication_admin.get_peer_config(id)
         @start_time = Time.now
         format_peer_config(peer_config)
+        peer_config
       end
 
       def format_peer_config(peer_config)

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb b/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb
index d8fb2ab..da9c566 100644
--- a/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb
@@ -29,7 +29,9 @@ EOF
       end
 
       def command(table)
-        formatter.row([admin.enabled?(table)? "true" : "false"])
+        enabled = admin.enabled?(table)
+        formatter.row([enabled ? "true" : "false"])
+        enabled
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb b/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb
index 9db090e..30d4db0 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb
@@ -38,6 +38,7 @@ EOF
         end
 
         formatter.footer(list.size)
+        list
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb b/hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb
index 153e0ce..8946e39 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb
@@ -35,6 +35,7 @@ module Shell
             formatter.row([" "])
           end
         end
+        peer_configs
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index c5c3397..72a0704 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -41,6 +41,7 @@ EOF
         end
 
         formatter.footer()
+        peers
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/locate_region.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/locate_region.rb b/hbase-shell/src/main/ruby/shell/commands/locate_region.rb
index a2815d6..e2487c1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/locate_region.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/locate_region.rb
@@ -36,6 +36,7 @@ EOF
         formatter.header([ "HOST", "REGION" ])
         formatter.row([region_location.getHostnamePort(), hri.toString()])
         formatter.footer(1)
+        region_location
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/major_compact.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/major_compact.rb b/hbase-shell/src/main/ruby/shell/commands/major_compact.rb
index 1af6c64..9b0573c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/major_compact.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/major_compact.rb
@@ -44,7 +44,7 @@ module Shell
       end
 
       def command(table_or_region_name, family = nil, type = "NORMAL")
-        admin.majorCompact(table_or_region_name, family, type)
+        admin.major_compact(table_or_region_name, family, type)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb b/hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb
index 3ce3d06..b6b6956 100644
--- a/hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/show_peer_tableCFs.rb
@@ -30,7 +30,9 @@ module Shell
       end
 
       def command(id)
-        puts replication_admin.show_peer_tableCFs(id)
+        peer_table_cfs = replication_admin.show_peer_tableCFs(id)
+        puts peer_table_cfs
+        peer_table_cfs
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/truncate.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/truncate.rb b/hbase-shell/src/main/ruby/shell/commands/truncate.rb
index 3f888c6..aff51ac 100644
--- a/hbase-shell/src/main/ruby/shell/commands/truncate.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/truncate.rb
@@ -27,8 +27,7 @@ EOF
       end
 
       def command(table)
-        puts "Truncating '#{table}' table (it may take a while):"
-        admin.truncate(table) { |log| puts " - #{log}" }
+        admin.truncate(table)
       end
 
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb b/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb
index fcce5e5..8bb3131 100644
--- a/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb
@@ -27,8 +27,7 @@ EOF
       end
 
       def command(table)
-        puts "Truncating '#{table}' table (it may take a while):"
-        admin.truncate_preserve(table) { |log| puts " - #{log}" }
+        admin.truncate_preserve(table)
       end
 
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java
index 3f4af05..04fbc7a 100644
--- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/TestReplicationShell.java
@@ -28,7 +28,7 @@ import org.junit.experimental.categories.Category;
 
 @Category({ ClientTests.class, LargeTests.class })
 public class TestReplicationShell extends AbstractTestShell {
-  @Ignore ("Disabled because hangs on occasion.. about 10% of the time") @Test
+  @Test
   public void testRunShellTests() throws IOException {
     System.setProperty("shell.test.include", "replication_admin_test.rb");
     // Start all ruby tests

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/test/ruby/hbase/admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb
index e2c3bc0..cf9cf64 100644
--- a/hbase-shell/src/test/ruby/hbase/admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb
@@ -41,21 +41,21 @@ module Hbase
     end
 
     define_test "exists? should return true when a table exists" do
-      assert(admin.exists?('hbase:meta'))
+      assert(command(:exists, 'hbase:meta'))
     end
 
     define_test "exists? should return false when a table exists" do
-      assert(!admin.exists?('NOT.EXISTS'))
+      assert(!command(:exists, 'NOT.EXISTS'))
     end
 
     define_test "enabled? should return true for enabled tables" do
-      admin.enable(@test_name)
-      assert(admin.enabled?(@test_name))
+      command(:enable, @test_name)
+      assert(command(:is_enabled, @test_name))
     end
 
     define_test "enabled? should return false for disabled tables" do
-      admin.disable(@test_name)
-      assert(!admin.enabled?(@test_name))
+      command(:disable, @test_name)
+      assert(!command(:is_enabled, @test_name))
     end
   end
 
@@ -78,63 +78,67 @@ module Hbase
     end
 
     define_test "list should return a list of tables" do
-      assert(admin.list.member?(@test_name))
+      list = command(:list)
+      assert(list.member?(@test_name))
     end
 
     define_test "list should not return meta tables" do
-      assert(!admin.list.member?('hbase:meta'))
+      list = command(:list)
+      assert(!list.member?('hbase:meta'))
     end
 
     define_test "list_namespace_tables for the system namespace should return a list of tables" do
-      assert(admin.list_namespace_tables('hbase').count > 0)
+      list = command(:list_namespace_tables, 'hbase')
+      assert(list.count > 0)
     end
 
     define_test "list_namespace_tables for the default namespace should return a list of tables" do
-      assert(admin.list_namespace_tables('default').count > 0)
+      list = command(:list_namespace_tables, 'default')
+      assert(list.count > 0)
     end
 
     #-------------------------------------------------------------------------------
 
     define_test "flush should work" do
-      admin.flush('hbase:meta')
+      command(:flush, 'hbase:meta')
     end
 
     #-------------------------------------------------------------------------------
 
     define_test "compact should work" do
-      admin.compact('hbase:meta')
+      command(:compact, 'hbase:meta')
     end
 
     #-------------------------------------------------------------------------------
 
     define_test "major_compact should work" do
-      admin.major_compact('hbase:meta')
+      command(:major_compact, 'hbase:meta')
     end
 
     #-------------------------------------------------------------------------------
 
     define_test "split should work" do
-      admin.split('hbase:meta', nil)
+      command(:split, 'hbase:meta', nil)
     end
 
     #-------------------------------------------------------------------------------
 
     define_test "drop should fail on non-existent tables" do
       assert_raise(ArgumentError) do
-        admin.drop('NOT.EXISTS')
+        command(:drop, 'NOT.EXISTS')
       end
     end
 
     define_test "drop should fail on enabled tables" do
       assert_raise(ArgumentError) do
-        admin.drop(@test_name)
+        command(:drop, @test_name)
       end
     end
 
     define_test "drop should drop tables" do
-      admin.disable(@test_name)
-      admin.drop(@test_name)
-      assert(!admin.exists?(@test_name))
+      command(:disable, @test_name)
+      command(:drop, @test_name)
+      assert(!command(:exists, @test_name))
     end
 
     #-------------------------------------------------------------------------------
@@ -147,45 +151,46 @@ module Hbase
 
     define_test "create should fail with non-string table names" do
       assert_raise(ArgumentError) do
-        admin.create(123, 'xxx')
+        command(:create, 123, 'xxx')
       end
     end
 
     define_test "create should fail with non-string/non-hash column args" do
       assert_raise(ArgumentError) do
-        admin.create(@create_test_name, 123)
+        command(:create, @create_test_name, 123)
       end
     end
 
     define_test "create should fail without columns" do
       drop_test_table(@create_test_name)
       assert_raise(ArgumentError) do
-        admin.create(@create_test_name)
+        command(:create, @create_test_name)
       end
     end
     
     define_test "create should fail without columns when called with options" do
       drop_test_table(@create_test_name)
       assert_raise(ArgumentError) do
-        admin.create(@create_test_name, { OWNER => 'a' })
+        command(:create, @create_test_name, { OWNER => 'a' })
       end
     end
 
     define_test "create should work with string column args" do
       drop_test_table(@create_test_name)
-      admin.create(@create_test_name, 'a', 'b')
+      command(:create, @create_test_name, 'a', 'b')
       assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
      end
 
     define_test "create should work with hash column args" do
       drop_test_table(@create_test_name)
-      admin.create(@create_test_name, { NAME => 'a'}, { NAME => 'b'})
+      command(:create, @create_test_name, { NAME => 'a'}, { NAME => 'b'})
       assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
     end
     
     define_test "create should be able to set table options" do
       drop_test_table(@create_test_name)
-      admin.create(@create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678, OWNER => '987654321')
+      command(:create, @create_test_name, 'a', 'b', 'MAX_FILESIZE' => 12345678,
+              OWNER => '987654321')
       assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
       assert_match(/12345678/, admin.describe(@create_test_name))
       assert_match(/987654321/, admin.describe(@create_test_name))
@@ -193,14 +198,15 @@ module Hbase
         
     define_test "create should ignore table_att" do
       drop_test_table(@create_test_name)
-      admin.create(@create_test_name, 'a', 'b', METHOD => 'table_att', OWNER => '987654321')
+      command(:create, @create_test_name, 'a', 'b', METHOD => 'table_att', OWNER => '987654321')
       assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
       assert_match(/987654321/, admin.describe(@create_test_name))
     end
     
     define_test "create should work with SPLITALGO" do
       drop_test_table(@create_test_name)
-      admin.create(@create_test_name, 'a', 'b', {NUMREGIONS => 10, SPLITALGO => 'HexStringSplit'})
+      command(:create, @create_test_name, 'a', 'b',
+              {NUMREGIONS => 10, SPLITALGO => 'HexStringSplit'})
       assert_equal(['a:', 'b:'], table(@create_test_name).get_all_columns.sort)
     end
 
@@ -223,17 +229,13 @@ module Hbase
       table(@test_name).put(2, "x:a", 2)
       assert_equal(2, table(@test_name)._count_internal)
       # This is hacky.  Need to get the configuration into admin instance
-      admin.truncate(@test_name, $TEST_CLUSTER.getConfiguration)
+      command(:truncate, @test_name)
       assert_equal(0, table(@test_name)._count_internal)
     end
 
     define_test "truncate should yield log records" do
-      logs = []
-      admin.truncate(@test_name, $TEST_CLUSTER.getConfiguration) do |log|
-        assert_kind_of(String, log)
-        logs << log
-      end
-      assert(!logs.empty?)
+      output = capture_stdout { command(:truncate, @test_name) }
+      assert(!output.empty?)
     end
   end
 
@@ -253,16 +255,16 @@ module Hbase
     end
 
     define_test "close_region should allow encoded & non-encoded region names" do
-      region = admin.locate_region(@test_name, '')
+      region = command(:locate_region, @test_name, '')
       serverName = region.getServerName().getServerName()
       regionName = region.getRegionInfo().getRegionNameAsString()
       encodedRegionName = region.getRegionInfo().getEncodedName()
 
       # Close region with just region name.
-      admin.close_region(regionName, nil)
+      command(:close_region, regionName, nil)
       # Close region with region name and server.
-      admin.close_region(regionName, serverName)
-      admin.close_region(encodedRegionName, serverName)
+      command(:close_region, regionName, serverName)
+      command(:close_region, encodedRegionName, serverName)
     end
   end
 
@@ -286,77 +288,68 @@ module Hbase
 
     define_test "alter should fail with non-string table names" do
       assert_raise(ArgumentError) do
-        admin.alter(123, true, METHOD => 'delete', NAME => 'y')
+        command(:alter, 123, METHOD => 'delete', NAME => 'y')
       end
     end
 
     define_test "alter should fail with non-existing tables" do
       assert_raise(ArgumentError) do
-        admin.alter('NOT.EXISTS', true, METHOD => 'delete', NAME => 'y')
+        command(:alter, 'NOT.EXISTS', METHOD => 'delete', NAME => 'y')
       end
     end
 
     define_test "alter should not fail with enabled tables" do
-      admin.enable(@test_name)
-      admin.alter(@test_name, true, METHOD => 'delete', NAME => 'y')
+      command(:enable, @test_name)
+      command(:alter, @test_name, METHOD => 'delete', NAME => 'y')
     end
 
     define_test "alter should be able to delete column families" do
       assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      admin.alter(@test_name, true, METHOD => 'delete', NAME => 'y')
-      admin.enable(@test_name)
+      command(:alter, @test_name, METHOD => 'delete', NAME => 'y')
+      command(:enable, @test_name)
       assert_equal(['x:'], table(@test_name).get_all_columns.sort)
     end
 
     define_test "alter should be able to add column families" do
       assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      admin.alter(@test_name, true, NAME => 'z')
-      admin.enable(@test_name)
+      command(:alter, @test_name, NAME => 'z')
+      command(:enable, @test_name)
       assert_equal(['x:', 'y:', 'z:'], table(@test_name).get_all_columns.sort)
     end
 
     define_test "alter should be able to add column families (name-only alter spec)" do
       assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      admin.alter(@test_name, true, 'z')
-      admin.enable(@test_name)
+      command(:alter, @test_name, 'z')
+      command(:enable, @test_name)
       assert_equal(['x:', 'y:', 'z:'], table(@test_name).get_all_columns.sort)
     end
 
     define_test "alter should support more than one alteration in one call" do
       assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      alterOutput = capture_stdout { admin.alter(@test_name, true, { NAME => 'z' },
-        { METHOD => 'delete', NAME => 'y' }, 'MAX_FILESIZE' => 12345678) }
-      admin.enable(@test_name)
+      alterOutput = capture_stdout {
+        command(:alter, @test_name, { NAME => 'z' }, { METHOD => 'delete', NAME => 'y' },
+                'MAX_FILESIZE' => 12345678) }
+      command(:enable, @test_name)
       assert_equal(1, /Updating all regions/.match(alterOutput).size,
         "HBASE-15641 - Should only perform one table modification per alter.")
       assert_equal(['x:', 'z:'], table(@test_name).get_all_columns.sort)
       assert_match(/12345678/, admin.describe(@test_name))
     end
 
-    def capture_stdout
-      begin
-        old_stdout = $stdout
-        $stdout = StringIO.new('','w')
-        yield
-        $stdout.string
-      ensure
-        $stdout = old_stdout
-      end
-    end
 
     define_test 'alter should support shortcut DELETE alter specs' do
       assert_equal(['x:', 'y:'], table(@test_name).get_all_columns.sort)
-      admin.alter(@test_name, true, 'delete' => 'y')
+      command(:alter, @test_name, 'delete' => 'y')
       assert_equal(['x:'], table(@test_name).get_all_columns.sort)
     end
 
     define_test "alter should be able to change table options" do
-      admin.alter(@test_name, true, METHOD => 'table_att', 'MAX_FILESIZE' => 12345678)
+      command(:alter, @test_name, METHOD => 'table_att', 'MAX_FILESIZE' => 12345678)
       assert_match(/12345678/, admin.describe(@test_name))
     end
 
     define_test "alter should be able to change table options w/o table_att" do
-      admin.alter(@test_name, true, 'MAX_FILESIZE' => 12345678)
+      command(:alter, @test_name, 'MAX_FILESIZE' => 12345678)
       assert_match(/12345678/, admin.describe(@test_name))
     end
     
@@ -372,7 +365,7 @@ module Hbase
       # eval() is used to convert a string to regex
       assert_no_match(eval("/" + class_name + "/"), admin.describe(@test_name))
       assert_no_match(eval("/" + cp_key + "/"), admin.describe(@test_name))
-      admin.alter(@test_name, true, 'METHOD' => 'table_att', cp_key => cp_value)
+      command(:alter, @test_name, 'METHOD' => 'table_att', cp_key => cp_value)
       assert_match(eval("/" + class_name + "/"), admin.describe(@test_name))
       assert_match(eval("/" + cp_key + "\\$(\\d+)/"), admin.describe(@test_name))
     end
@@ -382,12 +375,12 @@ module Hbase
       create_test_table(@test_name)
 
       key = "MAX_FILESIZE"
-      admin.alter(@test_name, true, 'METHOD' => 'table_att', key => 12345678)
+      command(:alter, @test_name, 'METHOD' => 'table_att', key => 12345678)
 
       # eval() is used to convert a string to regex
       assert_match(eval("/" + key + "/"), admin.describe(@test_name))
 
-      admin.alter(@test_name, true, 'METHOD' => 'table_att_unset', 'NAME' => key)
+      command(:alter, @test_name, 'METHOD' => 'table_att_unset', 'NAME' => key)
       assert_no_match(eval("/" + key + "/"), admin.describe(@test_name))
     end
 
@@ -396,13 +389,13 @@ module Hbase
 
       key_1 = "TestAttr1"
       key_2 = "TestAttr2"
-      admin.create(@test_name, { NAME => 'i'}, METADATA => { key_1 => 1, key_2 => 2 })
+      command(:create, @test_name, { NAME => 'i'}, METADATA => { key_1 => 1, key_2 => 2 })
 
       # eval() is used to convert a string to regex
       assert_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
       assert_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
 
-      admin.alter(@test_name, true, 'METHOD' => 'table_att_unset', 'NAME' => [ key_1, key_2 ])
+      command(:alter, @test_name, 'METHOD' => 'table_att_unset', 'NAME' => [ key_1, key_2 ])
       assert_no_match(eval("/" + key_1 + "/"), admin.describe(@test_name))
       assert_no_match(eval("/" + key_2 + "/"), admin.describe(@test_name))
     end
@@ -450,66 +443,66 @@ module Hbase
     #-------------------------------------------------------------------------------
     define_test "Snapshot should fail with non-string table name" do
       assert_raise(ArgumentError) do
-        admin.snapshot(123, 'xxx')
+        command(:snapshot, 123, 'xxx')
       end
     end
 
     define_test "Snapshot should fail with non-string snapshot name" do
       assert_raise(ArgumentError) do
-        admin.snapshot(@test_name, 123)
+        command(:snapshot, @test_name, 123)
       end
     end
 
     define_test "Snapshot should fail without snapshot name" do
       assert_raise(ArgumentError) do
-        admin.snapshot(@test_name)
+        command(:snapshot, @test_name)
       end
     end
 
     define_test "Snapshot should work with string args" do
       drop_test_snapshot()
-      admin.snapshot(@test_name, @create_test_snapshot)
-      list = admin.list_snapshot(@create_test_snapshot)
+      command(:snapshot, @test_name, @create_test_snapshot)
+      list = command(:list_snapshots, @create_test_snapshot)
       assert_equal(1, list.size)
     end
 
     define_test "Snapshot should work when SKIP_FLUSH args" do
       drop_test_snapshot()
-      admin.snapshot(@test_name, @create_test_snapshot, {SKIP_FLUSH => true})
-      list = admin.list_snapshot(@create_test_snapshot)
+      command(:snapshot, @test_name, @create_test_snapshot, {SKIP_FLUSH => true})
+      list = command(:list_snapshots, @create_test_snapshot)
       assert_equal(1, list.size)
     end
 
     define_test "List snapshot without any args" do
       drop_test_snapshot()
-      admin.snapshot(@test_name, @create_test_snapshot)
-      list = admin.list_snapshot()
+      command(:snapshot, @test_name, @create_test_snapshot)
+      list = command(:list_snapshots)
       assert_equal(1, list.size)
     end
 
     define_test "List snapshot for a non-existing snapshot" do
-      list = admin.list_snapshot("xyz")
+      list = command(:list_snapshots, "xyz")
       assert_equal(0, list.size)
     end
 
     define_test "Restore snapshot without any args" do
       assert_raise(ArgumentError) do
-        admin.restore_snapshot()
+        command(:restore_snapshot)
       end
     end
 
     define_test "Restore snapshot should work" do
       drop_test_snapshot()
       restore_table = "test_restore_snapshot_table"
-      admin.create(restore_table, 'f1', 'f2')
+      command(:create, restore_table, 'f1', 'f2')
       assert_match(eval("/" + "f1" + "/"), admin.describe(restore_table))
       assert_match(eval("/" + "f2" + "/"), admin.describe(restore_table))
-      admin.snapshot(restore_table, @create_test_snapshot)
-      admin.alter(restore_table, true, METHOD => 'delete', NAME => 'f1')
+      command(:snapshot, restore_table, @create_test_snapshot)
+      command(:alter, restore_table, METHOD => 'delete', NAME => 'f1')
       assert_no_match(eval("/" + "f1" + "/"), admin.describe(restore_table))
       assert_match(eval("/" + "f2" + "/"), admin.describe(restore_table))
       drop_test_table(restore_table)
-      admin.restore_snapshot(@create_test_snapshot)
+      command(:restore_snapshot, @create_test_snapshot)
       assert_match(eval("/" + "f1" + "/"), admin.describe(restore_table))
       assert_match(eval("/" + "f2" + "/"), admin.describe(restore_table))
       drop_test_table(restore_table)
@@ -517,13 +510,13 @@ module Hbase
 
     define_test "Clone snapshot without any args" do
       assert_raise(ArgumentError) do
-        admin.restore_snapshot()
+        command(:restore_snapshot)
       end
     end
 
     define_test "Clone snapshot without table name args" do
       assert_raise(ArgumentError) do
-        admin.clone_snapshot(@create_test_snapshot)
+        command(:clone_snapshot, @create_test_snapshot)
       end
     end
 
@@ -532,8 +525,8 @@ module Hbase
       clone_table = "test_clone_snapshot_table"
       assert_match(eval("/" + "x" + "/"), admin.describe(@test_name))
       assert_match(eval("/" + "y" + "/"), admin.describe(@test_name))
-      admin.snapshot(@test_name, @create_test_snapshot)
-      admin.clone_snapshot(@create_test_snapshot, clone_table)
+      command(:snapshot, @test_name, @create_test_snapshot)
+      command(:clone_snapshot, @create_test_snapshot, clone_table)
       assert_match(eval("/" + "x" + "/"), admin.describe(clone_table))
       assert_match(eval("/" + "y" + "/"), admin.describe(clone_table))
       drop_test_table(clone_table)
@@ -547,11 +540,11 @@ module Hbase
 
     define_test "Delete snapshot should work" do
       drop_test_snapshot()
-      admin.snapshot(@test_name, @create_test_snapshot)
-      list = admin.list_snapshot()
+      command(:snapshot, @test_name, @create_test_snapshot)
+      list = command(:list_snapshots)
       assert_equal(1, list.size)
       admin.delete_snapshot(@create_test_snapshot)
-      list = admin.list_snapshot()
+      list = command(:list_snapshots)
       assert_equal(0, list.size)
     end
 
@@ -563,17 +556,17 @@ module Hbase
 
     define_test "Delete all snapshots should work" do
       drop_test_snapshot()
-      admin.snapshot(@test_name, "delete_all_snapshot1")
-      admin.snapshot(@test_name, "delete_all_snapshot2")
-      admin.snapshot(@test_name, "snapshot_delete_all_1")
-      admin.snapshot(@test_name, "snapshot_delete_all_2")
-      list = admin.list_snapshot()
+      command(:snapshot, @test_name, "delete_all_snapshot1")
+      command(:snapshot, @test_name, "delete_all_snapshot2")
+      command(:snapshot, @test_name, "snapshot_delete_all_1")
+      command(:snapshot, @test_name, "snapshot_delete_all_2")
+      list = command(:list_snapshots)
       assert_equal(4, list.size)
       admin.delete_all_snapshot("d.*")
-      list = admin.list_snapshot()
+      list = command(:list_snapshots)
       assert_equal(2, list.size)
       admin.delete_all_snapshot(".*")
-      list = admin.list_snapshot()
+      list = command(:list_snapshots)
       assert_equal(0, list.size)
     end
 
@@ -585,48 +578,48 @@ module Hbase
 
     define_test "Delete table snapshots should work" do
       drop_test_snapshot()
-      admin.snapshot(@test_name, "delete_table_snapshot1")
-      admin.snapshot(@test_name, "delete_table_snapshot2")
-      admin.snapshot(@test_name, "snapshot_delete_table1")
+      command(:snapshot, @test_name, "delete_table_snapshot1")
+      command(:snapshot, @test_name, "delete_table_snapshot2")
+      command(:snapshot, @test_name, "snapshot_delete_table1")
       new_table = "test_delete_table_snapshots_table"
-      admin.create(new_table, 'f1')
-      admin.snapshot(new_table, "delete_table_snapshot3")
-      list = admin.list_snapshot()
+      command(:create, new_table, 'f1')
+      command(:snapshot, new_table, "delete_table_snapshot3")
+      list = command(:list_snapshots)
       assert_equal(4, list.size)
       admin.delete_table_snapshots(@test_name, "d.*")
-      list = admin.list_snapshot()
+      list = command(:list_snapshots)
       assert_equal(2, list.size)
       admin.delete_table_snapshots(@test_name)
-      list = admin.list_snapshot()
+      list = command(:list_snapshots)
       assert_equal(1, list.size)
       admin.delete_table_snapshots(".*", "d.*")
-      list = admin.list_snapshot()
+      list = command(:list_snapshots)
       assert_equal(0, list.size)
       drop_test_table(new_table)
     end
 
     define_test "List table snapshots without any args" do
       assert_raise(ArgumentError) do
-        admin.list_table_snapshots()
+        command(:list_table_snapshots)
       end
     end
 
     define_test "List table snapshots should work" do
       drop_test_snapshot()
-      admin.snapshot(@test_name, "delete_table_snapshot1")
-      admin.snapshot(@test_name, "delete_table_snapshot2")
-      admin.snapshot(@test_name, "snapshot_delete_table1")
+      command(:snapshot, @test_name, "delete_table_snapshot1")
+      command(:snapshot, @test_name, "delete_table_snapshot2")
+      command(:snapshot, @test_name, "snapshot_delete_table1")
       new_table = "test_list_table_snapshots_table"
-      admin.create(new_table, 'f1')
-      admin.snapshot(new_table, "delete_table_snapshot3")
-      list = admin.list_table_snapshots(".*")
+      command(:create, new_table, 'f1')
+      command(:snapshot, new_table, "delete_table_snapshot3")
+      list = command(:list_table_snapshots, ".*")
       assert_equal(4, list.size)
-      list = admin.list_table_snapshots(@test_name, "d.*")
+      list = command(:list_table_snapshots, @test_name, "d.*")
       assert_equal(2, list.size)
-      list = admin.list_table_snapshots(@test_name)
+      list = command(:list_table_snapshots, @test_name)
       assert_equal(3, list.size)
       admin.delete_table_snapshots(".*")
-      list = admin.list_table_snapshots(".*", ".*")
+      list = command(:list_table_snapshots, ".*", ".*")
       assert_equal(0, list.size)
       drop_test_table(new_table)
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index d00dbc5..cf6eac2 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -33,25 +33,25 @@ module Hbase
 
       setup_hbase
 
-      assert_equal(0, replication_admin.list_peers.length)
+      assert_equal(0, command(:list_peers).length)
     end
 
     def teardown
-      assert_equal(0, replication_admin.list_peers.length)
+      assert_equal(0, command(:list_peers).length)
 
       shutdown
     end
 
     define_test "add_peer: should fail when args isn't specified" do
       assert_raise(ArgumentError) do
-        replication_admin.add_peer(@peer_id, nil)
+        command(:add_peer, @peer_id, nil)
       end
     end
 
     define_test "add_peer: fail when neither CLUSTER_KEY nor ENDPOINT_CLASSNAME are specified" do
       assert_raise(ArgumentError) do
         args = {}
-        replication_admin.add_peer(@peer_id, args)
+        command(:add_peer, @peer_id, args)
       end
     end
 
@@ -59,74 +59,74 @@ module Hbase
       assert_raise(ArgumentError) do
         args = { CLUSTER_KEY => 'zk1,zk2,zk3:2182:/hbase-prod',
                  ENDPOINT_CLASSNAME => 'org.apache.hadoop.hbase.MyReplicationEndpoint' }
-        replication_admin.add_peer(@peer_id, args)
+        command(:add_peer, @peer_id, args)
       end
     end
 
     define_test "add_peer: args must be a hash" do
       assert_raise(ArgumentError) do
-        replication_admin.add_peer(@peer_id, 1)
+        command(:add_peer, @peer_id, 1)
       end
       assert_raise(ArgumentError) do
-        replication_admin.add_peer(@peer_id, ['test'])
+        command(:add_peer, @peer_id, ['test'])
       end
       assert_raise(ArgumentError) do
-        replication_admin.add_peer(@peer_id, 'test')
+        command(:add_peer, @peer_id, 'test')
       end
     end
 
     define_test "add_peer: single zk cluster key" do
       cluster_key = "server1.cie.com:2181:/hbase"
 
-      replication_admin.add_peer(@peer_id, {CLUSTER_KEY => cluster_key})
+      command(:add_peer, @peer_id, {CLUSTER_KEY => cluster_key})
 
-      assert_equal(1, replication_admin.list_peers.length)
-      assert(replication_admin.list_peers.key?(@peer_id))
-      assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id).get_cluster_key)
+      assert_equal(1, command(:list_peers).length)
+      assert(command(:list_peers).key?(@peer_id))
+      assert_equal(cluster_key, command(:list_peers).fetch(@peer_id).get_cluster_key)
 
       # cleanup for future tests
-      replication_admin.remove_peer(@peer_id)
+      command(:remove_peer, @peer_id)
     end
 
     define_test "add_peer: multiple zk cluster key" do
       cluster_key = "zk1,zk2,zk3:2182:/hbase-prod"
 
-      replication_admin.add_peer(@peer_id, {CLUSTER_KEY => cluster_key})
+      command(:add_peer, @peer_id, {CLUSTER_KEY => cluster_key})
 
-      assert_equal(1, replication_admin.list_peers.length)
-      assert(replication_admin.list_peers.key?(@peer_id))
-      assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id).get_cluster_key)
+      assert_equal(1, command(:list_peers).length)
+      assert(command(:list_peers).key?(@peer_id))
+      assert_equal(cluster_key, command(:list_peers).fetch(@peer_id).get_cluster_key)
 
       # cleanup for future tests
-      replication_admin.remove_peer(@peer_id)
+      command(:remove_peer, @peer_id)
     end
 
     define_test "add_peer: single zk cluster key - peer config" do
       cluster_key = "server1.cie.com:2181:/hbase"
 
       args = { CLUSTER_KEY => cluster_key }
-      replication_admin.add_peer(@peer_id, args)
+      command(:add_peer, @peer_id, args)
 
-      assert_equal(1, replication_admin.list_peers.length)
-      assert(replication_admin.list_peers.key?(@peer_id))
-      assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id).get_cluster_key)
+      assert_equal(1, command(:list_peers).length)
+      assert(command(:list_peers).key?(@peer_id))
+      assert_equal(cluster_key, command(:list_peers).fetch(@peer_id).get_cluster_key)
 
       # cleanup for future tests
-      replication_admin.remove_peer(@peer_id)
+      command(:remove_peer, @peer_id)
     end
 
     define_test "add_peer: multiple zk cluster key - peer config" do
       cluster_key = "zk1,zk2,zk3:2182:/hbase-prod"
 
       args = { CLUSTER_KEY => cluster_key }
-      replication_admin.add_peer(@peer_id, args)
+      command(:add_peer, @peer_id, args)
 
-      assert_equal(1, replication_admin.list_peers.length)
-      assert(replication_admin.list_peers.key?(@peer_id))
-      assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id).get_cluster_key)
+      assert_equal(1, command(:list_peers).length)
+      assert(command(:list_peers).key?(@peer_id))
+      assert_equal(cluster_key, command(:list_peers).fetch(@peer_id).get_cluster_key)
 
       # cleanup for future tests
-      replication_admin.remove_peer(@peer_id)
+      command(:remove_peer, @peer_id)
     end
 
     define_test "add_peer: multiple zk cluster key and table_cfs - peer config" do
@@ -135,15 +135,15 @@ module Hbase
       table_cfs_str = "default.table1;default.table3:cf1,cf2;default.table2:cf1"
 
       args = { CLUSTER_KEY => cluster_key, TABLE_CFS => table_cfs }
-      replication_admin.add_peer(@peer_id, args)
+      command(:add_peer, @peer_id, args)
 
-      assert_equal(1, replication_admin.list_peers.length)
-      assert(replication_admin.list_peers.key?(@peer_id))
-      assert_equal(cluster_key, replication_admin.list_peers.fetch(@peer_id).get_cluster_key)
-      assert_equal(table_cfs_str, replication_admin.show_peer_tableCFs(@peer_id))
+      assert_equal(1, command(:list_peers).length)
+      assert(command(:list_peers).key?(@peer_id))
+      assert_equal(cluster_key, command(:list_peers).fetch(@peer_id).get_cluster_key)
+      assert_equal(table_cfs_str, command(:show_peer_tableCFs, @peer_id))
 
       # cleanup for future tests
-      replication_admin.remove_peer(@peer_id)
+      command(:remove_peer, @peer_id)
     end
 
     define_test "add_peer: should fail when args is a hash and peer_tableCFs provided" do
@@ -152,51 +152,51 @@ module Hbase
 
       assert_raise(ArgumentError) do
         args = { CLUSTER_KEY => cluster_key }
-        replication_admin.add_peer(@peer_id, args, table_cfs_str)
+        command(:add_peer, @peer_id, args, table_cfs_str)
       end
     end
 
     define_test "get_peer_config: works with simple clusterKey peer" do
       cluster_key = "localhost:2181:/hbase-test"
       args = { CLUSTER_KEY => cluster_key }
-      replication_admin.add_peer(@peer_id, args)
-      peer_config = replication_admin.get_peer_config(@peer_id)
+      command(:add_peer, @peer_id, args)
+      peer_config = command(:get_peer_config, @peer_id)
       assert_equal(cluster_key, peer_config.get_cluster_key)
       #cleanup
-      replication_admin.remove_peer(@peer_id)
+      command(:remove_peer, @peer_id)
     end
 
     define_test "get_peer_config: works with replicationendpointimpl peer and config params" do
       repl_impl = "org.apache.hadoop.hbase.replication.ReplicationEndpointForTest"
       config_params = { "config1" => "value1", "config2" => "value2" }
       args = { ENDPOINT_CLASSNAME => repl_impl, CONFIG => config_params}
-      replication_admin.add_peer(@peer_id, args)
-      peer_config = replication_admin.get_peer_config(@peer_id)
+      command(:add_peer, @peer_id, args)
+      peer_config = command(:get_peer_config, @peer_id)
       assert_equal(repl_impl, peer_config.get_replication_endpoint_impl)
       assert_equal(2, peer_config.get_configuration.size)
       assert_equal("value1", peer_config.get_configuration.get("config1"))
       #cleanup
-      replication_admin.remove_peer(@peer_id)
+      command(:remove_peer, @peer_id)
     end
 
     define_test "list_peer_configs: returns all peers' ReplicationPeerConfig objects" do
       cluster_key = "localhost:2181:/hbase-test"
       args = { CLUSTER_KEY => cluster_key }
       peer_id_second = '2'
-      replication_admin.add_peer(@peer_id, args)
+      command(:add_peer, @peer_id, args)
 
       repl_impl = "org.apache.hadoop.hbase.replication.ReplicationEndpointForTest"
       config_params = { "config1" => "value1", "config2" => "value2" }
       args2 = { ENDPOINT_CLASSNAME => repl_impl, CONFIG => config_params}
-      replication_admin.add_peer(peer_id_second, args2)
+      command(:add_peer, peer_id_second, args2)
 
-      peer_configs = replication_admin.list_peer_configs
+      peer_configs = command(:list_peer_configs)
       assert_equal(2, peer_configs.size)
       assert_equal(cluster_key, peer_configs.get(@peer_id).get_cluster_key)
       assert_equal(repl_impl, peer_configs.get(peer_id_second).get_replication_endpoint_impl)
       #cleanup
-      replication_admin.remove_peer(@peer_id)
-      replication_admin.remove_peer(peer_id_second)
+      command(:remove_peer, @peer_id)
+      command(:remove_peer, peer_id_second)
     end
 
     define_test "update_peer_config: can update peer config and data" do
@@ -204,7 +204,7 @@ module Hbase
       config_params = { "config1" => "value1", "config2" => "value2" }
       data_params = {"data1" => "value1", "data2" => "value2"}
       args = { ENDPOINT_CLASSNAME => repl_impl, CONFIG => config_params, DATA => data_params}
-      replication_admin.add_peer(@peer_id, args)
+      command(:add_peer, @peer_id, args)
 
       #Normally the ReplicationSourceManager will call ReplicationPeer#peer_added, but here we have to do it ourselves
       replication_admin.peer_added(@peer_id)
@@ -212,12 +212,12 @@ module Hbase
       new_config_params = { "config1" => "new_value1" }
       new_data_params = {"data1" => "new_value1"}
       new_args = {CONFIG => new_config_params, DATA => new_data_params}
-      replication_admin.update_peer_config(@peer_id, new_args)
+      command(:update_peer_config, @peer_id, new_args)
 
       #Make sure the updated key/value pairs in config and data were successfully updated, and that those we didn't
       #update are still there and unchanged
-      peer_config = replication_admin.get_peer_config(@peer_id)
-      replication_admin.remove_peer(@peer_id)
+      peer_config = command(:get_peer_config, @peer_id)
+      command(:remove_peer, @peer_id)
       assert_equal("new_value1", peer_config.get_configuration.get("config1"))
       assert_equal("value2", peer_config.get_configuration.get("config2"))
       assert_equal("new_value1", Bytes.to_string(peer_config.get_peer_data.get(Bytes.toBytes("data1"))))
@@ -227,17 +227,17 @@ module Hbase
     # assert_raise fails on native exceptions - https://jira.codehaus.org/browse/JRUBY-5279
     # Can't catch native Java exception with assert_raise in JRuby 1.6.8 as in the test below.
     # define_test "add_peer: adding a second peer with same id should error" do
-    #   replication_admin.add_peer(@peer_id, '')
-    #   assert_equal(1, replication_admin.list_peers.length)
+    #   command(:add_peer, @peer_id, '')
+    #   assert_equal(1, command(:list_peers).length)
     #
     #   assert_raise(java.lang.IllegalArgumentException) do
-    #     replication_admin.add_peer(@peer_id, '')
+    #     command(:add_peer, @peer_id, '')
     #   end
     #
-    #   assert_equal(1, replication_admin.list_peers.length, 1)
+    #   assert_equal(1, command(:list_peers).length, 1)
     #
     #   # cleanup for future tests
-    #   replication_admin.remove_peer(@peer_id)
+    #   command(:remove_peer, @peer_id)
     # end
   end
 end

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb b/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb
index 0046909..b42290f 100644
--- a/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb
@@ -45,37 +45,37 @@ module Hbase
     define_test "Labels should be created as specified" do
       label = 'TEST_LABELS'
       count = table('hbase:labels')._count_internal
-      visibility_admin.add_labels('test_label')
+      command(:add_labels, 'test_label')
       assert_equal(count + 1, table('hbase:labels')._count_internal)
     end
 
     define_test "The set/clear methods should work with authorizations" do
       label = 'TEST_AUTHS'
       user = org.apache.hadoop.hbase.security.User.getCurrent().getName();
-      visibility_admin.add_labels(label)
+      command(:add_labels, label)
       $TEST_CLUSTER.waitLabelAvailable(10000, label)
-      count = visibility_admin.get_auths(user).length
+      count = command(:get_auths, user).length
 
       # verifying the set functionality
-      visibility_admin.set_auths(user, label)
-      assert_equal(count + 1, visibility_admin.get_auths(user).length)
+      command(:set_auths, user, label)
+      assert_equal(count + 1, command(:get_auths, user).length)
       assert_block do
-        visibility_admin.get_auths(user).any? {
+        command(:get_auths, user).any? {
           |auth| org.apache.hadoop.hbase.util.Bytes::toStringBinary(auth.toByteArray) == label
         }
       end
 
       # verifying the clear functionality
-      visibility_admin.clear_auths(user, label)
-      assert_equal(count, visibility_admin.get_auths(user).length)
+      command(:clear_auths, user, label)
+      assert_equal(count, command(:get_auths, user).length)
     end
 
     define_test "The get/put methods should work for data written with Visibility" do
       label = 'TEST_VISIBILITY'
       user = org.apache.hadoop.hbase.security.User.getCurrent().getName();
-      visibility_admin.add_labels(label)
+      command(:add_labels, label)
       $TEST_CLUSTER.waitLabelAvailable(10000, label)
-      visibility_admin.set_auths(user, label)
+      command(:set_auths, user, label)
 
       # verifying put functionality
       @test_table.put(1, "x:a", 31, {VISIBILITY=>label})

http://git-wip-us.apache.org/repos/asf/hbase/blob/15c03fd1/hbase-shell/src/test/ruby/test_helper.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/test_helper.rb b/hbase-shell/src/test/ruby/test_helper.rb
index 0b3c420..ec6bb6a 100644
--- a/hbase-shell/src/test/ruby/test_helper.rb
+++ b/hbase-shell/src/test/ruby/test_helper.rb
@@ -43,13 +43,18 @@ module Hbase
 
     def setup_hbase
       hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
-      @shell = ::Shell::Shell.new(hbase)
+      @shell = ::Shell::Shell.new(hbase, interactive = false)
     end
     
     def shutdown
       @shell.hbase.shutdown
     end
 
+    # This function triggers exactly the same code path as a user's shell command.
+    def command(command, *args)
+      @shell.command(command, *args)
+    end
+
     def table(table)
       @shell.hbase_table(table)
     end
@@ -85,7 +90,7 @@ module Hbase
     def create_test_table(name)
       # Create the table if needed
       unless admin.exists?(name)
-        admin.create name, [{'NAME' => 'x', 'VERSIONS' => 5}, 'y']
+        command(:create, name, {'NAME' => 'x', 'VERSIONS' => 5}, 'y')
         return
       end
 
@@ -98,7 +103,7 @@ module Hbase
     def create_test_table_with_splits(name, splits)
       # Create the table if needed
       unless admin.exists?(name)
-        admin.create name, 'f1', splits
+        command(:create, name, 'f1', splits)
       end
 
       # Enable the table if needed
@@ -132,6 +137,18 @@ module Hbase
         puts "IGNORING DELETE ALL SNAPSHOT ERROR: #{e}"
       end
     end
+
+
+    def capture_stdout
+      begin
+        old_stdout = $stdout
+        $stdout = StringIO.new('','w')
+        yield
+        $stdout.string
+      ensure
+        $stdout = old_stdout
+      end
+    end
   end
 end
 


[32/50] hbase git commit: HBASE-15967 Metric for active ipc Readers and make default fraction of cpu count Add new metric hbase.regionserver.ipc.runningReaders Also make it so the Reader count defaults to a fraction of the processor count

Posted by sy...@apache.org.
HBASE-15967 Metric for active ipc Readers and make default fraction of cpu count
Add new metric hbase.regionserver.ipc.runningReaders
Also make it so the Reader count defaults to a fraction of the processor count


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1125215a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1125215a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1125215a

Branch: refs/heads/hbase-12439
Commit: 1125215aad3f5b149f3458ba7019c5920f6dca66
Parents: e0b70c0
Author: stack <st...@apache.org>
Authored: Sun Jun 5 11:12:05 2016 -0700
Committer: stack <st...@apache.org>
Committed: Tue Jun 7 13:10:14 2016 -0700

----------------------------------------------------------------------
 .../hbase/ipc/MetricsHBaseServerSource.java     | 11 +++++++---
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java | 19 ++++++++++++++++
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 23 +++++++++++++++-----
 3 files changed, 45 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/1125215a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index ce57e0f..43515cd 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -86,6 +86,13 @@ public interface MetricsHBaseServerSource extends BaseSource {
   String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " +
       "rest of the requests will have to be retried.";
 
+  String RUNNING_READERS = "runningReaders";
+  String RUNNING_READERS_DESCRIPTION =
+      "Count of Reader threads currently busy parsing requests to hand off to the scheduler";
+
+  void incrRunningReaders();
+  void decrRunningReaders();
+
   void authorizationSuccess();
 
   void authorizationFailure();
@@ -122,6 +129,4 @@ public interface MetricsHBaseServerSource extends BaseSource {
   void processedCall(int processingTime);
 
   void queuedAndProcessedCall(int totalTime);
-
-
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/1125215a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
index c72641d..24cc0fb 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java
@@ -57,6 +57,12 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
   private MetricHistogram requestSize;
   private MetricHistogram responseSize;
 
+  /**
+   * The count of readers currently working parsing a request as opposed to being blocked on the
+   * selector waiting on requests to come in.
+   */
+  private final MutableFastCounter runningReaders;
+
   public MetricsHBaseServerSourceImpl(String metricsName,
                                       String metricsDescription,
                                       String metricsContext,
@@ -86,6 +92,9 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
     this.exceptionsMultiTooLarge = this.getMetricsRegistry()
         .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L);
 
+    this.runningReaders = this.getMetricsRegistry()
+        .newCounter(RUNNING_READERS, RUNNING_READERS_DESCRIPTION, 0L);
+
     this.authenticationSuccesses = this.getMetricsRegistry().newCounter(
         AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L);
     this.authenticationFailures = this.getMetricsRegistry().newCounter(AUTHENTICATION_FAILURES_NAME,
@@ -109,6 +118,16 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
   }
 
   @Override
+  public void incrRunningReaders() {
+    this.runningReaders.incr(+1);
+  }
+
+  @Override
+  public void decrRunningReaders() {
+    this.runningReaders.incr(-1);
+  }
+
+  @Override
   public void authorizationSuccess() {
     authorizationSuccesses.incr();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1125215a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index aca3fdd..c9d2639 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -625,7 +625,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
     public Listener(final String name) throws IOException {
       super(name);
-      // The backlog of requests that we will have the serversocket carry.
+      // The backlog of requests that we will have the serversocket carry. It is not enough
+      // just setting this config. You need to set the backlog in the kernel too.
       int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
       // Create a new server socket and set to non blocking mode
       acceptChannel = ServerSocketChannel.open();
@@ -690,7 +691,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
               iter.remove();
               if (key.isValid()) {
                 if (key.isReadable()) {
-                  doRead(key);
+                  metrics.getMetricsSource().incrRunningReaders();
+                  try {
+                    doRead(key);
+                  } finally {
+                    metrics.getMetricsSource().decrRunningReaders();
+                  }
                 }
               }
               key = null;
@@ -734,8 +740,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
             iter.remove();
             try {
               if (key.isValid()) {
-                if (key.isAcceptable())
+                if (key.isAcceptable()) {
                   doAccept(key);
+                }
               }
             } catch (IOException ignored) {
               if (LOG.isTraceEnabled()) LOG.trace("ignored", ignored);
@@ -830,7 +837,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       try {
         count = c.readAndProcess();
       } catch (InterruptedException ieo) {
-        LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
+        LOG.info(Thread.currentThread().getName() +
+            ": readAndProcess caught InterruptedException", ieo);
         throw ieo;
       } catch (Exception e) {
         if (LOG.isDebugEnabled()) {
@@ -1159,6 +1167,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     private ByteBuffer dataLengthBuffer;
     protected final ConcurrentLinkedDeque<Call> responseQueue = new ConcurrentLinkedDeque<Call>();
     private final Lock responseWriteLock = new ReentrantLock();
+    // EXPENSIVE: Counters cost lots of CPU. Remove. Used just to see if idle or not. Use boolean.
     private Counter rpcCount = new Counter(); // number of outstanding rpcs
     private long lastContact;
     private InetAddress addr;
@@ -2000,7 +2009,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     // See declaration above for documentation on what this size is.
     this.maxQueueSizeInBytes =
       this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
-    this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10);
+    // Have the Reader thread count default to 1/4 of the processors. This seems to do pretty
+    // well. See the metric hbase.regionserver.ipc.runningReaders to see if you need to change it.
+    int processors = Runtime.getRuntime().availableProcessors();
+    this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size",
+        Math.max(8, processors/ 4));
     this.purgeTimeout = conf.getLong("hbase.ipc.client.call.purge.timeout",
       2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
     this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME);
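
With this change the Reader pool no longer defaults to a fixed 10 threads but to
max(8, availableProcessors / 4), and the new hbase.regionserver.ipc.runningReaders
metric shows how many Readers are busy at a given moment. A minimal sketch of
overriding the default, assuming a standard HBaseConfiguration setup (the class
name and the value 16 are purely illustrative, not part of this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ReaderPoolConfig {
      public static Configuration withReaderThreads(int readers) {
        // Same key the RpcServer constructor reads above; when unset, the
        // server now falls back to max(8, availableProcessors / 4).
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.ipc.server.read.threadpool.size", readers);
        return conf;
      }
    }

If runningReaders stays pinned near the pool size under load, raising this value
is the intended tuning knob; in practice the key would be set in hbase-site.xml
rather than in code.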


[48/50] hbase git commit: HBASE-15958 Implement ClaimQueues on top of HBase

Posted by sy...@apache.org.
HBASE-15958 Implement ClaimQueues on top of HBase

Building on HBASE-15883.
This change implements the claim-queues procedure within an HBase table and adds
unit tests for claimQueue.
Peer tracking is still performed by ZooKeeper.
The queueId tracking procedure was also modified so we no longer have to scan the
Replication Table, which makes the queue naming schema slightly different from
ReplicationQueuesZKImpl's.

Signed-off-by: Elliott Clark <ec...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/babdedc1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/babdedc1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/babdedc1

Branch: refs/heads/hbase-12439
Commit: babdedc1b0f0159eb526fb5c9ee08525de7ce404
Parents: 108d39a
Author: Joseph Hwang <jz...@fb.com>
Authored: Thu May 19 17:14:33 2016 -0700
Committer: Elliott Clark <ec...@apache.org>
Committed: Thu Jun 9 15:05:54 2016 -0700

----------------------------------------------------------------------
 .../hbase/replication/ReplicationQueues.java    |   8 +-
 .../replication/ReplicationQueuesArguments.java |   4 +-
 .../replication/ReplicationQueuesHBaseImpl.java | 485 ++++++++++++-------
 .../replication/ReplicationQueuesZKImpl.java    |  26 +-
 .../regionserver/ReplicationSourceManager.java  |   7 +-
 .../replication/TestReplicationStateBasic.java  |   6 +-
 .../TestReplicationStateHBaseImpl.java          | 302 +++++++++---
 .../replication/TestReplicationStateZKImpl.java |   1 -
 .../TestReplicationSourceManager.java           |  12 +-
 9 files changed, 579 insertions(+), 272 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/babdedc1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
index 809b122..0de0cc8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
@@ -19,8 +19,8 @@
 package org.apache.hadoop.hbase.replication;
 
 import java.util.List;
-import java.util.SortedMap;
-import java.util.SortedSet;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
@@ -96,10 +96,10 @@ public interface ReplicationQueues {
   /**
    * Take ownership for the set of queues belonging to a dead region server.
    * @param regionserver the id of the dead region server
-   * @return A SortedMap of the queues that have been claimed, including a SortedSet of WALs in
+   * @return A Map of the queues that have been claimed, including a Set of WALs in
    *         each queue. Returns an empty map if no queues were failed-over.
    */
-  SortedMap<String, SortedSet<String>> claimQueues(String regionserver);
+  Map<String, Set<String>> claimQueues(String regionserver);
 
   /**
    * Get a list of all region servers that have outstanding replication queues. These servers could

http://git-wip-us.apache.org/repos/asf/hbase/blob/babdedc1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
index 4907b73..4fdc4e7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
@@ -56,11 +56,11 @@ public class ReplicationQueuesArguments {
     this.conf = conf;
   }
 
-  public Abortable getAbort() {
+  public Abortable getAbortable() {
     return abort;
   }
 
-  public void setAbort(Abortable abort) {
+  public void setAbortable(Abortable abort) {
     this.abort = abort;
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/babdedc1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
index 29f0632..34a5289 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
@@ -19,6 +19,8 @@
 
 package org.apache.hadoop.hbase.replication;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 
 import org.apache.hadoop.hbase.Abortable;
@@ -41,25 +43,41 @@ import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.filter.CompareFilter;
-import org.apache.hadoop.hbase.filter.FilterList;
-import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.RetryCounterFactory;
+import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
 import sun.reflect.generics.reflectiveObjects.NotImplementedException;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.SortedMap;
-import java.util.SortedSet;
+import java.util.Set;
+
+/**
+ * This class provides an implementation of the ReplicationQueues interface using an HBase table
+ * "Replication Table". The basic schema of this table will store each individual queue as a
+ * separate row. The row key will be a unique identifier composed of the creating server's name
+ * and the queueId. Each queue must have the following two columns:
+ *  COL_OWNER: tracks which server is currently responsible for tracking the queue
+ *  COL_OWNER_HISTORY: a "|" delimited record of the queue's past owners
+ * They will also have columns mapping [WAL filename : offset]
+ * One key difference from the ReplicationQueuesZKImpl is that when queues are reclaimed we
+ * simply return the queue's HBase row key as its new "queueId"
+ */
 
 @InterfaceAudience.Private
-public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
+public class ReplicationQueuesHBaseImpl extends ReplicationStateZKBase
+    implements ReplicationQueues {
+
+  private static final Log LOG = LogFactory.getLog(ReplicationQueuesHBaseImpl.class);
 
   /** Name of the HBase Table used for tracking replication*/
   public static final TableName REPLICATION_TABLE_NAME =
@@ -68,7 +86,12 @@ public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
   // Column family and column names for the Replication Table
   private static final byte[] CF = Bytes.toBytes("r");
   private static final byte[] COL_OWNER = Bytes.toBytes("o");
-  private static final byte[] COL_QUEUE_ID = Bytes.toBytes("q");
+  private static final byte[] COL_OWNER_HISTORY = Bytes.toBytes("h");
+
+  // The value used to delimit the queueId and server name inside of a queue's row key. Currently a
+  // hyphen, because it is guaranteed that queueId (which is a cluster id) cannot contain hyphens.
+  // See HBASE-11394.
+  private static final String ROW_KEY_DELIMITER = "-";
 
   // Column Descriptor for the Replication Table
   private static final HColumnDescriptor REPLICATION_COL_DESCRIPTOR =
@@ -80,7 +103,8 @@ public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
       .setCacheDataInL1(true);
 
   // Common byte values used in replication offset tracking
-  private static final byte[] INITIAL_OFFSET = Bytes.toBytes(0L);
+  private static final byte[] INITIAL_OFFSET_BYTES = Bytes.toBytes(0L);
+  private static final byte[] EMPTY_STRING_BYTES = Bytes.toBytes("");
 
   /*
    * Make sure that HBase table operations for replication have a high number of retries. This is
@@ -92,104 +116,92 @@ public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
   private static final int RPC_TIMEOUT = 2000;
   private static final int OPERATION_TIMEOUT = CLIENT_RETRIES * RPC_TIMEOUT;
 
-  private final Configuration conf;
-  private final Admin admin;
-  private final Connection connection;
-  private final Table replicationTable;
-  private final Abortable abortable;
+  private Configuration modifiedConf;
+  private Admin admin;
+  private Connection connection;
+  private Table replicationTable;
   private String serverName = null;
   private byte[] serverNameBytes = null;
 
-  public ReplicationQueuesHBaseImpl(ReplicationQueuesArguments args) throws IOException {
-    this(args.getConf(), args.getAbort());
+  public ReplicationQueuesHBaseImpl(ReplicationQueuesArguments args) {
+    this(args.getConf(), args.getAbortable(), args.getZk());
   }
 
-  public ReplicationQueuesHBaseImpl(Configuration conf, Abortable abort) throws IOException {
-    this.conf = new Configuration(conf);
-    // Modify the connection's config so that the Replication Table it returns has a much higher
-    // number of client retries
-    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, CLIENT_RETRIES);
-    this.connection = ConnectionFactory.createConnection(conf);
-    this.admin = connection.getAdmin();
-    this.abortable = abort;
-    replicationTable = createAndGetReplicationTable();
-    replicationTable.setRpcTimeout(RPC_TIMEOUT);
-    replicationTable.setOperationTimeout(OPERATION_TIMEOUT);
+  public ReplicationQueuesHBaseImpl(Configuration conf, Abortable abort, ZooKeeperWatcher zkw) {
+    super(zkw, conf, abort);
+    modifiedConf = new Configuration(conf);
+    modifiedConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, CLIENT_RETRIES);
   }
 
   @Override
   public void init(String serverName) throws ReplicationException {
-    this.serverName = serverName;
-    this.serverNameBytes = Bytes.toBytes(serverName);
+    try {
+      this.serverName = serverName;
+      this.serverNameBytes = Bytes.toBytes(serverName);
+      // Modify the connection's config so that the Replication Table it returns has a much higher
+      // number of client retries
+      this.connection = ConnectionFactory.createConnection(modifiedConf);
+      this.admin = connection.getAdmin();
+      replicationTable = createAndGetReplicationTable();
+      replicationTable.setRpcTimeout(RPC_TIMEOUT);
+      replicationTable.setOperationTimeout(OPERATION_TIMEOUT);
+    } catch (IOException e) {
+      throw new ReplicationException(e);
+    }
   }
 
   @Override
   public void removeQueue(String queueId) {
+
     try {
-      byte[] rowKey = this.queueIdToRowKey(queueId);
-      // The rowkey will be null if the queue cannot be found in the Replication Table
-      if (rowKey == null) {
-        String errMsg = "Could not remove non-existent queue with queueId=" + queueId;
-        abortable.abort(errMsg, new ReplicationException(errMsg));
-        return;
-      }
+      byte[] rowKey = queueIdToRowKey(queueId);
       Delete deleteQueue = new Delete(rowKey);
       safeQueueUpdate(deleteQueue);
-    } catch (IOException e) {
-      abortable.abort("Could not remove queue with queueId=" + queueId, e);
+    } catch (IOException | ReplicationException e) {
+      String errMsg = "Failed removing queue queueId=" + queueId;
+      abortable.abort(errMsg, e);
     }
   }
 
   @Override
   public void addLog(String queueId, String filename) throws ReplicationException {
     try {
-      // Check if the queue info (Owner, QueueId) is currently stored in the Replication Table
-      if (this.queueIdToRowKey(queueId) == null) {
-        // Each queue will have an Owner, QueueId, and a collection of [WAL:offset] key values.
-        Put putNewQueue = new Put(Bytes.toBytes(buildServerQueueName(queueId)));
-        putNewQueue.addColumn(CF, COL_OWNER, Bytes.toBytes(serverName));
-        putNewQueue.addColumn(CF, COL_QUEUE_ID, Bytes.toBytes(queueId));
-        putNewQueue.addColumn(CF, Bytes.toBytes(filename), INITIAL_OFFSET);
+      if (!checkQueueExists(queueId)) {
+        // Each queue will have an Owner, OwnerHistory, and a collection of [WAL:offset] key values
+        Put putNewQueue = new Put(Bytes.toBytes(buildQueueRowKey(queueId)));
+        putNewQueue.addColumn(CF, COL_OWNER, serverNameBytes);
+        putNewQueue.addColumn(CF, COL_OWNER_HISTORY, EMPTY_STRING_BYTES);
+        putNewQueue.addColumn(CF, Bytes.toBytes(filename), INITIAL_OFFSET_BYTES);
         replicationTable.put(putNewQueue);
       } else {
         // Otherwise simply add the new log and offset as a new column
-        Put putNewLog = new Put(this.queueIdToRowKey(queueId));
-        putNewLog.addColumn(CF, Bytes.toBytes(filename), INITIAL_OFFSET);
+        Put putNewLog = new Put(queueIdToRowKey(queueId));
+        putNewLog.addColumn(CF, Bytes.toBytes(filename), INITIAL_OFFSET_BYTES);
         safeQueueUpdate(putNewLog);
       }
-    } catch (IOException e) {
-      abortable.abort("Could not add queue queueId=" + queueId + " filename=" + filename, e);
+    } catch (IOException | ReplicationException e) {
+      String errMsg = "Failed adding log queueId=" + queueId + " filename=" + filename;
+      abortable.abort(errMsg, e);
     }
   }
 
   @Override
   public void removeLog(String queueId, String filename) {
     try {
-      byte[] rowKey = this.queueIdToRowKey(queueId);
-      if (rowKey == null) {
-        String errMsg = "Could not remove log from non-existent queueId=" + queueId + ", filename="
-          + filename;
-        abortable.abort(errMsg, new ReplicationException(errMsg));
-        return;
-      }
+      byte[] rowKey = queueIdToRowKey(queueId);
       Delete delete = new Delete(rowKey);
       delete.addColumns(CF, Bytes.toBytes(filename));
       safeQueueUpdate(delete);
-    } catch (IOException e) {
-      abortable.abort("Could not remove log from queueId=" + queueId + ", filename=" + filename, e);
+    } catch (IOException | ReplicationException e) {
+      String errMsg = "Failed removing log queueId=" + queueId + " filename=" + filename;
+      abortable.abort(errMsg, e);
     }
   }
 
   @Override
   public void setLogPosition(String queueId, String filename, long position) {
     try {
-      byte[] rowKey = this.queueIdToRowKey(queueId);
-      if (rowKey == null) {
-        String errMsg = "Could not set position of log from non-existent queueId=" + queueId +
-          ", filename=" + filename;
-        abortable.abort(errMsg, new ReplicationException(errMsg));
-        return;
-      }
+      byte[] rowKey = queueIdToRowKey(queueId);
       // Check that the log exists. addLog() must have been called before setLogPosition().
       Get checkLogExists = new Get(rowKey);
       checkLogExists.addColumn(CF, Bytes.toBytes(filename));
@@ -203,24 +215,21 @@ public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
       Put walAndOffset = new Put(rowKey);
       walAndOffset.addColumn(CF, Bytes.toBytes(filename), Bytes.toBytes(position));
       safeQueueUpdate(walAndOffset);
-    } catch (IOException e) {
-      abortable.abort("Failed to write replication wal position (filename=" + filename +
-          ", position=" + position + ")", e);
+    } catch (IOException | ReplicationException e) {
+      String errMsg = "Failed writing log position queueId=" + queueId + "filename=" +
+        filename + " position=" + position;
+      abortable.abort(errMsg, e);
     }
   }
 
   @Override
   public long getLogPosition(String queueId, String filename) throws ReplicationException {
     try {
-      byte[] rowKey = this.queueIdToRowKey(queueId);
-      if (rowKey == null) {
-        throw new ReplicationException("Could not get position in log for non-existent queue " +
-            "queueId=" + queueId + ", filename=" + filename);
-      }
+      byte[] rowKey = queueIdToRowKey(queueId);
       Get getOffset = new Get(rowKey);
       getOffset.addColumn(CF, Bytes.toBytes(filename));
-      Result result = replicationTable.get(getOffset);
-      if (result.isEmpty()) {
+      Result result = getResultIfOwner(getOffset);
+      if (result == null || !result.containsColumn(CF, Bytes.toBytes(filename))) {
         throw new ReplicationException("Could not read empty result while getting log position " +
             "queueId=" + queueId + ", filename=" + filename);
       }
@@ -241,53 +250,117 @@ public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
 
   @Override
   public List<String> getLogsInQueue(String queueId) {
-    List<String> logs = new ArrayList<String>();
+    byte[] rowKey = queueIdToRowKey(queueId);
+    return getLogsInQueue(rowKey);
+  }
+
+  private List<String> getLogsInQueue(byte[] rowKey) {
+    String errMsg = "Could not get logs in queue queueId=" + Bytes.toString(rowKey);
     try {
-      byte[] rowKey = this.queueIdToRowKey(queueId);
-      if (rowKey == null) {
-        String errMsg = "Could not get logs from non-existent queueId=" + queueId;
-        abortable.abort(errMsg, new ReplicationException(errMsg));
-        return null;
-      }
       Get getQueue = new Get(rowKey);
-      Result queue = replicationTable.get(getQueue);
-      if (queue.isEmpty()) {
+      Result queue = getResultIfOwner(getQueue);
+      // The returned queue could be null if we have lost ownership of it
+      if (queue == null) {
+        abortable.abort(errMsg, new ReplicationException(errMsg));
         return null;
       }
-      Map<byte[], byte[]> familyMap = queue.getFamilyMap(CF);
-      for (byte[] cQualifier : familyMap.keySet()) {
-        if (Arrays.equals(cQualifier, COL_OWNER) || Arrays.equals(cQualifier, COL_QUEUE_ID)) {
-          continue;
-        }
-        logs.add(Bytes.toString(cQualifier));
-      }
+      return readWALsFromResult(queue);
     } catch (IOException e) {
-      abortable.abort("Could not get logs from queue queueId=" + queueId, e);
+      abortable.abort(errMsg, e);
       return null;
     }
-    return logs;
   }
 
   @Override
   public List<String> getAllQueues() {
+    List<String> allQueues = new ArrayList<String>();
+    ResultScanner queueScanner = null;
     try {
-      return this.getQueuesBelongingToServer(serverName);
+      queueScanner = this.getQueuesBelongingToServer(serverName);
+      for (Result queue : queueScanner) {
+        String rowKey =  Bytes.toString(queue.getRow());
+        // If the queue does not have a Owner History, then we must be its original owner. So we
+        // want to return its queueId in raw form
+        if (Bytes.toString(queue.getValue(CF, COL_OWNER_HISTORY)).length() == 0) {
+          allQueues.add(getRawQueueIdFromRowKey(rowKey));
+        } else {
+          allQueues.add(rowKey);
+        }
+      }
+      return allQueues;
     } catch (IOException e) {
-      abortable.abort("Could not get all replication queues", e);
+      String errMsg = "Failed getting list of all replication queues";
+      abortable.abort(errMsg, e);
       return null;
+    } finally {
+      if (queueScanner != null) {
+        queueScanner.close();
+      }
     }
   }
 
   @Override
-  public SortedMap<String, SortedSet<String>> claimQueues(String regionserver) {
-    // TODO
-    throw new NotImplementedException();
+  public Map<String, Set<String>> claimQueues(String regionserver) {
+    Map<String, Set<String>> queues = new HashMap<>();
+    if (isThisOurRegionServer(regionserver)) {
+      return queues;
+    }
+    ResultScanner queuesToClaim = null;
+    try {
+      queuesToClaim = this.getQueuesBelongingToServer(regionserver);
+      for (Result queue : queuesToClaim) {
+        if (attemptToClaimQueue(queue, regionserver)) {
+          String rowKey = Bytes.toString(queue.getRow());
+          ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(rowKey);
+          if (peerExists(replicationQueueInfo.getPeerId())) {
+            Set<String> sortedLogs = new HashSet<String>();
+            List<String> logs = getLogsInQueue(queue.getRow());
+            for (String log : logs) {
+              sortedLogs.add(log);
+            }
+            queues.put(rowKey, sortedLogs);
+            LOG.info(serverName + " has claimed queue " + rowKey + " from " + regionserver);
+          } else {
+            // Delete orphaned queues
+            removeQueue(Bytes.toString(queue.getRow()));
+            LOG.info(serverName + " has deleted abandoned queue " + rowKey + " from " +
+                regionserver);
+          }
+        }
+      }
+    } catch (IOException | KeeperException e) {
+      String errMsg = "Failed claiming queues for regionserver=" + regionserver;
+      abortable.abort(errMsg, e);
+      queues.clear();
+    } finally {
+      if (queuesToClaim != null) {
+        queuesToClaim.close();
+      }
+    }
+    return queues;
   }
 
   @Override
   public List<String> getListOfReplicators() {
-    // TODO
-    throw new NotImplementedException();
+    // scan all of the queues and return a list of all unique OWNER values
+    Set<String> peerServers = new HashSet<String>();
+    ResultScanner allQueuesInCluster = null;
+    try {
+      Scan scan = new Scan();
+      scan.addColumn(CF, COL_OWNER);
+      allQueuesInCluster = replicationTable.getScanner(scan);
+      for (Result queue : allQueuesInCluster) {
+        peerServers.add(Bytes.toString(queue.getValue(CF, COL_OWNER)));
+      }
+    } catch (IOException e) {
+      String errMsg = "Failed getting list of replicators";
+      abortable.abort(errMsg, e);
+    } finally {
+      if (allQueuesInCluster != null) {
+        allQueuesInCluster.close();
+      }
+    }
+    return new ArrayList<String>(peerServers);
   }
 
   @Override
@@ -363,6 +436,7 @@ public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
   /**
    * Create the replication table with the provided HColumnDescriptor REPLICATION_COL_DESCRIPTOR
    * in ReplicationQueuesHBaseImpl
+   *
    * @throws IOException
    */
   private void createReplicationTable() throws IOException {
@@ -372,41 +446,49 @@ public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
   }
 
   /**
-   * Builds the unique identifier for a queue in the Replication table by appending the queueId to
-   * the servername
-   *
-   * @param queueId a String that identifies the queue
-   * @return unique identifier for a queue in the Replication table
+   * Build the row key for the given queueId. This will uniquely identify it from all other queues
+   * in the cluster.
+   * @param serverName The owner of the queue
+   * @param queueId String identifier of the queue
+   * @return String representation of the queue's row key
+   */
+  private String buildQueueRowKey(String serverName, String queueId) {
+    return queueId + ROW_KEY_DELIMITER + serverName;
+  }
+
+  private String buildQueueRowKey(String queueId) {
+    return buildQueueRowKey(serverName, queueId);
+  }
+
+  /**
+   * Parse the original queueId from a row key
+   * @param rowKey String representation of a queue's row key
+   * @return the original queueId
    */
-  private String buildServerQueueName(String queueId) {
-    return serverName + "-" + queueId;
+  private String getRawQueueIdFromRowKey(String rowKey) {
+    return rowKey.split(ROW_KEY_DELIMITER)[0];
   }
-  
+
   /**
    * See safeQueueUpdate(RowMutations mutate)
+   *
    * @param put Row mutation to perform on the queue
    */
-  private void safeQueueUpdate(Put put) {
+  private void safeQueueUpdate(Put put) throws ReplicationException, IOException {
     RowMutations mutations = new RowMutations(put.getRow());
-    try {
-      mutations.add(put);
-    } catch (IOException e){
-      abortable.abort("Failed to update Replication Table because of IOException", e);
-    }
+    mutations.add(put);
     safeQueueUpdate(mutations);
   }
 
   /**
    * See safeQueueUpdate(RowMutations mutate)
+   *
    * @param delete Row mutation to perform on the queue
    */
-  private void safeQueueUpdate(Delete delete) {
+  private void safeQueueUpdate(Delete delete) throws ReplicationException,
+      IOException {
     RowMutations mutations = new RowMutations(delete.getRow());
-    try {
-      mutations.add(delete);
-    } catch (IOException e) {
-      abortable.abort("Failed to update Replication Table because of IOException", e);
-    }
+    mutations.add(delete);
     safeQueueUpdate(mutations);
   }
 
@@ -417,16 +499,30 @@ public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
    *
    * @param mutate Mutation to perform on a given queue
    */
-  private void safeQueueUpdate(RowMutations mutate) {
-    try {
-      boolean updateSuccess = replicationTable.checkAndMutate(mutate.getRow(), CF, COL_OWNER,
-        CompareFilter.CompareOp.EQUAL, serverNameBytes, mutate);
-      if (!updateSuccess) {
-        String errMsg = "Failed to update Replication Table because we lost queue ownership";
-        abortable.abort(errMsg, new ReplicationException(errMsg));
-      }
-    } catch (IOException e) {
-      abortable.abort("Failed to update Replication Table because of IOException", e);
+  private void safeQueueUpdate(RowMutations mutate) throws ReplicationException, IOException{
+    boolean updateSuccess = replicationTable.checkAndMutate(mutate.getRow(), CF, COL_OWNER,
+      CompareFilter.CompareOp.EQUAL, serverNameBytes, mutate);
+    if (!updateSuccess) {
+      throw new ReplicationException("Failed to update Replication Table because we lost queue " +
+        " ownership");
+    }
+  }
+
+  /**
+   * Returns a queue's row key given either its raw or reclaimed queueId
+   *
+   * @param queueId queueId of the queue
+   * @return byte representation of the queue's row key
+   */
+  private byte[] queueIdToRowKey(String queueId) {
+    // Cluster id's are guaranteed to have no hyphens, so if the passed in queueId has no hyphen
+    // then this is not a reclaimed queue.
+    if (!queueId.contains(ROW_KEY_DELIMITER)) {
+      return Bytes.toBytes(buildQueueRowKey(queueId));
+      // If the queueId contained some hyphen it was reclaimed. In this case, the queueId is the
+      // queue's row key
+    } else {
+      return Bytes.toBytes(queueId);
     }
   }
 
@@ -434,64 +530,115 @@ public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
    * Get the QueueIds belonging to the named server from the ReplicationTable
    *
    * @param server name of the server
-   * @return a list of the QueueIds belonging to the server
+   * @return a ResultScanner over the QueueIds belonging to the server
    * @throws IOException
    */
-  private List<String> getQueuesBelongingToServer(String server) throws IOException {
-    List<String> queues = new ArrayList<String>();
+  private ResultScanner getQueuesBelongingToServer(String server) throws IOException {
     Scan scan = new Scan();
     SingleColumnValueFilter filterMyQueues = new SingleColumnValueFilter(CF, COL_OWNER,
       CompareFilter.CompareOp.EQUAL, Bytes.toBytes(server));
     scan.setFilter(filterMyQueues);
-    scan.addColumn(CF, COL_QUEUE_ID);
     scan.addColumn(CF, COL_OWNER);
+    scan.addColumn(CF, COL_OWNER_HISTORY);
     ResultScanner results = replicationTable.getScanner(scan);
-    for (Result result : results) {
-      queues.add(Bytes.toString(result.getValue(CF, COL_QUEUE_ID)));
+    return results;
+  }
+
+  /**
+   * Check if the queue specified by queueId is stored in HBase
+   *
+   * @param queueId Either raw or reclaimed format of the queueId
+   * @return Whether the queue is stored in HBase
+   * @throws IOException
+   */
+  private boolean checkQueueExists(String queueId) throws IOException {
+    byte[] rowKey = queueIdToRowKey(queueId);
+    return replicationTable.exists(new Get(rowKey));
+  }
+
+  /**
+   * Read all of the WAL's from a queue into a list
+   *
+   * @param queue HBase query result containing the queue
+   * @return a list of all the WAL filenames
+   */
+  private List<String> readWALsFromResult(Result queue) {
+    List<String> wals = new ArrayList<>();
+    Map<byte[], byte[]> familyMap = queue.getFamilyMap(CF);
+    for(byte[] cQualifier : familyMap.keySet()) {
+      // Ignore the meta data fields of the queue
+      if (Arrays.equals(cQualifier, COL_OWNER) || Arrays.equals(cQualifier, COL_OWNER_HISTORY)) {
+        continue;
+      }
+      wals.add(Bytes.toString(cQualifier));
     }
-    results.close();
-    return queues;
+    return wals;
   }
 
   /**
-   * Finds the row key of the HBase row corresponding to the provided queue. This has to be done,
-   * because the row key is [original server name + "-" + queueId0]. And the original server will
-   * make calls to getLog(), getQueue(), etc. with the argument queueId = queueId0.
-   * On the original server we can build the row key by concatenating servername + queueId0.
-   * Yet if the queue is claimed by another server, future calls to getLog(), getQueue(), etc.
-   * will be made with the argument queueId = queueId0 + "-" + pastOwner0 + "-" + pastOwner1 ...
-   * so we need a way to look up rows by their modified queueId's.
+   * Attempt to claim the given queue with a checkAndPut on the OWNER column. We check that the
+   * recently killed server is still the OWNER before we claim it.
    *
-   * TODO: Consider updating the queueId passed to getLog, getQueue()... inside of ReplicationSource
-   * TODO: and ReplicationSourceManager or the parsing of the passed in queueId's so that we don't
-   * TODO have to scan the table for row keys for each update. See HBASE-15956.
+   * @param queue The queue that we are trying to claim
+   * @param originalServer The server that originally owned the queue
+   * @return Whether we successfully claimed the queue
+   * @throws IOException
+   */
+  private boolean attemptToClaimQueue(Result queue, String originalServer) throws IOException {
+    Put putQueueNameAndHistory = new Put(queue.getRow());
+    putQueueNameAndHistory.addColumn(CF, COL_OWNER, Bytes.toBytes(serverName));
+    String newOwnerHistory = buildClaimedQueueHistory(Bytes.toString(queue.getValue(CF,
+      COL_OWNER_HISTORY)), originalServer);
+    putQueueNameAndHistory.addColumn(CF, COL_OWNER_HISTORY, Bytes.toBytes(newOwnerHistory));
+    RowMutations claimAndRenameQueue = new RowMutations(queue.getRow());
+    claimAndRenameQueue.add(putQueueNameAndHistory);
+    // Attempt to claim ownership for this queue by checking if the current OWNER is the original
+    // server. If it is not then another RS has already claimed it. If it is we set ourselves as the
+    // new owner and update the queue's history
+    boolean success = replicationTable.checkAndMutate(queue.getRow(), CF, COL_OWNER,
+      CompareFilter.CompareOp.EQUAL, Bytes.toBytes(originalServer), claimAndRenameQueue);
+    return success;
+  }
+
+  /**
+   * Creates a "|" delimited record of the queue's past region server owners.
    *
-   * TODO: We can also cache queueId's if ReplicationQueuesHBaseImpl becomes a bottleneck. We
-   * TODO: currently perform scan's over all the rows looking for one with a matching QueueId.
+   * @param originalHistory the queue's original owner history
+   * @param oldServer the name of the server that used to own the queue
+   * @return the queue's new owner history
+   */
+  private String buildClaimedQueueHistory(String originalHistory, String oldServer) {
+    return originalHistory + "|" + oldServer;
+  }
+
+  /**
+   * Attempts to run a Get on some queue. Will only return a non-null result if we currently own
+   * the queue.
    *
-   * @param queueId string representation of the queue id
-   * @return the rowkey of the corresponding queue. This returns null if the corresponding queue
-   * cannot be found.
+   * @param get The get that we want to query
+   * @return The result of the Get if this server owns the queue, otherwise null
    * @throws IOException
    */
-  private byte[] queueIdToRowKey(String queueId) throws IOException {
-    Scan scan = new Scan();
-    scan.addColumn(CF, COL_QUEUE_ID);
-    scan.addColumn(CF, COL_OWNER);
+  private Result getResultIfOwner(Get get) throws IOException {
+    Scan scan = new Scan(get);
+    // Check if the Get currently contains all columns or only specific columns
+    if (scan.getFamilyMap().size() > 0) {
+      // Add the OWNER column if the scan is already only over specific columns
+      scan.addColumn(CF, COL_OWNER);
+    }
     scan.setMaxResultSize(1);
-    // Search for the queue that matches this queueId
-    SingleColumnValueFilter filterByQueueId = new SingleColumnValueFilter(CF, COL_QUEUE_ID,
-        CompareFilter.CompareOp.EQUAL, Bytes.toBytes(queueId));
-    // Make sure that we are the owners of the queue. QueueId's may overlap.
-    SingleColumnValueFilter filterByOwner = new SingleColumnValueFilter(CF, COL_OWNER,
-        CompareFilter.CompareOp.EQUAL, Bytes.toBytes(serverName));
-    // We only want the row key
-    FirstKeyOnlyFilter filterOutColumns = new FirstKeyOnlyFilter();
-    FilterList filterList = new FilterList(filterByQueueId, filterByOwner, filterOutColumns);
-    scan.setFilter(filterList);
-    ResultScanner results = replicationTable.getScanner(scan);
-    Result result = results.next();
-    results.close();
-    return (result == null) ? null : result.getRow();
+    SingleColumnValueFilter checkOwner = new SingleColumnValueFilter(CF, COL_OWNER,
+      CompareFilter.CompareOp.EQUAL, serverNameBytes);
+    scan.setFilter(checkOwner);
+    ResultScanner scanner = null;
+    try {
+      scanner = replicationTable.getScanner(scan);
+      Result result = scanner.next();
+      return (result == null || result.isEmpty()) ? null : result;
+    } finally {
+      if (scanner != null) {
+        scanner.close();
+      }
+    }
   }
 }
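
For reference, the claim protocol above reduces to a single atomic checkAndMutate on the
queue's row. A minimal sketch of that pattern, assuming a replication table whose rows
carry an OWNER column under family CF (the class and field names here are placeholders
for illustration, not the committed API):

  import java.io.IOException;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.RowMutations;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.filter.CompareFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class QueueClaimSketch {
    private static final byte[] CF = Bytes.toBytes("r");
    private static final byte[] COL_OWNER = Bytes.toBytes("o");

    /** Returns true only if this server atomically took over the queue row. */
    static boolean claim(Table replicationTable, byte[] row, String deadServer,
        String newOwner) throws IOException {
      Put takeOwnership = new Put(row);
      takeOwnership.addColumn(CF, COL_OWNER, Bytes.toBytes(newOwner));
      RowMutations mutations = new RowMutations(row);
      mutations.add(takeOwnership);
      // Succeeds only while OWNER still equals the dead server; a concurrent
      // claimer that won the race makes this return false instead of throwing.
      return replicationTable.checkAndMutate(row, CF, COL_OWNER,
          CompareFilter.CompareOp.EQUAL, Bytes.toBytes(deadServer), mutations);
    }
  }

A false return simply means another region server claimed the queue first, which is the
expected contention outcome rather than an error.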

http://git-wip-us.apache.org/repos/asf/hbase/blob/babdedc1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index f03efff..a3635e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -19,11 +19,11 @@
 package org.apache.hadoop.hbase.replication;
 
 import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -73,7 +73,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
   private static final Log LOG = LogFactory.getLog(ReplicationQueuesZKImpl.class);
 
   public ReplicationQueuesZKImpl(ReplicationQueuesArguments args) {
-    this(args.getZk(), args.getConf(), args.getAbort());
+    this(args.getZk(), args.getConf(), args.getAbortable());
   }
 
   public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf,
@@ -178,8 +178,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
   }
 
   @Override
-  public SortedMap<String, SortedSet<String>> claimQueues(String regionserverZnode) {
-    SortedMap<String, SortedSet<String>> newQueues = new TreeMap<String, SortedSet<String>>();
+  public Map<String, Set<String>> claimQueues(String regionserverZnode) {
+    Map<String, Set<String>> newQueues = new HashMap<>();
     // check whether there is multi support. If yes, use it.
     if (conf.getBoolean(HConstants.ZOOKEEPER_USEMULTI, true)) {
       LOG.info("Atomically moving " + regionserverZnode + "'s WALs to my queue");
@@ -304,8 +304,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
    * @param znode pertaining to the region server to copy the queues from
    * @return WAL queues sorted per peer cluster
    */
-  private SortedMap<String, SortedSet<String>> copyQueuesFromRSUsingMulti(String znode) {
-    SortedMap<String, SortedSet<String>> queues = new TreeMap<String, SortedSet<String>>();
+  private Map<String, Set<String>> copyQueuesFromRSUsingMulti(String znode) {
+    Map<String, Set<String>> queues = new HashMap<>();
     // hbase/replication/rs/deadrs
     String deadRSZnodePath = ZKUtil.joinZNode(this.queuesZNode, znode);
     List<String> peerIdsToProcess = null;
@@ -330,7 +330,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
           continue; // empty log queue.
         }
         // create the new cluster znode
-        SortedSet<String> logQueue = new TreeSet<String>();
+        Set<String> logQueue = new HashSet<String>();
         queues.put(newPeerId, logQueue);
         ZKUtilOp op = ZKUtilOp.createAndFailSilent(newPeerZnode, HConstants.EMPTY_BYTE_ARRAY);
         listOfOps.add(op);
@@ -373,10 +373,10 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
    * @param znode server names to copy
    * @return all wals for all peers of that cluster, null if an error occurred
    */
-  private SortedMap<String, SortedSet<String>> copyQueuesFromRS(String znode) {
+  private Map<String, Set<String>> copyQueuesFromRS(String znode) {
     // TODO this method isn't atomic enough, we could start copying and then
     // TODO fail for some reason and we would end up with znodes we don't want.
-    SortedMap<String, SortedSet<String>> queues = new TreeMap<String, SortedSet<String>>();
+    Map<String, Set<String>> queues = new HashMap<>();
     try {
       String nodePath = ZKUtil.joinZNode(this.queuesZNode, znode);
       List<String> clusters = ZKUtil.listChildrenNoWatch(this.zookeeper, nodePath);
@@ -406,7 +406,7 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
         }
         ZKUtil.createNodeIfNotExistsAndWatch(this.zookeeper, newClusterZnode,
           HConstants.EMPTY_BYTE_ARRAY);
-        SortedSet<String> logQueue = new TreeSet<String>();
+        Set<String> logQueue = new HashSet<String>();
         queues.put(newCluster, logQueue);
         for (String wal : wals) {
           String z = ZKUtil.joinZNode(clusterPath, wal);

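The multi-based path above is what makes failover claims safe under ZooKeeper: the WAL
znodes are created under the claiming server and deleted from the dead server in one
transaction. A minimal sketch of that idea against the raw ZooKeeper client (the paths
and position payload are placeholders; the real code goes through ZKUtil and its
ZKUtilOp wrappers):

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.zookeeper.CreateMode;
  import org.apache.zookeeper.KeeperException;
  import org.apache.zookeeper.Op;
  import org.apache.zookeeper.ZooDefs;
  import org.apache.zookeeper.ZooKeeper;

  public class AtomicQueueMoveSketch {
    static void moveWal(ZooKeeper zk, String deadQueuePath, String newQueuePath,
        String wal, byte[] position) throws KeeperException, InterruptedException {
      List<Op> ops = new ArrayList<>();
      // Recreate the WAL entry, carrying its replication offset, under the new owner...
      ops.add(Op.create(newQueuePath + "/" + wal, position,
          ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT));
      // ...and remove it from the dead region server's queue in the same transaction.
      ops.add(Op.delete(deadQueuePath + "/" + wal, -1));
      // All-or-nothing: if another region server raced us and already moved or
      // deleted the znode, the whole multi fails and nothing is left half-moved.
      zk.multi(ops);
    }
  }
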
http://git-wip-us.apache.org/repos/asf/hbase/blob/babdedc1/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index e9330f4..433f9c5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -31,7 +31,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
-import java.util.SortedMap;
 import java.util.SortedSet;
 import java.util.TreeSet;
 import java.util.UUID;
@@ -642,7 +641,7 @@ public class ReplicationSourceManager implements ReplicationListener {
         LOG.info("Not transferring queue since we are shutting down");
         return;
       }
-      SortedMap<String, SortedSet<String>> newQueues = null;
+      Map<String, Set<String>> newQueues = null;
 
       newQueues = this.rq.claimQueues(rsZnode);
 
@@ -653,9 +652,9 @@ public class ReplicationSourceManager implements ReplicationListener {
         return;
       }
 
-      for (Map.Entry<String, SortedSet<String>> entry : newQueues.entrySet()) {
+      for (Map.Entry<String, Set<String>> entry : newQueues.entrySet()) {
         String peerId = entry.getKey();
-        SortedSet<String> walsSet = entry.getValue();
+        Set<String> walsSet = entry.getValue();
         try {
           // there is not an actual peer defined corresponding to peerId for the failover.
           ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(peerId);

http://git-wip-us.apache.org/repos/asf/hbase/blob/babdedc1/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
index de5cc31..b4451f2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
@@ -22,8 +22,8 @@ import static org.junit.Assert.*;
 
 import java.util.ArrayList;
 import java.util.List;
-import java.util.SortedMap;
-import java.util.SortedSet;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -146,7 +146,7 @@ public abstract class TestReplicationStateBasic {
     assertEquals(0, rq3.claimQueues(server1).size());
     assertEquals(2, rq3.getListOfReplicators().size());
 
-    SortedMap<String, SortedSet<String>> queues = rq2.claimQueues(server3);
+    Map<String, Set<String>> queues = rq2.claimQueues(server3);
     assertEquals(5, queues.size());
     assertEquals(1, rq2.getListOfReplicators().size());
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/babdedc1/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java
index 8186213..bd6d070 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateHBaseImpl.java
@@ -18,26 +18,30 @@
 
 package org.apache.hadoop.hbase.replication;
 
-import junit.framework.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.zookeeper.KeeperException;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import java.io.IOException;
+import java.util.Map;
+import java.util.Set;
+
 import static junit.framework.TestCase.assertNull;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -48,10 +52,24 @@ public class TestReplicationStateHBaseImpl {
 
   private static Configuration conf;
   private static HBaseTestingUtility utility;
-  private static Connection connection;
-  private static ReplicationQueues rqH;
+  private static ZooKeeperWatcher zkw;
+  private static String replicationZNode;
+
+  private static ReplicationQueues rq1;
+  private static ReplicationQueues rq2;
+  private static ReplicationQueues rq3;
+  private static ReplicationPeers rp;
+
+  private static final String server1 = ServerName.valueOf("hostname1.example.org", 1234, 123L)
+      .toString();
+  private static final String server2 = ServerName.valueOf("hostname2.example.org", 1234, 1L)
+      .toString();
+  private static final String server3 = ServerName.valueOf("hostname3.example.org", 1234, 1L)
+      .toString();
 
-  private final String server1 = ServerName.valueOf("hostname1.example.org", 1234, -1L).toString();
+  private static DummyServer ds1;
+  private static DummyServer ds2;
+  private static DummyServer ds3;
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
@@ -60,51 +78,63 @@ public class TestReplicationStateHBaseImpl {
     conf = utility.getConfiguration();
     conf.setClass("hbase.region.replica.replication.ReplicationQueuesType",
         ReplicationQueuesHBaseImpl.class, ReplicationQueues.class);
-    connection = ConnectionFactory.createConnection(conf);
+    conf.setClass("hbase.region.replica.replication.ReplicationQueuesType",
+      ReplicationQueuesHBaseImpl.class, ReplicationQueues.class);
+    zkw = HBaseTestingUtility.getZooKeeperWatcher(utility);
+    String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication");
+    replicationZNode = ZKUtil.joinZNode(zkw.baseZNode, replicationZNodeName);
+  }
+
+  @Before
+  public void setUp() {
+    try {
+      ds1 = new DummyServer(server1);
+      rq1 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds1, zkw));
+      rq1.init(server1);
+      ds2 = new DummyServer(server2);
+      rq2 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds2, zkw));
+      rq2.init(server2);
+      ds3 = new DummyServer(server3);
+      rq3 = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds3, zkw));
+      rq3.init(server3);
+      rp = ReplicationFactory.getReplicationPeers(zkw, conf, zkw);
+      rp.init();
+      rp.addPeer("Queue1", new ReplicationPeerConfig().setClusterKey("localhost:2818:/bogus1"));
+      rp.addPeer("Queue2", new ReplicationPeerConfig().setClusterKey("localhost:2818:/bogus2"));
+      rp.addPeer("Queue3", new ReplicationPeerConfig().setClusterKey("localhost:2818:/bogus3"));
+    } catch (Exception e) {
+      fail("testReplicationStateHBaseConstruction received an exception" + e.getMessage());
+    }
   }
 
   @Test
   public void checkNamingSchema() throws Exception {
-    rqH.init(server1);
-    assertTrue(rqH.isThisOurRegionServer(server1));
-    assertTrue(!rqH.isThisOurRegionServer(server1 + "a"));
-    assertTrue(!rqH.isThisOurRegionServer(null));
+    assertTrue(rq1.isThisOurRegionServer(server1));
+    assertTrue(!rq1.isThisOurRegionServer(server1 + "a"));
+    assertTrue(!rq1.isThisOurRegionServer(null));
   }
 
   @Test
-  public void testReplicationStateHBase() {
-    DummyServer ds = new DummyServer(server1);
-    try {
-      rqH = ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(conf, ds, null));
-      rqH.init(server1);
-      // Check that the proper System Tables have been generated
-      Table replicationTable = connection.getTable(
-          ReplicationQueuesHBaseImpl.REPLICATION_TABLE_NAME);
-      assertTrue(replicationTable.getName().isSystemTable());
-
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail("testReplicationStateHBaseConstruction received an Exception");
-    }
+  public void testSingleReplicationQueuesHBaseImpl() {
     try {
       // Test adding in WAL files
-      assertEquals(0, rqH.getAllQueues().size());
-      rqH.addLog("Queue1", "WALLogFile1.1");
-      assertEquals(1, rqH.getAllQueues().size());
-      rqH.addLog("Queue1", "WALLogFile1.2");
-      rqH.addLog("Queue1", "WALLogFile1.3");
-      rqH.addLog("Queue1", "WALLogFile1.4");
-      rqH.addLog("Queue2", "WALLogFile2.1");
-      rqH.addLog("Queue3", "WALLogFile3.1");
-      assertEquals(3, rqH.getAllQueues().size());
-      assertEquals(4, rqH.getLogsInQueue("Queue1").size());
-      assertEquals(1, rqH.getLogsInQueue("Queue2").size());
-      assertEquals(1, rqH.getLogsInQueue("Queue3").size());
+      assertEquals(0, rq1.getAllQueues().size());
+      rq1.addLog("Queue1", "WALLogFile1.1");
+      assertEquals(1, rq1.getAllQueues().size());
+      rq1.addLog("Queue1", "WALLogFile1.2");
+      rq1.addLog("Queue1", "WALLogFile1.3");
+      rq1.addLog("Queue1", "WALLogFile1.4");
+      rq1.addLog("Queue2", "WALLogFile2.1");
+      rq1.addLog("Queue3", "WALLogFile3.1");
+      assertEquals(3, rq1.getAllQueues().size());
+      assertEquals(4, rq1.getLogsInQueue("Queue1").size());
+      assertEquals(1, rq1.getLogsInQueue("Queue2").size());
+      assertEquals(1, rq1.getLogsInQueue("Queue3").size());
       // Make sure that abortCount is still 0
-      assertEquals(0, ds.getAbortCount());
+      assertEquals(0, ds1.getAbortCount());
       // Make sure that getting a log from a non-existent queue triggers an abort
-      assertNull(rqH.getLogsInQueue("Queue4"));
-      assertEquals(1, ds.getAbortCount());
+      assertNull(rq1.getLogsInQueue("Queue4"));
+      assertEquals(1, ds1.getAbortCount());
     } catch (ReplicationException e) {
       e.printStackTrace();
       fail("testAddLog received a ReplicationException");
@@ -112,59 +142,186 @@ public class TestReplicationStateHBaseImpl {
     try {
 
       // Test updating the log positions
-      assertEquals(0L, rqH.getLogPosition("Queue1", "WALLogFile1.1"));
-      rqH.setLogPosition("Queue1", "WALLogFile1.1", 123L);
-      assertEquals(123L, rqH.getLogPosition("Queue1", "WALLogFile1.1"));
-      rqH.setLogPosition("Queue1", "WALLogFile1.1", 123456789L);
-      assertEquals(123456789L, rqH.getLogPosition("Queue1", "WALLogFile1.1"));
-      rqH.setLogPosition("Queue2", "WALLogFile2.1", 242L);
-      assertEquals(242L, rqH.getLogPosition("Queue2", "WALLogFile2.1"));
-      rqH.setLogPosition("Queue3", "WALLogFile3.1", 243L);
-      assertEquals(243L, rqH.getLogPosition("Queue3", "WALLogFile3.1"));
+      assertEquals(0L, rq1.getLogPosition("Queue1", "WALLogFile1.1"));
+      rq1.setLogPosition("Queue1", "WALLogFile1.1", 123L);
+      assertEquals(123L, rq1.getLogPosition("Queue1", "WALLogFile1.1"));
+      rq1.setLogPosition("Queue1", "WALLogFile1.1", 123456789L);
+      assertEquals(123456789L, rq1.getLogPosition("Queue1", "WALLogFile1.1"));
+      rq1.setLogPosition("Queue2", "WALLogFile2.1", 242L);
+      assertEquals(242L, rq1.getLogPosition("Queue2", "WALLogFile2.1"));
+      rq1.setLogPosition("Queue3", "WALLogFile3.1", 243L);
+      assertEquals(243L, rq1.getLogPosition("Queue3", "WALLogFile3.1"));
 
       // Test that setting log positions in non-existing logs will cause an abort
-      assertEquals(1, ds.getAbortCount());
-      rqH.setLogPosition("NotHereQueue", "WALLogFile3.1", 243L);
-      assertEquals(2, ds.getAbortCount());
-      rqH.setLogPosition("NotHereQueue", "NotHereFile", 243L);
-      assertEquals(3, ds.getAbortCount());
-      rqH.setLogPosition("Queue1", "NotHereFile", 243l);
-      assertEquals(4, ds.getAbortCount());
+      assertEquals(1, ds1.getAbortCount());
+      rq1.setLogPosition("NotHereQueue", "WALLogFile3.1", 243L);
+      assertEquals(2, ds1.getAbortCount());
+      rq1.setLogPosition("NotHereQueue", "NotHereFile", 243L);
+      assertEquals(3, ds1.getAbortCount());
+      rq1.setLogPosition("Queue1", "NotHereFile", 243l);
+      assertEquals(4, ds1.getAbortCount());
 
       // Test reading log positions for non-existent queues and WAL's
       try {
-        rqH.getLogPosition("Queue1", "NotHereWAL");
+        rq1.getLogPosition("Queue1", "NotHereWAL");
         fail("Replication queue should have thrown a ReplicationException for reading from a " +
             "non-existent WAL");
       } catch (ReplicationException e) {
       }
       try {
-        rqH.getLogPosition("NotHereQueue", "NotHereWAL");
+        rq1.getLogPosition("NotHereQueue", "NotHereWAL");
         fail("Replication queue should have thrown a ReplicationException for reading from a " +
             "non-existent queue");
       } catch (ReplicationException e) {
       }
       // Test removing logs
-      rqH.removeLog("Queue1", "WALLogFile1.1");
-      assertEquals(3, rqH.getLogsInQueue("Queue1").size());
+      rq1.removeLog("Queue1", "WALLogFile1.1");
+      assertEquals(3, rq1.getLogsInQueue("Queue1").size());
       // Test removing queues
-      rqH.removeQueue("Queue2");
-      assertEquals(2, rqH.getAllQueues().size());
-      assertNull(rqH.getLogsInQueue("Queue2"));
+      rq1.removeQueue("Queue2");
+      assertEquals(2, rq1.getAllQueues().size());
+      assertNull(rq1.getLogsInQueue("Queue2"));
       // Test that getting logs from a non-existent queue aborts
-      assertEquals(5, ds.getAbortCount());
+      assertEquals(5, ds1.getAbortCount());
       // Test removing all queues for a Region Server
-      rqH.removeAllQueues();
-      assertEquals(0, rqH.getAllQueues().size());
-      assertNull(rqH.getLogsInQueue("Queue1"));
+      rq1.removeAllQueues();
+      assertEquals(0, rq1.getAllQueues().size());
+      assertNull(rq1.getLogsInQueue("Queue1"));
       // Test that getting logs from a non-existent queue aborts
-      assertEquals(6, ds.getAbortCount());
+      assertEquals(6, ds1.getAbortCount());
     } catch (ReplicationException e) {
       e.printStackTrace();
       fail("testAddLog received a ReplicationException");
     }
   }
 
+  @Test
+  public void testMultipleReplicationQueuesHBaseImpl() {
+    try {
+      // Test adding in WAL files
+      rq1.addLog("Queue1", "WALLogFile1.1");
+      rq1.addLog("Queue1", "WALLogFile1.2");
+      rq1.addLog("Queue1", "WALLogFile1.3");
+      rq1.addLog("Queue1", "WALLogFile1.4");
+      rq1.addLog("Queue2", "WALLogFile2.1");
+      rq1.addLog("Queue3", "WALLogFile3.1");
+      rq2.addLog("Queue1", "WALLogFile1.1");
+      rq2.addLog("Queue1", "WALLogFile1.2");
+      rq2.addLog("Queue2", "WALLogFile2.1");
+      rq3.addLog("Queue1", "WALLogFile1.1");
+      // Test adding logs to replication queues
+      assertEquals(3, rq1.getAllQueues().size());
+      assertEquals(2, rq2.getAllQueues().size());
+      assertEquals(1, rq3.getAllQueues().size());
+      assertEquals(4, rq1.getLogsInQueue("Queue1").size());
+      assertEquals(1, rq1.getLogsInQueue("Queue2").size());
+      assertEquals(1, rq1.getLogsInQueue("Queue3").size());
+      assertEquals(2, rq2.getLogsInQueue("Queue1").size());
+      assertEquals(1, rq2.getLogsInQueue("Queue2").size());
+      assertEquals(1, rq3.getLogsInQueue("Queue1").size());
+    } catch (ReplicationException e) {
+      e.printStackTrace();
+      fail("testAddLogs received a ReplicationException");
+    }
+    try {
+      // Test setting and reading offset in queues
+      rq1.setLogPosition("Queue1", "WALLogFile1.1", 1l);
+      rq1.setLogPosition("Queue1", "WALLogFile1.2", 2l);
+      rq1.setLogPosition("Queue1", "WALLogFile1.3", 3l);
+      rq1.setLogPosition("Queue2", "WALLogFile2.1", 4l);
+      rq1.setLogPosition("Queue2", "WALLogFile2.2", 5l);
+      rq1.setLogPosition("Queue3", "WALLogFile3.1", 6l);
+      rq2.setLogPosition("Queue1", "WALLogFile1.1", 7l);
+      rq2.setLogPosition("Queue2", "WALLogFile2.1", 8l);
+      rq3.setLogPosition("Queue1", "WALLogFile1.1", 9l);
+      assertEquals(1l, rq1.getLogPosition("Queue1", "WALLogFile1.1"));
+      assertEquals(2l, rq1.getLogPosition("Queue1", "WALLogFile1.2"));
+      assertEquals(4l, rq1.getLogPosition("Queue2", "WALLogFile2.1"));
+      assertEquals(6l, rq1.getLogPosition("Queue3", "WALLogFile3.1"));
+      assertEquals(7l, rq2.getLogPosition("Queue1", "WALLogFile1.1"));
+      assertEquals(8l, rq2.getLogPosition("Queue2", "WALLogFile2.1"));
+      assertEquals(9l, rq3.getLogPosition("Queue1", "WALLogFile1.1"));
+      assertEquals(rq1.getListOfReplicators().size(), 3);
+      assertEquals(rq2.getListOfReplicators().size(), 3);
+      assertEquals(rq3.getListOfReplicators().size(), 3);
+    } catch (ReplicationException e) {
+      fail("testAddLogs threw a ReplicationException");
+    }
+    try {
+      // Test claiming queues
+      Map<String, Set<String>> claimedQueuesFromRq2 = rq1.claimQueues(server2);
+      // Check to make sure that list of peers with outstanding queues is decremented by one
+      // after claimQueues
+      assertEquals(2, rq1.getListOfReplicators().size());
+      assertEquals(2, rq2.getListOfReplicators().size());
+      assertEquals(2, rq3.getListOfReplicators().size());
+      // Check to make sure that we claimed the proper number of queues
+      assertEquals(2, claimedQueuesFromRq2.size());
+      assertTrue(claimedQueuesFromRq2.containsKey("Queue1-" + server2));
+      assertTrue(claimedQueuesFromRq2.containsKey("Queue2-" + server2));
+      assertEquals(2, claimedQueuesFromRq2.get("Queue1-" + server2).size());
+      assertEquals(1, claimedQueuesFromRq2.get("Queue2-" + server2).size());
+      assertEquals(5, rq1.getAllQueues().size());
+      // Check that all the logs in the other queue were claimed
+      assertEquals(2, rq1.getLogsInQueue("Queue1-" + server2).size());
+      assertEquals(1, rq1.getLogsInQueue("Queue2-" + server2).size());
+      // Check that the offsets of the claimed queues are the same
+      assertEquals(7L, rq1.getLogPosition("Queue1-" + server2, "WALLogFile1.1"));
+      assertEquals(8L, rq1.getLogPosition("Queue2-" + server2, "WALLogFile2.1"));
+      // Check that the queues were properly removed from rq2
+      assertEquals(0, rq2.getAllQueues().size());
+      assertNull(rq2.getLogsInQueue("Queue1"));
+      assertNull(rq2.getLogsInQueue("Queue2"));
+      // Check that non-existent peer queues are not claimed
+      rq1.addLog("UnclaimableQueue", "WALLogFile1.1");
+      rq1.addLog("UnclaimableQueue", "WALLogFile1.2");
+      assertEquals(6, rq1.getAllQueues().size());
+      Map<String, Set<String>> claimedQueuesFromRq1 = rq3.claimQueues(server1);
+      assertEquals(1, rq1.getListOfReplicators().size());
+      assertEquals(1, rq2.getListOfReplicators().size());
+      assertEquals(1, rq3.getListOfReplicators().size());
+      // Note that we do not pick up the queue: UnclaimableQueue which was not registered in
+      // Replication Peers
+      assertEquals(6, rq3.getAllQueues().size());
+      // Test claiming non-existing queues
+      Map<String, Set<String>> noQueues = rq3.claimQueues("NotARealServer");
+      assertEquals(0, noQueues.size());
+      assertEquals(6, rq3.getAllQueues().size());
+      // Test claiming own queues
+      noQueues = rq3.claimQueues(server3);
+      assertEquals(0, noQueues.size());
+      assertEquals(6, rq3.getAllQueues().size());
+      // Check that rq3 still remains on the list of replicators
+      assertEquals(1, rq3.getListOfReplicators().size());
+    } catch (ReplicationException e) {
+      fail("testClaimQueue threw a ReplicationException");
+    }
+  }
+
+  @After
+  public void clearQueues() throws Exception {
+    rq1.removeAllQueues();
+    rq2.removeAllQueues();
+    rq3.removeAllQueues();
+    assertEquals(0, rq1.getAllQueues().size());
+    assertEquals(0, rq2.getAllQueues().size());
+    assertEquals(0, rq3.getAllQueues().size());
+    ds1.resetAbortCount();
+    ds2.resetAbortCount();
+    ds3.resetAbortCount();
+  }
+
+  @After
+  public void tearDown() throws KeeperException, IOException {
+    ZKUtil.deleteNodeRecursively(zkw, replicationZNode);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    utility.shutdownMiniCluster();
+    utility.shutdownMiniZKCluster();
+  }
+
   static class DummyServer implements Server {
     private String serverName;
     private boolean isAborted = false;
@@ -239,5 +396,10 @@ public class TestReplicationStateHBaseImpl {
     public int getAbortCount() {
       return abortCount;
     }
+
+    public void resetAbortCount() {
+      abortCount = 0;
+    }
+
   }
 }

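The "Queue1-" + server2 keys the test expects come from the renaming scheme the HBase
implementation uses for claimed queues: the queue id grows a "-" + deadServer suffix on
each failover, while the OWNER_HISTORY column accumulates past owners with a "|"
delimiter. A worked trace, with shortened server names and assuming the history starts
empty:

  owner: rs2   queueId: "Queue1"          history: ""
  rs1 claims rs2's queues ->
  owner: rs1   queueId: "Queue1-rs2"      history: "|rs2"
  rs3 then claims rs1's queues ->
  owner: rs3   queueId: "Queue1-rs2-rs1"  history: "|rs2|rs1"
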
http://git-wip-us.apache.org/repos/asf/hbase/blob/babdedc1/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
index e731135..972a400 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
-import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.testclassification.ReplicationTests;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;

http://git-wip-us.apache.org/repos/asf/hbase/blob/babdedc1/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
index d1db068..e14fd3c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestReplicationSourceManager.java
@@ -31,7 +31,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
-import java.util.SortedMap;
+import java.util.Set;
 import java.util.SortedSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
@@ -389,7 +389,7 @@ public class TestReplicationSourceManager {
         ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s1.getConfiguration(), s1,
           s1.getZooKeeper()));
     rq1.init(s1.getServerName().toString());
-    SortedMap<String, SortedSet<String>> testMap =
+    Map<String, Set<String>> testMap =
         rq1.claimQueues(server.getServerName().getServerName());
     ReplicationQueues rq2 =
         ReplicationFactory.getReplicationQueues(new ReplicationQueuesArguments(s2.getConfiguration(), s2,
@@ -402,7 +402,7 @@ public class TestReplicationSourceManager {
     rq3.init(s3.getServerName().toString());
     testMap = rq3.claimQueues(s2.getServerName().getServerName());
 
-    ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(testMap.firstKey());
+    ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(testMap.keySet().iterator().next());
     List<String> result = replicationQueueInfo.getDeadRegionServers();
 
     // verify
@@ -523,7 +523,7 @@ public class TestReplicationSourceManager {
   }
 
   static class DummyNodeFailoverWorker extends Thread {
-    private SortedMap<String, SortedSet<String>> logZnodesMap;
+    private Map<String, Set<String>> logZnodesMap;
     Server server;
     private String deadRsZnode;
     ReplicationQueues rq;
@@ -553,12 +553,12 @@ public class TestReplicationSourceManager {
      * @return 1 when the map is not empty.
      */
     private int isLogZnodesMapPopulated() {
-      Collection<SortedSet<String>> sets = logZnodesMap.values();
+      Collection<Set<String>> sets = logZnodesMap.values();
       if (sets.size() > 1) {
         throw new RuntimeException("unexpected size of logZnodesMap: " + sets.size());
       }
       if (sets.size() == 1) {
-        SortedSet<String> s = sets.iterator().next();
+        Set<String> s = sets.iterator().next();
         for (String file : files) {
           // at least one file was missing
           if (!s.contains(file)) {


[23/50] hbase git commit: HBASE-15955 Disable action in CatalogJanitor#setEnabled should wait for active cleanup scan to finish (Stephen Yuan Jiang)

Posted by sy...@apache.org.
HBASE-15955 Disable action in CatalogJanitor#setEnabled should wait for active cleanup scan to finish (Stephen Yuan Jiang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/de1b5ff7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/de1b5ff7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/de1b5ff7

Branch: refs/heads/hbase-12439
Commit: de1b5ff7761b899691085e147c885e15897a562c
Parents: 21e9827
Author: Stephen Yuan Jiang <sy...@gmail.com>
Authored: Fri Jun 3 15:48:48 2016 -0700
Committer: Stephen Yuan Jiang <sy...@gmail.com>
Committed: Fri Jun 3 15:49:23 2016 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/master/CatalogJanitor.java | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/de1b5ff7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index b9abc65..c93b307 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
+import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.hbase.util.Triple;
 
 /**
@@ -86,7 +87,17 @@ public class CatalogJanitor extends ScheduledChore {
    * @param enabled
    */
   public boolean setEnabled(final boolean enabled) {
-    return this.enabled.getAndSet(enabled);
+    boolean alreadyEnabled = this.enabled.getAndSet(enabled);
+    // If disabling is requested on an already enabled chore, an active scan may still be
+    // going on; callers might not be aware of that and take further action thinking that
+    // nothing more will come from this chore. In this case, the right behavior is to wait
+    // for the active scan to complete before returning from this function.
+    if (!enabled && alreadyEnabled) {
+      while (alreadyRunning.get()) {
+        Threads.sleepWithoutInterrupt(100);
+      }
+    }
+    return alreadyEnabled;
   }
 
   boolean getEnabled() {

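The pattern the patch introduces, a disable that drains the in-flight run, is easy to
isolate. A sketch under the assumption of two flags, one gating new scans and one set for
the duration of a scan (this is an illustration, not the CatalogJanitor code itself; the
real chore uses Threads.sleepWithoutInterrupt and its own alreadyRunning flag):

  import java.util.concurrent.atomic.AtomicBoolean;

  public class DrainOnDisableSketch {
    private final AtomicBoolean enabled = new AtomicBoolean(true);
    private final AtomicBoolean running = new AtomicBoolean(false);

    /** Returns the previous enabled state; on disable, blocks until no scan is active. */
    public boolean setEnabled(boolean on) {
      boolean wasEnabled = enabled.getAndSet(on);
      if (!on && wasEnabled) {
        // Spin until any in-flight scan drains, so a caller of setEnabled(false)
        // knows no cleanup is still racing with whatever it does next.
        while (running.get()) {
          try {
            Thread.sleep(100);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            break;
          }
        }
      }
      return wasEnabled;
    }

    void scan() {
      // Refuse to start when disabled or when another scan is already active.
      if (!enabled.get() || !running.compareAndSet(false, true)) {
        return;
      }
      try {
        // ... cleanup work ...
      } finally {
        running.set(false);
      }
    }
  }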

[21/50] hbase git commit: HBASE-15929 There are two tests named TestRegionServerMetrics. This causes slight issues when parsing console output for hanging tests and flaky tests analysis. Moving .../hbase/TestRegionServerMetrics.java to .../hbase/regionse

Posted by sy...@apache.org.
HBASE-15929 There are two tests named TestRegionServerMetrics. This causes slight issues when parsing console output for hanging tests and flaky tests analysis. Moving .../hbase/TestRegionServerMetrics.java to .../hbase/regionserver/TestRegionServerReadRequestMetrics.java. (Apekshit)

Change-Id: I379c15fe2c2c01bed53bddf7619d5f2a07c5640e


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a53d8b3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a53d8b3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a53d8b3

Branch: refs/heads/hbase-12439
Commit: 9a53d8b3850f0521c3a4ad194a3b84d7f4145007
Parents: b557f0b
Author: Apekshit <ap...@gmail.com>
Authored: Thu Jun 2 20:06:20 2016 -0700
Committer: Apekshit Sharma <ap...@apache.org>
Committed: Fri Jun 3 15:13:49 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/TestRegionServerMetrics.java   | 379 ------------------
 .../TestRegionServerReadRequestMetrics.java     | 387 +++++++++++++++++++
 2 files changed, 387 insertions(+), 379 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a53d8b3/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java
deleted file mode 100644
index 76e5842..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java
+++ /dev/null
@@ -1,379 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.client.Append;
-import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.Get;
-import org.apache.hadoop.hbase.client.Increment;
-import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.RowMutations;
-import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.filter.BinaryComparator;
-import org.apache.hadoop.hbase.filter.CompareFilter;
-import org.apache.hadoop.hbase.filter.RowFilter;
-import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-
-@Category(MediumTests.class)
-public class TestRegionServerMetrics {
-  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-  private static final TableName TABLE_NAME = TableName.valueOf("test");
-  private static final byte[] CF1 = "c1".getBytes();
-  private static final byte[] CF2 = "c2".getBytes();
-
-  private static final byte[] ROW1 = "a".getBytes();
-  private static final byte[] ROW2 = "b".getBytes();
-  private static final byte[] ROW3 = "c".getBytes();
-  private static final byte[] COL1 = "q1".getBytes();
-  private static final byte[] COL2 = "q2".getBytes();
-  private static final byte[] COL3 = "q3".getBytes();
-  private static final byte[] VAL1 = "v1".getBytes();
-  private static final byte[] VAL2 = "v2".getBytes();
-  private static final byte[] VAL3 = Bytes.toBytes(0L);
-
-  private static final int MAX_TRY = 20;
-  private static final int SLEEP_MS = 100;
-  private static final int TTL = 1;
-
-  private static Admin admin;
-  private static Collection<ServerName> serverNames;
-  private static Table table;
-  private static List<HRegionInfo> tableRegions;
-
-  private static Map<Metric, Long> requestsMap = new HashMap<>();
-  private static Map<Metric, Long> requestsMapPrev = new HashMap<>();
-
-  @BeforeClass
-  public static void setUpOnce() throws Exception {
-    TEST_UTIL.startMiniCluster();
-    admin = TEST_UTIL.getAdmin();
-    serverNames = admin.getClusterStatus().getServers();
-    table = createTable();
-    putData();
-    tableRegions = admin.getTableRegions(TABLE_NAME);
-
-    for (Metric metric : Metric.values()) {
-      requestsMap.put(metric, 0L);
-      requestsMapPrev.put(metric, 0L);
-    }
-  }
-
-  private static Table createTable() throws IOException {
-    HTableDescriptor td = new HTableDescriptor(TABLE_NAME);
-    HColumnDescriptor cd1 = new HColumnDescriptor(CF1);
-    td.addFamily(cd1);
-    HColumnDescriptor cd2 = new HColumnDescriptor(CF2);
-    cd2.setTimeToLive(TTL);
-    td.addFamily(cd2);
-
-    admin.createTable(td);
-    return TEST_UTIL.getConnection().getTable(TABLE_NAME);
-  }
-
-  private static void testReadRequests(long resultCount,
-    long expectedReadRequests, long expectedFilteredReadRequests)
-    throws IOException, InterruptedException {
-    updateMetricsMap();
-    System.out.println("requestsMapPrev = " + requestsMapPrev);
-    System.out.println("requestsMap = " + requestsMap);
-
-    assertEquals(expectedReadRequests,
-      requestsMap.get(Metric.REGION_READ) - requestsMapPrev.get(Metric.REGION_READ));
-    assertEquals(expectedReadRequests,
-      requestsMap.get(Metric.SERVER_READ) - requestsMapPrev.get(Metric.SERVER_READ));
-    assertEquals(expectedFilteredReadRequests,
-      requestsMap.get(Metric.FILTERED_REGION_READ)
-        - requestsMapPrev.get(Metric.FILTERED_REGION_READ));
-    assertEquals(expectedFilteredReadRequests,
-      requestsMap.get(Metric.FILTERED_SERVER_READ)
-        - requestsMapPrev.get(Metric.FILTERED_SERVER_READ));
-    assertEquals(expectedReadRequests, resultCount);
-  }
-
-  private static void updateMetricsMap() throws IOException, InterruptedException {
-    for (Metric metric : Metric.values()) {
-      requestsMapPrev.put(metric, requestsMap.get(metric));
-    }
-
-    ServerLoad serverLoad = null;
-    RegionLoad regionLoadOuter = null;
-    boolean metricsUpdated = false;
-    for (int i = 0; i < MAX_TRY; i++) {
-      for (ServerName serverName : serverNames) {
-        serverLoad = admin.getClusterStatus().getLoad(serverName);
-
-        Map<byte[], RegionLoad> regionsLoad = serverLoad.getRegionsLoad();
-        for (HRegionInfo tableRegion : tableRegions) {
-          RegionLoad regionLoad = regionsLoad.get(tableRegion.getRegionName());
-          if (regionLoad != null) {
-            regionLoadOuter = regionLoad;
-            for (Metric metric : Metric.values()) {
-              if (getReadRequest(serverLoad, regionLoad, metric) > requestsMapPrev.get(metric)) {
-                for (Metric metricInner : Metric.values()) {
-                  requestsMap.put(metricInner, getReadRequest(serverLoad, regionLoad, metricInner));
-                }
-                metricsUpdated = true;
-                break;
-              }
-            }
-          }
-        }
-      }
-      if (metricsUpdated) {
-        break;
-      }
-      Thread.sleep(SLEEP_MS);
-    }
-    if (!metricsUpdated) {
-      for (Metric metric : Metric.values()) {
-        requestsMap.put(metric, getReadRequest(serverLoad, regionLoadOuter, metric));
-      }
-    }
-  }
-
-  private static long getReadRequest(ServerLoad serverLoad, RegionLoad regionLoad, Metric metric) {
-    switch (metric) {
-      case REGION_READ:
-        return regionLoad.getReadRequestsCount();
-      case SERVER_READ:
-        return serverLoad.getReadRequestsCount();
-      case FILTERED_REGION_READ:
-        return regionLoad.getFilteredReadRequestsCount();
-      case FILTERED_SERVER_READ:
-        return serverLoad.getFilteredReadRequestsCount();
-      default:
-        throw new IllegalStateException();
-    }
-  }
-
-  private static void putData() throws IOException {
-    Put put;
-
-    put = new Put(ROW1);
-    put.addColumn(CF1, COL1, VAL1);
-    put.addColumn(CF1, COL2, VAL2);
-    put.addColumn(CF1, COL3, VAL3);
-    table.put(put);
-    put = new Put(ROW2);
-    put.addColumn(CF1, COL1, VAL2);  // put val2 instead of val1
-    put.addColumn(CF1, COL2, VAL2);
-    table.put(put);
-    put = new Put(ROW3);
-    put.addColumn(CF1, COL1, VAL1);
-    put.addColumn(CF1, COL2, VAL2);
-    table.put(put);
-  }
-
-  private static void putTTLExpiredData() throws IOException, InterruptedException {
-    Put put;
-
-    put = new Put(ROW1);
-    put.addColumn(CF2, COL1, VAL1);
-    put.addColumn(CF2, COL2, VAL2);
-    table.put(put);
-
-    Thread.sleep(TTL * 1000);
-
-    put = new Put(ROW2);
-    put.addColumn(CF2, COL1, VAL1);
-    put.addColumn(CF2, COL2, VAL2);
-    table.put(put);
-
-    put = new Put(ROW3);
-    put.addColumn(CF2, COL1, VAL1);
-    put.addColumn(CF2, COL2, VAL2);
-    table.put(put);
-  }
-
-  @AfterClass
-  public static void tearDownOnce() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  @Test
-  public void testReadRequestsCountNotFiltered() throws Exception {
-    int resultCount;
-    Scan scan;
-    Append append;
-    Put put;
-    Increment increment;
-    Get get;
-
-    // test for scan
-    scan = new Scan();
-    try (ResultScanner scanner = table.getScanner(scan)) {
-      resultCount = 0;
-      for (Result ignore : scanner) {
-        resultCount++;
-      }
-      testReadRequests(resultCount, 3, 0);
-    }
-
-    // test for scan
-    scan = new Scan(ROW2, ROW3);
-    try (ResultScanner scanner = table.getScanner(scan)) {
-      resultCount = 0;
-      for (Result ignore : scanner) {
-        resultCount++;
-      }
-      testReadRequests(resultCount, 1, 0);
-    }
-
-    // test for get
-    get = new Get(ROW2);
-    Result result = table.get(get);
-    resultCount = result.isEmpty() ? 0 : 1;
-    testReadRequests(resultCount, 1, 0);
-
-    // test for increment
-    increment = new Increment(ROW1);
-    increment.addColumn(CF1, COL3, 1);
-    result = table.increment(increment);
-    resultCount = result.isEmpty() ? 0 : 1;
-    testReadRequests(resultCount, 1, 0);
-
-    // test for checkAndPut
-    put = new Put(ROW1);
-    put.addColumn(CF1, COL2, VAL2);
-    boolean checkAndPut =
-      table.checkAndPut(ROW1, CF1, COL2, CompareFilter.CompareOp.EQUAL, VAL2, put);
-    resultCount = checkAndPut ? 1 : 0;
-    testReadRequests(resultCount, 1, 0);
-
-    // test for append
-    append = new Append(ROW1);
-    append.add(CF1, COL2, VAL2);
-    result = table.append(append);
-    resultCount = result.isEmpty() ? 0 : 1;
-    testReadRequests(resultCount, 1, 0);
-
-    // test for checkAndMutate
-    put = new Put(ROW1);
-    put.addColumn(CF1, COL1, VAL1);
-    RowMutations rm = new RowMutations(ROW1);
-    rm.add(put);
-    boolean checkAndMutate =
-      table.checkAndMutate(ROW1, CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1, rm);
-    resultCount = checkAndMutate ? 1 : 0;
-    testReadRequests(resultCount, 1, 0);
-  }
-
-  @Test
-  public void testReadRequestsCountWithFilter() throws Exception {
-    int resultCount;
-    Scan scan;
-
-    // test for scan
-    scan = new Scan();
-    scan.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
-    try (ResultScanner scanner = table.getScanner(scan)) {
-      resultCount = 0;
-      for (Result ignore : scanner) {
-        resultCount++;
-      }
-      testReadRequests(resultCount, 2, 1);
-    }
-
-    // test for scan
-    scan = new Scan();
-    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
-    try (ResultScanner scanner = table.getScanner(scan)) {
-      resultCount = 0;
-      for (Result ignore : scanner) {
-        resultCount++;
-      }
-      testReadRequests(resultCount, 1, 2);
-    }
-
-    // test for scan
-    scan = new Scan(ROW2, ROW3);
-    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
-    try (ResultScanner scanner = table.getScanner(scan)) {
-      resultCount = 0;
-      for (Result ignore : scanner) {
-        resultCount++;
-      }
-      testReadRequests(resultCount, 0, 1);
-    }
-
-    // fixme filtered get should not increase readRequestsCount
-//    Get get = new Get(ROW2);
-//    get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
-//    Result result = table.get(get);
-//    resultCount = result.isEmpty() ? 0 : 1;
-//    testReadRequests(resultCount, 0, 1);
-  }
-
-  @Test
-  public void testReadRequestsCountWithDeletedRow() throws Exception {
-    try {
-      Delete delete = new Delete(ROW3);
-      table.delete(delete);
-
-      Scan scan = new Scan();
-      try (ResultScanner scanner = table.getScanner(scan)) {
-        int resultCount = 0;
-        for (Result ignore : scanner) {
-          resultCount++;
-        }
-        testReadRequests(resultCount, 2, 1);
-      }
-    } finally {
-      Put put = new Put(ROW3);
-      put.addColumn(CF1, COL1, VAL1);
-      put.addColumn(CF1, COL2, VAL2);
-      table.put(put);
-    }
-  }
-
-  @Test
-  public void testReadRequestsCountWithTTLExpiration() throws Exception {
-    putTTLExpiredData();
-
-    Scan scan = new Scan();
-    scan.addFamily(CF2);
-    try (ResultScanner scanner = table.getScanner(scan)) {
-      int resultCount = 0;
-      for (Result ignore : scanner) {
-        resultCount++;
-      }
-      testReadRequests(resultCount, 2, 1);
-    }
-  }
-
-  private enum Metric {REGION_READ, SERVER_READ, FILTERED_REGION_READ, FILTERED_SERVER_READ}
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/9a53d8b3/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java
new file mode 100644
index 0000000..6867b99
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReadRequestMetrics.java
@@ -0,0 +1,387 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.RegionLoad;
+import org.apache.hadoop.hbase.ServerLoad;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+@Category(MediumTests.class)
+public class TestRegionServerReadRequestMetrics {
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final TableName TABLE_NAME = TableName.valueOf("test");
+  private static final byte[] CF1 = "c1".getBytes();
+  private static final byte[] CF2 = "c2".getBytes();
+
+  private static final byte[] ROW1 = "a".getBytes();
+  private static final byte[] ROW2 = "b".getBytes();
+  private static final byte[] ROW3 = "c".getBytes();
+  private static final byte[] COL1 = "q1".getBytes();
+  private static final byte[] COL2 = "q2".getBytes();
+  private static final byte[] COL3 = "q3".getBytes();
+  private static final byte[] VAL1 = "v1".getBytes();
+  private static final byte[] VAL2 = "v2".getBytes();
+  private static final byte[] VAL3 = Bytes.toBytes(0L);
+
+  private static final int MAX_TRY = 20;
+  private static final int SLEEP_MS = 100;
+  private static final int TTL = 1;
+
+  private static Admin admin;
+  private static Collection<ServerName> serverNames;
+  private static Table table;
+  private static List<HRegionInfo> tableRegions;
+
+  private static Map<Metric, Long> requestsMap = new HashMap<>();
+  private static Map<Metric, Long> requestsMapPrev = new HashMap<>();
+
+  @BeforeClass
+  public static void setUpOnce() throws Exception {
+    TEST_UTIL.startMiniCluster();
+    admin = TEST_UTIL.getAdmin();
+    serverNames = admin.getClusterStatus().getServers();
+    table = createTable();
+    putData();
+    tableRegions = admin.getTableRegions(TABLE_NAME);
+
+    for (Metric metric : Metric.values()) {
+      requestsMap.put(metric, 0L);
+      requestsMapPrev.put(metric, 0L);
+    }
+  }
+
+  private static Table createTable() throws IOException {
+    HTableDescriptor td = new HTableDescriptor(TABLE_NAME);
+    HColumnDescriptor cd1 = new HColumnDescriptor(CF1);
+    td.addFamily(cd1);
+    HColumnDescriptor cd2 = new HColumnDescriptor(CF2);
+    cd2.setTimeToLive(TTL);
+    td.addFamily(cd2);
+
+    admin.createTable(td);
+    return TEST_UTIL.getConnection().getTable(TABLE_NAME);
+  }
+
+  private static void testReadRequests(long resultCount,
+    long expectedReadRequests, long expectedFilteredReadRequests)
+    throws IOException, InterruptedException {
+    updateMetricsMap();
+    System.out.println("requestsMapPrev = " + requestsMapPrev);
+    System.out.println("requestsMap = " + requestsMap);
+
+    assertEquals(expectedReadRequests,
+      requestsMap.get(Metric.REGION_READ) - requestsMapPrev.get(Metric.REGION_READ));
+    assertEquals(expectedReadRequests,
+      requestsMap.get(Metric.SERVER_READ) - requestsMapPrev.get(Metric.SERVER_READ));
+    assertEquals(expectedFilteredReadRequests,
+      requestsMap.get(Metric.FILTERED_REGION_READ)
+        - requestsMapPrev.get(Metric.FILTERED_REGION_READ));
+    assertEquals(expectedFilteredReadRequests,
+      requestsMap.get(Metric.FILTERED_SERVER_READ)
+        - requestsMapPrev.get(Metric.FILTERED_SERVER_READ));
+    assertEquals(expectedReadRequests, resultCount);
+  }
+
+  private static void updateMetricsMap() throws IOException, InterruptedException {
+    for (Metric metric : Metric.values()) {
+      requestsMapPrev.put(metric, requestsMap.get(metric));
+    }
+
+    ServerLoad serverLoad = null;
+    RegionLoad regionLoadOuter = null;
+    boolean metricsUpdated = false;
+    for (int i = 0; i < MAX_TRY; i++) {
+      for (ServerName serverName : serverNames) {
+        serverLoad = admin.getClusterStatus().getLoad(serverName);
+
+        Map<byte[], RegionLoad> regionsLoad = serverLoad.getRegionsLoad();
+        for (HRegionInfo tableRegion : tableRegions) {
+          RegionLoad regionLoad = regionsLoad.get(tableRegion.getRegionName());
+          if (regionLoad != null) {
+            regionLoadOuter = regionLoad;
+            for (Metric metric : Metric.values()) {
+              if (getReadRequest(serverLoad, regionLoad, metric) > requestsMapPrev.get(metric)) {
+                for (Metric metricInner : Metric.values()) {
+                  requestsMap.put(metricInner, getReadRequest(serverLoad, regionLoad, metricInner));
+                }
+                metricsUpdated = true;
+                break;
+              }
+            }
+          }
+        }
+      }
+      if (metricsUpdated) {
+        break;
+      }
+      Thread.sleep(SLEEP_MS);
+    }
+    if (!metricsUpdated) {
+      for (Metric metric : Metric.values()) {
+        requestsMap.put(metric, getReadRequest(serverLoad, regionLoadOuter, metric));
+      }
+    }
+  }
+
+  private static long getReadRequest(ServerLoad serverLoad, RegionLoad regionLoad, Metric metric) {
+    switch (metric) {
+      case REGION_READ:
+        return regionLoad.getReadRequestsCount();
+      case SERVER_READ:
+        return serverLoad.getReadRequestsCount();
+      case FILTERED_REGION_READ:
+        return regionLoad.getFilteredReadRequestsCount();
+      case FILTERED_SERVER_READ:
+        return serverLoad.getFilteredReadRequestsCount();
+      default:
+        throw new IllegalStateException();
+    }
+  }
+
+  private static void putData() throws IOException {
+    Put put;
+
+    put = new Put(ROW1);
+    put.addColumn(CF1, COL1, VAL1);
+    put.addColumn(CF1, COL2, VAL2);
+    put.addColumn(CF1, COL3, VAL3);
+    table.put(put);
+    put = new Put(ROW2);
+    put.addColumn(CF1, COL1, VAL2);  // put val2 instead of val1
+    put.addColumn(CF1, COL2, VAL2);
+    table.put(put);
+    put = new Put(ROW3);
+    put.addColumn(CF1, COL1, VAL1);
+    put.addColumn(CF1, COL2, VAL2);
+    table.put(put);
+  }
+
+  private static void putTTLExpiredData() throws IOException, InterruptedException {
+    Put put;
+
+    put = new Put(ROW1);
+    put.addColumn(CF2, COL1, VAL1);
+    put.addColumn(CF2, COL2, VAL2);
+    table.put(put);
+
+    Thread.sleep(TTL * 1000);
+
+    put = new Put(ROW2);
+    put.addColumn(CF2, COL1, VAL1);
+    put.addColumn(CF2, COL2, VAL2);
+    table.put(put);
+
+    put = new Put(ROW3);
+    put.addColumn(CF2, COL1, VAL1);
+    put.addColumn(CF2, COL2, VAL2);
+    table.put(put);
+  }
+
+  @AfterClass
+  public static void tearDownOnce() throws Exception {
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testReadRequestsCountNotFiltered() throws Exception {
+    int resultCount;
+    Scan scan;
+    Append append;
+    Put put;
+    Increment increment;
+    Get get;
+
+    // test for scan
+    scan = new Scan();
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 3, 0);
+    }
+
+    // test for scan
+    scan = new Scan(ROW2, ROW3);
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 1, 0);
+    }
+
+    // test for get
+    get = new Get(ROW2);
+    Result result = table.get(get);
+    resultCount = result.isEmpty() ? 0 : 1;
+    testReadRequests(resultCount, 1, 0);
+
+    // test for increment
+    increment = new Increment(ROW1);
+    increment.addColumn(CF1, COL3, 1);
+    result = table.increment(increment);
+    resultCount = result.isEmpty() ? 0 : 1;
+    testReadRequests(resultCount, 1, 0);
+
+    // test for checkAndPut
+    put = new Put(ROW1);
+    put.addColumn(CF1, COL2, VAL2);
+    boolean checkAndPut =
+      table.checkAndPut(ROW1, CF1, COL2, CompareFilter.CompareOp.EQUAL, VAL2, put);
+    resultCount = checkAndPut ? 1 : 0;
+    testReadRequests(resultCount, 1, 0);
+
+    // test for append
+    append = new Append(ROW1);
+    append.add(CF1, COL2, VAL2);
+    result = table.append(append);
+    resultCount = result.isEmpty() ? 0 : 1;
+    testReadRequests(resultCount, 1, 0);
+
+    // test for checkAndMutate
+    put = new Put(ROW1);
+    put.addColumn(CF1, COL1, VAL1);
+    RowMutations rm = new RowMutations(ROW1);
+    rm.add(put);
+    boolean checkAndMutate =
+      table.checkAndMutate(ROW1, CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1, rm);
+    resultCount = checkAndMutate ? 1 : 0;
+    testReadRequests(resultCount, 1, 0);
+  }
+
+  @Test
+  public void testReadRequestsCountWithFilter() throws Exception {
+    int resultCount;
+    Scan scan;
+
+    // test for scan
+    scan = new Scan();
+    scan.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 2, 1);
+    }
+
+    // test for scan
+    scan = new Scan();
+    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 1, 2);
+    }
+
+    // test for scan
+    scan = new Scan(ROW2, ROW3);
+    scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 0, 1);
+    }
+
+    // fixme filtered get should not increase readRequestsCount
+//    Get get = new Get(ROW2);
+//    get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
+//    Result result = table.get(get);
+//    resultCount = result.isEmpty() ? 0 : 1;
+//    testReadRequests(resultCount, 0, 1);
+  }
+
+  @Test
+  public void testReadRequestsCountWithDeletedRow() throws Exception {
+    try {
+      Delete delete = new Delete(ROW3);
+      table.delete(delete);
+
+      Scan scan = new Scan();
+      try (ResultScanner scanner = table.getScanner(scan)) {
+        int resultCount = 0;
+        for (Result ignore : scanner) {
+          resultCount++;
+        }
+        testReadRequests(resultCount, 2, 1);
+      }
+    } finally {
+      Put put = new Put(ROW3);
+      put.addColumn(CF1, COL1, VAL1);
+      put.addColumn(CF1, COL2, VAL2);
+      table.put(put);
+    }
+  }
+
+  @Test
+  public void testReadRequestsCountWithTTLExpiration() throws Exception {
+    putTTLExpiredData();
+
+    Scan scan = new Scan();
+    scan.addFamily(CF2);
+    try (ResultScanner scanner = table.getScanner(scan)) {
+      int resultCount = 0;
+      for (Result ignore : scanner) {
+        resultCount++;
+      }
+      testReadRequests(resultCount, 2, 1);
+    }
+  }
+
+  private enum Metric {REGION_READ, SERVER_READ, FILTERED_REGION_READ, FILTERED_SERVER_READ}
+}


[29/50] hbase git commit: HBASE-15698 Increment TimeRange not serialized to server (Ted Yu)

Posted by sy...@apache.org.
HBASE-15698 Increment TimeRange not serialized to server (Ted Yu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/376ad0d9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/376ad0d9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/376ad0d9

Branch: refs/heads/hbase-12439
Commit: 376ad0d9868e563e4153f8725afbf00a0293f72c
Parents: 7fd3532
Author: Andrew Purtell <ap...@apache.org>
Authored: Mon Jun 6 16:59:43 2016 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Mon Jun 6 22:17:00 2016 -0700

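For orientation, the client-side pattern this fix targets: a custom TimeRange
set on an Increment survived Table.increment() but was silently dropped when
the same Increment travelled through the multi/batch serialization path. A
minimal sketch of the affected usage (table, family, and qualifier names are
hypothetical; the calls are the standard HBase client API of this era):

  import java.util.Collections;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.Increment;
  import org.apache.hadoop.hbase.client.Row;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class IncrementTimeRangeSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Table table = conn.getTable(TableName.valueOf("t"))) {
        Increment inc = new Increment(Bytes.toBytes("row"));
        inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 1L);
        inc.setTimeRange(0L, System.currentTimeMillis()); // custom TimeRange
        // The batch path serializes through the no-data conversion patched
        // below, which previously did not carry the TimeRange, so the server
        // saw an all-time range.
        Object[] results = new Object[1];
        table.batch(Collections.<Row>singletonList(inc), results);
      }
    }
  }
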
----------------------------------------------------------------------
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |  21 +-
 .../coprocessor/TestIncrementTimeRange.java     | 196 +++++++++++++++++++
 2 files changed, 210 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/376ad0d9/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index 0c34a17..fecc3c2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -1111,6 +1111,16 @@ public final class ProtobufUtil {
     return builder.build();
   }
 
+  static void setTimeRange(final MutationProto.Builder builder, final TimeRange timeRange) {
+    if (!timeRange.isAllTime()) {
+      HBaseProtos.TimeRange.Builder timeRangeBuilder =
+        HBaseProtos.TimeRange.newBuilder();
+      timeRangeBuilder.setFrom(timeRange.getMin());
+      timeRangeBuilder.setTo(timeRange.getMax());
+      builder.setTimeRange(timeRangeBuilder.build());
+    }
+  }
+
   /**
    * Convert a client Increment to a protobuf Mutate.
    *
@@ -1126,13 +1136,7 @@ public final class ProtobufUtil {
       builder.setNonce(nonce);
     }
     TimeRange timeRange = increment.getTimeRange();
-    if (!timeRange.isAllTime()) {
-      HBaseProtos.TimeRange.Builder timeRangeBuilder =
-        HBaseProtos.TimeRange.newBuilder();
-      timeRangeBuilder.setFrom(timeRange.getMin());
-      timeRangeBuilder.setTo(timeRange.getMax());
-      builder.setTimeRange(timeRangeBuilder.build());
-    }
+    setTimeRange(builder, timeRange);
     ColumnValue.Builder columnBuilder = ColumnValue.newBuilder();
     QualifierValue.Builder valueBuilder = QualifierValue.newBuilder();
     for (Map.Entry<byte[], List<Cell>> family: increment.getFamilyCellMap().entrySet()) {
@@ -1253,6 +1257,9 @@ public final class ProtobufUtil {
       final MutationProto.Builder builder, long nonce) throws IOException {
     getMutationBuilderAndSetCommonFields(type, mutation, builder);
     builder.setAssociatedCellCount(mutation.size());
+    if (mutation instanceof Increment) {
+      setTimeRange(builder, ((Increment)mutation).getTimeRange());
+    }
     if (nonce != HConstants.NO_NONCE) {
       builder.setNonce(nonce);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/376ad0d9/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
new file mode 100644
index 0000000..35ed531
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
@@ -0,0 +1,196 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.coprocessor;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.io.TimeRange;
+import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * This test runs batch mutation with Increments which have custom TimeRange.
+ * Custom Observer records the TimeRange.
+ * We then verify that the recorded TimeRange has the same bounds as the initial TimeRange.
+ * See HBASE-15698
+ */
+@Category({CoprocessorTests.class, MediumTests.class})
+public class TestIncrementTimeRange {
+
+  private static final HBaseTestingUtility util = new HBaseTestingUtility();
+  private static ManualEnvironmentEdge mee = new ManualEnvironmentEdge();
+
+  private static final TableName TEST_TABLE = TableName.valueOf("test");
+  private static final byte[] TEST_FAMILY = Bytes.toBytes("f1");
+
+  private static final byte[] ROW_A = Bytes.toBytes("aaa");
+  private static final byte[] ROW_B = Bytes.toBytes("bbb");
+  private static final byte[] ROW_C = Bytes.toBytes("ccc");
+
+  private static final byte[] qualifierCol1 = Bytes.toBytes("col1");
+
+  private static final byte[] bytes1 = Bytes.toBytes(1);
+  private static final byte[] bytes2 = Bytes.toBytes(2);
+  private static final byte[] bytes3 = Bytes.toBytes(3);
+
+  private Table hTableInterface;
+  private Table table;
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    util.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
+        MyObserver.class.getName());
+    util.startMiniCluster();
+    EnvironmentEdgeManager.injectEdge(mee);
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    util.shutdownMiniCluster();
+  }
+
+  @Before
+  public void before() throws Exception {
+    table = util.createTable(TEST_TABLE, TEST_FAMILY);
+
+    Put puta = new Put(ROW_A);
+    puta.addColumn(TEST_FAMILY, qualifierCol1, bytes1);
+    table.put(puta);
+
+    Put putb = new Put(ROW_B);
+    putb.addColumn(TEST_FAMILY, qualifierCol1, bytes2);
+    table.put(putb);
+
+    Put putc = new Put(ROW_C);
+    putc.addColumn(TEST_FAMILY, qualifierCol1, bytes3);
+    table.put(putc);
+  }
+
+  @After
+  public void after() throws Exception {
+    try {
+      if (table != null) {
+        table.close();
+      }
+    } finally {
+      try {
+        util.deleteTable(TEST_TABLE);
+      } catch (IOException ioe) {
+      }
+    }
+  }
+
+  public static class MyObserver extends SimpleRegionObserver {
+    static TimeRange tr10 = null, tr2 = null;
+    @Override
+    public Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> e,
+        final Increment increment) throws IOException {
+      NavigableMap<byte [], List<Cell>> map = increment.getFamilyCellMap();
+      for (Map.Entry<byte [], List<Cell>> entry : map.entrySet()) {
+        for (Cell cell : entry.getValue()) {
+          long incr = Bytes.toLong(cell.getValueArray(), cell.getValueOffset(),
+              cell.getValueLength());
+          if (incr == 10) {
+            tr10 = increment.getTimeRange();
+          } else if (incr == 2 && !increment.getTimeRange().isAllTime()) {
+            tr2 = increment.getTimeRange();
+          }
+        }
+      }
+      return super.preIncrement(e, increment);
+    }
+  }
+
+  @Test
+  public void testHTableInterfaceMethods() throws Exception {
+    hTableInterface = util.getConnection().getTable(TEST_TABLE);
+    checkHTableInterfaceMethods();
+  }
+
+  private void checkHTableInterfaceMethods() throws Exception {
+    long time = EnvironmentEdgeManager.currentTime();
+    mee.setValue(time);
+    hTableInterface.put(new Put(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, Bytes.toBytes(1L)));
+    checkRowValue(ROW_A, Bytes.toBytes(1L));
+
+    time = EnvironmentEdgeManager.currentTime();
+    mee.setValue(time);
+    TimeRange range10 = new TimeRange(1, time+10);
+    hTableInterface.increment(new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 10L)
+        .setTimeRange(range10.getMin(), range10.getMax()));
+    checkRowValue(ROW_A, Bytes.toBytes(11L));
+    assertEquals(MyObserver.tr10.getMin(), range10.getMin());
+    assertEquals(MyObserver.tr10.getMax(), range10.getMax());
+
+    time = EnvironmentEdgeManager.currentTime();
+    mee.setValue(time);
+    TimeRange range2 = new TimeRange(1, time+20);
+    List<Row> actions =
+        Arrays.asList(new Row[] { new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
+            .setTimeRange(range2.getMin(), range2.getMax()),
+            new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
+            .setTimeRange(range2.getMin(), range2.getMax()) });
+    Object[] results3 = new Object[actions.size()];
+    Object[] results1 = results3;
+    hTableInterface.batch(actions, results1);
+    assertEquals(MyObserver.tr2.getMin(), range2.getMin());
+    assertEquals(MyObserver.tr2.getMax(), range2.getMax());
+    for (Object r2 : results1) {
+      assertTrue(r2 instanceof Result);
+    }
+    checkRowValue(ROW_A, Bytes.toBytes(15L));
+
+    hTableInterface.close();
+  }
+
+  private void checkRowValue(byte[] row, byte[] expectedValue) throws IOException {
+    Get get = new Get(row).addColumn(TEST_FAMILY, qualifierCol1);
+    Result result = hTableInterface.get(get);
+    byte[] actualValue = result.getValue(TEST_FAMILY, qualifierCol1);
+    assertArrayEquals(expectedValue, actualValue);
+  }
+}


[37/50] hbase git commit: HBASE-15600 Add provision for adding mutations to memstore, or to write to the same region, in batchMutate coprocessor hooks (Rajeshbabu and Enis)

Posted by sy...@apache.org.
HBASE-15600 Add provision for adding mutations to memstore, or to write to the same region, in batchMutate coprocessor hooks (Rajeshbabu and Enis)

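In coprocessor terms, the new hook surface lets preBatchMutate() attach extra
Mutations that are committed atomically with the triggering batch, applied to
both the WAL and the memstore. A rough sketch of the pattern (the observer
class and the 'audit' column family are hypothetical; the tests included in
this commit show the committed usage):

  import java.io.IOException;
  import java.util.List;
  import org.apache.hadoop.hbase.Cell;
  import org.apache.hadoop.hbase.CellUtil;
  import org.apache.hadoop.hbase.client.Mutation;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
  import org.apache.hadoop.hbase.util.Bytes;

  public class AuditingObserver extends BaseRegionObserver {
    private static final byte[] AUDIT = Bytes.toBytes("audit");

    @Override
    public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
      for (int i = 0; i < miniBatchOp.size(); i++) {
        Mutation m = miniBatchOp.getOperation(i);
        if (!(m instanceof Put)) {
          continue;
        }
        // Mirror every cell of the Put into the audit family. Per the new
        // contract, cell timestamps MUST come from the original mutation.
        Put mirror = new Put(m.getRow());
        for (List<Cell> cells : m.getFamilyCellMap().values()) {
          for (Cell cell : cells) {
            mirror.addColumn(AUDIT, CellUtil.cloneQualifier(cell),
                cell.getTimestamp(), CellUtil.cloneValue(cell));
          }
        }
        // Committed atomically with the mutation at index i.
        miniBatchOp.addOperationsFromCP(i, new Mutation[] { mirror });
      }
    }
  }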

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d05a3722
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d05a3722
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d05a3722

Branch: refs/heads/hbase-12439
Commit: d05a3722c8347363eb04a3e5457d13ae5d0d6de6
Parents: 3a95552
Author: Enis Soztutar <en...@apache.org>
Authored: Tue Jun 7 20:12:51 2016 -0700
Committer: Enis Soztutar <en...@apache.org>
Committed: Tue Jun 7 20:12:51 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/HRegion.java      |  76 ++++-
 .../MiniBatchOperationInProgress.java           |  29 +-
 .../regionserver/MultiRowMutationProcessor.java |   2 +-
 ...erverForAddingMutationsFromCoprocessors.java | 282 +++++++++++++++++++
 4 files changed, 370 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d05a3722/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6522fde..9c966cd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3134,6 +3134,35 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
         if (coprocessorHost.preBatchMutate(miniBatchOp)) {
           return 0L;
+        } else {
+          for (int i = firstIndex; i < lastIndexExclusive; i++) {
+            if (batchOp.retCodeDetails[i].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
+              // lastIndexExclusive was incremented above.
+              continue;
+            }
+            // we pass (i - firstIndex) below since the call expects a relative index
+            Mutation[] cpMutations = miniBatchOp.getOperationsFromCoprocessors(i - firstIndex);
+            if (cpMutations == null) {
+              continue;
+            }
+            // Else Coprocessor added more Mutations corresponding to the Mutation at this index.
+            for (int j = 0; j < cpMutations.length; j++) {
+              Mutation cpMutation = cpMutations[j];
+              Map<byte[], List<Cell>> cpFamilyMap = cpMutation.getFamilyCellMap();
+              checkAndPrepareMutation(cpMutation, replay, cpFamilyMap, now);
+
+              // Acquire row locks. If not, the whole batch will fail.
+              acquiredRowLocks.add(getRowLockInternal(cpMutation.getRow(), true));
+
+              if (cpMutation.getDurability() == Durability.SKIP_WAL) {
+                recordMutationWithoutWal(cpFamilyMap);
+              }
+
+              // Returned mutations from coprocessor correspond to the Mutation at index i. We can
+              // directly add the cells from those mutations to the familyMaps of this mutation.
+              mergeFamilyMaps(familyMaps[i], cpFamilyMap); // will get added to the memstore later
+            }
+          }
         }
       }
 
@@ -3310,9 +3339,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         // call the coprocessor hook to do any finalization steps
         // after the put is done
         MiniBatchOperationInProgress<Mutation> miniBatchOp =
-            new MiniBatchOperationInProgress<Mutation>(batchOp.getMutationsForCoprocs(),
-                batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex,
-                lastIndexExclusive);
+          new MiniBatchOperationInProgress<Mutation>(batchOp.getMutationsForCoprocs(),
+          batchOp.retCodeDetails, batchOp.walEditsFromCoprocessors, firstIndex, lastIndexExclusive);
         coprocessorHost.postBatchMutateIndispensably(miniBatchOp, success);
       }
 
@@ -3320,6 +3348,18 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     }
   }
 
+  private void mergeFamilyMaps(Map<byte[], List<Cell>> familyMap,
+      Map<byte[], List<Cell>> toBeMerged) {
+    for (Map.Entry<byte[], List<Cell>> entry : toBeMerged.entrySet()) {
+      List<Cell> cells = familyMap.get(entry.getKey());
+      if (cells == null) {
+        familyMap.put(entry.getKey(), entry.getValue());
+      } else {
+        cells.addAll(entry.getValue());
+      }
+    }
+  }
+
   private void appendCurrentNonces(final Mutation mutation, final boolean replay,
       final WALEdit walEdit, final long now, final long currentNonceGroup, final long currentNonce)
   throws IOException {
@@ -3348,18 +3388,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     familyMaps[lastIndexExclusive] = familyMap;
 
     try {
-      if (mutation instanceof Put) {
-        // Check the families in the put. If bad, skip this one.
-        if (batchOp.isInReplay()) {
-          removeNonExistentColumnFamilyForReplay(familyMap);
-        } else {
-          checkFamilies(familyMap.keySet());
-        }
-        checkTimestamps(mutation.getFamilyCellMap(), now);
-      } else {
-        prepareDelete((Delete)mutation);
-      }
-      checkRow(mutation.getRow(), "doMiniBatchMutation");
+      checkAndPrepareMutation(mutation, batchOp.isInReplay(), familyMap, now);
     } catch (NoSuchColumnFamilyException nscf) {
       LOG.warn("No such column family in batch mutation", nscf);
       batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus(
@@ -3379,6 +3408,23 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     return skip;
   }
 
+  private void checkAndPrepareMutation(Mutation mutation, boolean replay,
+      final Map<byte[], List<Cell>> familyMap, final long now)
+          throws IOException {
+    if (mutation instanceof Put) {
+      // Check the families in the put. If bad, skip this one.
+      if (replay) {
+        removeNonExistentColumnFamilyForReplay(familyMap);
+      } else {
+        checkFamilies(familyMap.keySet());
+      }
+      checkTimestamps(mutation.getFamilyCellMap(), now);
+    } else {
+      prepareDelete((Delete)mutation);
+    }
+    checkRow(mutation.getRow(), "doMiniBatchMutation");
+  }
+
   /**
    * During replay, there could exist column families which are removed between region server
    * failure and replay

http://git-wip-us.apache.org/repos/asf/hbase/blob/d05a3722/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
index 2b12dec..cdbecac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MiniBatchOperationInProgress.java
@@ -18,20 +18,22 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
 
 /**
  * Wraps together the mutations which are applied as a batch to the region and their operation
- * status and WALEdits. 
+ * status and WALEdits.
  * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#preBatchMutate(
  * ObserverContext, MiniBatchOperationInProgress)
  * @see org.apache.hadoop.hbase.coprocessor.RegionObserver#postBatchMutate(
  * ObserverContext, MiniBatchOperationInProgress)
  * @param T Pair&lt;Mutation, Integer&gt; pair of Mutations and associated rowlock ids .
  */
-@InterfaceAudience.Private
+@InterfaceAudience.LimitedPrivate("Coprocessors")
 public class MiniBatchOperationInProgress<T> {
   private final T[] operations;
+  private Mutation[][] operationsFromCoprocessors;
   private final OperationStatus[] retCodeDetails;
   private final WALEdit[] walEditsFromCoprocessors;
   private final int firstIndex;
@@ -63,7 +65,7 @@ public class MiniBatchOperationInProgress<T> {
 
   /**
    * Sets the status code for the operation(Mutation) at the specified position.
-   * By setting this status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} 
+   * By setting this status, {@link org.apache.hadoop.hbase.coprocessor.RegionObserver}
    * can make HRegion to skip Mutations.
    * @param index
    * @param opStatus
@@ -103,4 +105,25 @@ public class MiniBatchOperationInProgress<T> {
     }
     return this.firstIndex + index;
   }
+
+  /**
+   * Add more Mutations corresponding to the Mutation at the given index to be committed atomically
+   * in the same batch. These mutations are applied to the WAL and applied to the memstore as well.
+   * The timestamp of the cells in the given Mutations MUST be obtained from the original mutation.
+   *
+   * @param index the index that corresponds to the original mutation index in the batch
+   * @param newOperations the Mutations to add
+   */
+  public void addOperationsFromCP(int index, Mutation[] newOperations) {
+    if (this.operationsFromCoprocessors == null) {
+      // lazy allocation to save on object allocation in case this is not used
+      this.operationsFromCoprocessors = new Mutation[operations.length][];
+    }
+    this.operationsFromCoprocessors[getAbsoluteIndex(index)] = newOperations;
+  }
+
+  public Mutation[] getOperationsFromCoprocessors(int index) {
+    return operationsFromCoprocessors == null ? null :
+        operationsFromCoprocessors[getAbsoluteIndex(index)];
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d05a3722/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
index 1947a1b..995ea93 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MultiRowMutationProcessor.java
@@ -60,7 +60,7 @@ MultiRowMutationProcessorResponse> {
   public boolean readOnly() {
     return false;
   }
-  
+
   @Override
   public MultiRowMutationProcessorResponse getResult() {
     return MultiRowMutationProcessorResponse.getDefaultInstance();

http://git-wip-us.apache.org/repos/asf/hbase/blob/d05a3722/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java
new file mode 100644
index 0000000..98e930a
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionObserverForAddingMutationsFromCoprocessors.java
@@ -0,0 +1,282 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.coprocessor;
+
+import static org.junit.Assert.*;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+import com.google.common.collect.Lists;
+
+@Category(MediumTests.class)
+public class TestRegionObserverForAddingMutationsFromCoprocessors {
+
+  private static final Log LOG
+    = LogFactory.getLog(TestRegionObserverForAddingMutationsFromCoprocessors.class);
+
+  private static HBaseTestingUtility util;
+  private static final byte[] dummy = Bytes.toBytes("dummy");
+  private static final byte[] row1 = Bytes.toBytes("r1");
+  private static final byte[] row2 = Bytes.toBytes("r2");
+  private static final byte[] row3 = Bytes.toBytes("r3");
+  private static final byte[] test = Bytes.toBytes("test");
+
+  @Rule
+  public TestName name = new TestName();
+  private TableName tableName;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    Configuration conf = HBaseConfiguration.create();
+    conf.set(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, TestWALObserver.class.getName());
+    util = new HBaseTestingUtility(conf);
+    util.startMiniCluster();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    util.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    tableName = TableName.valueOf(name.getMethodName());
+  }
+
+  private void createTable(String coprocessor) throws IOException {
+    HTableDescriptor htd = new HTableDescriptor(tableName)
+        .addFamily(new HColumnDescriptor(dummy))
+        .addFamily(new HColumnDescriptor(test))
+        .addCoprocessor(coprocessor);
+    util.getAdmin().createTable(htd);
+  }
+
+  /**
+   * Test various multiput operations.
+   * @throws Exception
+   */
+  @Test
+  public void testMulti() throws Exception {
+    createTable(TestMultiMutationCoprocessor.class.getName());
+
+    try (Table t = util.getConnection().getTable(tableName)) {
+      t.put(new Put(row1).addColumn(test, dummy, dummy));
+      assertRowCount(t, 3);
+    }
+  }
+
+  /**
+   * Tests that added mutations from coprocessors end up in the WAL.
+   */
+  @Test
+  public void testCPMutationsAreWrittenToWALEdit() throws Exception {
+    createTable(TestMultiMutationCoprocessor.class.getName());
+
+    try (Table t = util.getConnection().getTable(tableName)) {
+      t.put(new Put(row1).addColumn(test, dummy, dummy));
+      assertRowCount(t, 3);
+    }
+
+    assertNotNull(TestWALObserver.savedEdit);
+    assertEquals(4, TestWALObserver.savedEdit.getCells().size());
+  }
+
+  private static void assertRowCount(Table t, int expected) throws IOException {
+    try (ResultScanner scanner = t.getScanner(new Scan())) {
+      int i = 0;
+      for (Result r: scanner) {
+        LOG.info(r.toString());
+        i++;
+      }
+      assertEquals(expected, i);
+    }
+  }
+
+  @Test
+  public void testDeleteCell() throws Exception {
+    createTable(TestDeleteCellCoprocessor.class.getName());
+
+    try (Table t = util.getConnection().getTable(tableName)) {
+      t.put(Lists.newArrayList(
+        new Put(row1).addColumn(test, dummy, dummy),
+        new Put(row2).addColumn(test, dummy, dummy),
+        new Put(row3).addColumn(test, dummy, dummy)
+          ));
+
+      assertRowCount(t, 3);
+
+      t.delete(new Delete(test).addColumn(test, dummy)); // delete non-existing row
+      assertRowCount(t, 1);
+    }
+  }
+
+  @Test
+  public void testDeleteFamily() throws Exception {
+    createTable(TestDeleteFamilyCoprocessor.class.getName());
+
+    try (Table t = util.getConnection().getTable(tableName)) {
+      t.put(Lists.newArrayList(
+        new Put(row1).addColumn(test, dummy, dummy),
+        new Put(row2).addColumn(test, dummy, dummy),
+        new Put(row3).addColumn(test, dummy, dummy)
+          ));
+
+      assertRowCount(t, 3);
+
+      t.delete(new Delete(test).addFamily(test)); // delete non-existing row
+      assertRowCount(t, 1);
+    }
+  }
+
+  @Test
+  public void testDeleteRow() throws Exception {
+    createTable(TestDeleteRowCoprocessor.class.getName());
+
+    try (Table t = util.getConnection().getTable(tableName)) {
+      t.put(Lists.newArrayList(
+        new Put(row1).addColumn(test, dummy, dummy),
+        new Put(row2).addColumn(test, dummy, dummy),
+        new Put(row3).addColumn(test, dummy, dummy)
+          ));
+
+      assertRowCount(t, 3);
+
+      t.delete(new Delete(test).addColumn(test, dummy)); // delete non-existing row
+      assertRowCount(t, 1);
+    }
+  }
+
+  public static class TestMultiMutationCoprocessor extends BaseRegionObserver {
+    @Override
+    public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
+        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
+      Mutation mut = miniBatchOp.getOperation(0);
+      List<Cell> cells = mut.getFamilyCellMap().get(test);
+      Put[] puts = new Put[] {
+          new Put(row1).addColumn(test, dummy, cells.get(0).getTimestamp(),
+            Bytes.toBytes("cpdummy")),
+          new Put(row2).addColumn(test, dummy, cells.get(0).getTimestamp(), dummy),
+          new Put(row3).addColumn(test, dummy, cells.get(0).getTimestamp(), dummy),
+      };
+      LOG.info("Putting:" + puts);
+      miniBatchOp.addOperationsFromCP(0, puts);
+    }
+  }
+
+  public static class TestDeleteCellCoprocessor extends BaseRegionObserver {
+    @Override
+    public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
+        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
+      Mutation mut = miniBatchOp.getOperation(0);
+
+      if (mut instanceof Delete) {
+        List<Cell> cells = mut.getFamilyCellMap().get(test);
+        Delete[] deletes = new Delete[] {
+            // delete only 2 rows
+            new Delete(row1).addColumns(test, dummy, cells.get(0).getTimestamp()),
+            new Delete(row2).addColumns(test, dummy, cells.get(0).getTimestamp()),
+        };
+        LOG.info("Deleting:" + Arrays.toString(deletes));
+        miniBatchOp.addOperationsFromCP(0, deletes);
+      }
+    }
+  }
+
+  public static class TestDeleteFamilyCoprocessor extends BaseRegionObserver {
+    @Override
+    public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
+        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
+      Mutation mut = miniBatchOp.getOperation(0);
+
+      if (mut instanceof Delete) {
+        List<Cell> cells = mut.getFamilyCellMap().get(test);
+        Delete[] deletes = new Delete[] {
+            // delete only 2 rows
+            new Delete(row1).addFamily(test, cells.get(0).getTimestamp()),
+            new Delete(row2).addFamily(test, cells.get(0).getTimestamp()),
+        };
+        LOG.info("Deleting:" + Arrays.toString(deletes));
+        miniBatchOp.addOperationsFromCP(0, deletes);
+      }
+    }
+  }
+
+  public static class TestDeleteRowCoprocessor extends BaseRegionObserver {
+    @Override
+    public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
+        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
+      Mutation mut = miniBatchOp.getOperation(0);
+
+      if (mut instanceof Delete) {
+        List<Cell> cells = mut.getFamilyCellMap().get(test);
+        Delete[] deletes = new Delete[] {
+            // delete only 2 rows
+            new Delete(row1, cells.get(0).getTimestamp()),
+            new Delete(row2, cells.get(0).getTimestamp()),
+        };
+        LOG.info("Deleting:" + Arrays.toString(deletes));
+        miniBatchOp.addOperationsFromCP(0, deletes);
+      }
+    }
+  }
+
+  public static class TestWALObserver extends BaseWALObserver {
+    static WALEdit savedEdit = null;
+    @Override
+    public void postWALWrite(ObserverContext<? extends WALCoprocessorEnvironment> ctx,
+        HRegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
+      if (info.getTable().equals(TableName.valueOf("testCPMutationsAreWrittenToWALEdit"))) {
+        savedEdit = logEdit;
+      }
+      super.postWALWrite(ctx, info, logKey, logEdit);
+    }
+  }
+}


[31/50] hbase git commit: HBASE-15948 Port "HADOOP-9956 RPC listener inefficiently assigns connections to readers" Adds "HADOOP-9955 RPC idle connection closing is extremely inefficient" Then removes the queue added by HADOOP-9956, at Enis' suggestion

Posted by sy...@apache.org.
HBASE-15948 Port "HADOOP-9956 RPC listener inefficiently assigns connections to readers"
Adds "HADOOP-9955 RPC idle connection closing is extremely inefficient"
Then removes the queue added by HADOOP-9956, at Enis' suggestion

    Changes how we do accounting of Connections to match how it is done in Hadoop.
    Adds a ConnectionManager class. Adds new configurations for this new class.

    "hbase.ipc.client.idlethreshold" 4000
    "hbase.ipc.client.connection.idle-scan-interval.ms" 10000
    "hbase.ipc.client.connection.maxidletime" 10000
    "hbase.ipc.client.kill.max" 10
    "hbase.ipc.server.handler.queue.size" 100

    The new scheme does away with the synchronization that purportedly would
    freeze out reads while we were cleaning up stale connections (according to
    HADOOP-9955).

    Also adds a new mechanism for accepting Connections: we pull in as many as
    we can at a time and add them to a Queue, instead of handling one at a
    time. This can help with bursty traffic, according to HADOOP-9956. It also
    removes blocking while a Reader is busy parsing a request. Adds the
    configuration "hbase.ipc.server.read.connection-queue.size", with a default
    of 100, for the queue size.

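For reference, these knobs can be overridden through the standard Hadoop
Configuration API; a sketch using the defaults quoted above:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class IpcConnectionTuningSketch {
    public static void main(String[] args) {
      // Values shown are the defaults listed in the commit message above.
      Configuration conf = HBaseConfiguration.create();
      conf.setInt("hbase.ipc.client.idlethreshold", 4000);
      conf.setInt("hbase.ipc.client.connection.idle-scan-interval.ms", 10000);
      conf.setInt("hbase.ipc.client.connection.maxidletime", 10000);
      conf.setInt("hbase.ipc.client.kill.max", 10);
      conf.setInt("hbase.ipc.server.handler.queue.size", 100);
      conf.setInt("hbase.ipc.server.read.connection-queue.size", 100);
    }
  }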

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e0b70c00
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e0b70c00
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e0b70c00

Branch: refs/heads/hbase-12439
Commit: e0b70c00e74aeaac33570508e3732a53daea839e
Parents: da88b48
Author: stack <st...@apache.org>
Authored: Fri Jun 3 15:38:07 2016 -0700
Committer: stack <st...@apache.org>
Committed: Tue Jun 7 13:10:14 2016 -0700

----------------------------------------------------------------------
 .../hbase/ipc/MetricsHBaseServerSource.java     |  10 +-
 .../ipc/MetricsHBaseServerWrapperImpl.java      |   6 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 408 +++++++++++--------
 .../regionserver/SimpleRpcSchedulerFactory.java |   2 +-
 .../hadoop/hbase/ipc/AbstractTestIPC.java       |   2 +-
 5 files changed, 241 insertions(+), 187 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/e0b70c00/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index bb89789..ce57e0f 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -52,14 +52,16 @@ public interface MetricsHBaseServerSource extends BaseSource {
   String TOTAL_CALL_TIME_NAME = "totalCallTime";
   String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time.";
   String QUEUE_SIZE_NAME = "queueSize";
-  String QUEUE_SIZE_DESC = "Number of bytes in the call queues.";
+  String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " +
+    "parsed and is waiting to run or is currently being executed.";
   String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue";
-  String GENERAL_QUEUE_DESC = "Number of calls in the general call queue.";
+  String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " +
+    "parsed requests waiting in scheduler to be executed";
   String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue";
   String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue";
   String REPLICATION_QUEUE_DESC =
-      "Number of calls in the replication call queue.";
-  String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue.";
+      "Number of calls in the replication call queue waiting to be run";
+  String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run";
   String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections";
   String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections.";
   String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler";

http://git-wip-us.apache.org/repos/asf/hbase/blob/e0b70c00/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
index 9979c75..4f53709 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
@@ -36,7 +36,7 @@ public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper
     if (!isServerStarted()) {
       return 0;
     }
-    return server.callQueueSize.get();
+    return server.callQueueSizeInBytes.get();
   }
 
   @Override
@@ -65,10 +65,10 @@ public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper
 
   @Override
   public int getNumOpenConnections() {
-    if (!isServerStarted() || this.server.connectionList == null) {
+    if (!isServerStarted()) {
       return 0;
     }
-    return server.connectionList.size();
+    return server.getNumOpenConnections();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/e0b70c00/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 483ce86..aca3fdd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -48,15 +48,16 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedDeque;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
@@ -113,6 +114,7 @@ import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
@@ -183,11 +185,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    */
   static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10;
 
-  /**
-   * The maximum size that we can hold in the RPC queue
-   */
-  private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024;
-
   private final IPCUtil ipcUtil;
 
   private static final String AUTH_FAILED_FOR = "Auth failed for ";
@@ -210,22 +207,30 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   protected int port;                             // port we listen on
   protected InetSocketAddress address;            // inet address we listen on
   private int readThreads;                        // number of read threads
-  protected int maxIdleTime;                      // the maximum idle time after
-                                                  // which a client may be
-                                                  // disconnected
-  protected int thresholdIdleConnections;         // the number of idle
-                                                  // connections after which we
-                                                  // will start cleaning up idle
-                                                  // connections
-  int maxConnectionsToNuke;                       // the max number of
-                                                  // connections to nuke
-                                                  // during a cleanup
-
   protected MetricsHBaseServer metrics;
 
   protected final Configuration conf;
 
-  private int maxQueueSize;
+  /**
+   * Maximum size in bytes of the currently queued and running Calls. If a new Call puts us over
+   * this size, then we will reject the call (after parsing it though). It will go back to the
+   * client and client will retry. Set this size with "hbase.ipc.server.max.callqueue.size". The
+   * call queue size gets incremented after we parse a call and before we add it to the queue of
+   * calls for the scheduler to use. It get decremented after we have 'run' the Call. The current
+   * size is kept in {@link #callQueueSizeInBytes}.
+   * @see {@link #callQueueSizeInBytes}
+   * @see {@link #DEFAULT_MAX_CALLQUEUE_SIZE}
+   * @see {@link #callQueueSizeInBytes}
+   */
+  private final long maxQueueSizeInBytes;
+  private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024;
+
+  /**
+   * This is a running count of the size in bytes of all outstanding calls whether currently
+   * executing or queued waiting to be run.
+   */
+  protected final Counter callQueueSizeInBytes = new Counter();
+
   protected int socketSendBufferSize;
   protected final boolean tcpNoDelay;   // if T then disable Nagle's Algorithm
   protected final boolean tcpKeepAlive; // if T then use keepalives
@@ -244,19 +249,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    */
   volatile boolean started = false;
 
-  /**
-   * This is a running count of the size of all outstanding calls by size.
-   */
-  protected final Counter callQueueSize = new Counter();
-
-  protected final List<Connection> connectionList =
-    Collections.synchronizedList(new LinkedList<Connection>());
-  //maintain a list
-  //of client connections
+  // maintains the set of client connections and handles idle timeouts
+  private ConnectionManager connectionManager;
   private Listener listener = null;
   protected Responder responder = null;
   protected AuthenticationTokenSecretManager authTokenSecretMgr = null;
-  protected int numConnections = 0;
 
   protected HBaseRPCErrorHandler errorHandler = null;
 
@@ -623,18 +620,13 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     private Selector selector = null; //the selector that we use for the server
     private Reader[] readers = null;
     private int currentReader = 0;
-    private Random rand = new Random();
-    private long lastCleanupRunTime = 0; //the last time when a cleanup connec-
-                                         //-tion (for idle connections) ran
-    private long cleanupInterval = 10000; //the minimum interval between
-                                          //two cleanup runs
-    private int backlogLength;
 
     private ExecutorService readPool;
 
     public Listener(final String name) throws IOException {
       super(name);
-      backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
+      // The backlog of requests that we will have the serversocket carry.
+      int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
       // Create a new server socket and set to non blocking mode
       acceptChannel = ServerSocketChannel.open();
       acceptChannel.configureBlocking(false);
@@ -644,9 +636,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
       address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
       // create a selector;
-      selector= Selector.open();
+      selector = Selector.open();
 
       readers = new Reader[readThreads];
+      // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it
+      // has an advantage in that it is easy to shutdown the pool.
       readPool = Executors.newFixedThreadPool(readThreads,
         new ThreadFactoryBuilder().setNameFormat(
           "RpcServer.reader=%d,bindAddress=" + bindAddress.getHostName() +
@@ -667,12 +661,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
 
     private class Reader implements Runnable {
-      private volatile boolean adding = false;
       private final Selector readSelector;
 
       Reader() throws IOException {
         this.readSelector = Selector.open();
       }
+
       @Override
       public void run() {
         try {
@@ -686,14 +680,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         }
       }
 
-      private synchronized void doRunLoop() {
+      private void doRunLoop() {
         while (running) {
           try {
             readSelector.select();
-            while (adding) {
-              this.wait(1000);
-            }
-
             Iterator<SelectionKey> iter = readSelector.selectedKeys().iterator();
             while (iter.hasNext()) {
               SelectionKey key = iter.next();
@@ -703,9 +693,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
                   doRead(key);
                 }
               }
+              key = null;
             }
           } catch (InterruptedException e) {
-            LOG.debug("Interrupted while sleeping");
+            if (running) {                      // unexpected -- log it
+              LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
+            }
             return;
           } catch (IOException ex) {
             LOG.info(getName() + ": IOException in Reader", ex);
@@ -714,76 +707,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       }
 
       /**
-       * This gets reader into the state that waits for the new channel
-       * to be registered with readSelector. If it was waiting in select()
-       * the thread will be woken up, otherwise whenever select() is called
-       * it will return even if there is nothing to read and wait
-       * in while(adding) for finishAdd call
+       * Updating the readSelector while it's being used is not thread-safe,
+       * so the connection must be queued.  The reader will drain the queue
+       * and update its readSelector before performing the next select
        */
-      public void startAdd() {
-        adding = true;
+      public void addConnection(Connection conn) throws IOException {
+        conn.channel.register(readSelector, SelectionKey.OP_READ, conn);
         readSelector.wakeup();
       }
-
-      public synchronized SelectionKey registerChannel(SocketChannel channel)
-        throws IOException {
-        return channel.register(readSelector, SelectionKey.OP_READ);
-      }
-
-      public synchronized void finishAdd() {
-        adding = false;
-        this.notify();
-      }
-    }
-
-    /** cleanup connections from connectionList. Choose a random range
-     * to scan and also have a limit on the number of the connections
-     * that will be cleanedup per run. The criteria for cleanup is the time
-     * for which the connection was idle. If 'force' is true then all
-     * connections will be looked at for the cleanup.
-     * @param force all connections will be looked at for cleanup
-     */
-    private void cleanupConnections(boolean force) {
-      if (force || numConnections > thresholdIdleConnections) {
-        long currentTime = System.currentTimeMillis();
-        if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) {
-          return;
-        }
-        int start = 0;
-        int end = numConnections - 1;
-        if (!force) {
-          start = rand.nextInt() % numConnections;
-          end = rand.nextInt() % numConnections;
-          int temp;
-          if (end < start) {
-            temp = start;
-            start = end;
-            end = temp;
-          }
-        }
-        int i = start;
-        int numNuked = 0;
-        while (i <= end) {
-          Connection c;
-          synchronized (connectionList) {
-            try {
-              c = connectionList.get(i);
-            } catch (Exception e) {return;}
-          }
-          if (c.timedOut(currentTime)) {
-            if (LOG.isDebugEnabled())
-              LOG.debug(getName() + ": disconnecting client " + c.getHostAddress());
-            closeConnection(c);
-            numNuked++;
-            end--;
-            //noinspection UnusedAssignment
-            c = null;
-            if (!force && numNuked == maxConnectionsToNuke) break;
-          }
-          else i++;
-        }
-        lastCleanupRunTime = System.currentTimeMillis();
-      }
     }
 
     @Override
@@ -792,6 +723,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         "it will have per impact")
     public void run() {
       LOG.info(getName() + ": starting");
+      connectionManager.startIdleScan();
       while (running) {
         SelectionKey key = null;
         try {
@@ -815,7 +747,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
             if (errorHandler.checkOOME(e)) {
               LOG.info(getName() + ": exiting on OutOfMemoryError");
               closeCurrentConnection(key, e);
-              cleanupConnections(true);
+              connectionManager.closeIdle(true);
               return;
             }
           } else {
@@ -824,22 +756,18 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
             // some thread(s) a chance to finish
             LOG.warn(getName() + ": OutOfMemoryError in server select", e);
             closeCurrentConnection(key, e);
-            cleanupConnections(true);
+            connectionManager.closeIdle(true);
             try {
               Thread.sleep(60000);
             } catch (InterruptedException ex) {
               LOG.debug("Interrupted while sleeping");
-              return;
             }
           }
         } catch (Exception e) {
           closeCurrentConnection(key, e);
         }
-        cleanupConnections(false);
       }
-
       LOG.info(getName() + ": stopping");
-
       synchronized (this) {
         try {
           acceptChannel.close();
@@ -851,10 +779,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         selector= null;
         acceptChannel= null;
 
-        // clean up all connections
-        while (!connectionList.isEmpty()) {
-          closeConnection(connectionList.remove(0));
-        }
+        // close all connections
+        connectionManager.stopIdleScan();
+        connectionManager.closeAll();
       }
     }
 
@@ -862,10 +789,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       if (key != null) {
         Connection c = (Connection)key.attachment();
         if (c != null) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() +
-                (e != null ? " on error " + e.getMessage() : ""));
-          }
           closeConnection(c);
           key.attach(null);
         }
@@ -876,37 +799,24 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return address;
     }
 
-    void doAccept(SelectionKey key) throws IOException, OutOfMemoryError {
-      Connection c;
+    void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfMemoryError {
       ServerSocketChannel server = (ServerSocketChannel) key.channel();
-
       SocketChannel channel;
       while ((channel = server.accept()) != null) {
-        try {
-          channel.configureBlocking(false);
-          channel.socket().setTcpNoDelay(tcpNoDelay);
-          channel.socket().setKeepAlive(tcpKeepAlive);
-        } catch (IOException ioe) {
-          channel.close();
-          throw ioe;
-        }
-
+        channel.configureBlocking(false);
+        channel.socket().setTcpNoDelay(tcpNoDelay);
+        channel.socket().setKeepAlive(tcpKeepAlive);
         Reader reader = getReader();
-        try {
-          reader.startAdd();
-          SelectionKey readKey = reader.registerChannel(channel);
-          c = getConnection(channel, System.currentTimeMillis());
-          readKey.attach(c);
-          synchronized (connectionList) {
-            connectionList.add(numConnections, c);
-            numConnections++;
+        Connection c = connectionManager.register(channel);
+        // If the connectionManager can't take it, close the connection.
+        if (c == null) {
+          if (channel.isOpen()) {
+            IOUtils.cleanup(null, channel);
           }
-          if (LOG.isDebugEnabled())
-            LOG.debug(getName() + ": connection from " + c.toString() +
-                "; # active connections: " + numConnections);
-        } finally {
-          reader.finishAdd();
+          continue;
         }
+        key.attach(c);  // so closeCurrentConnection can get the object
+        reader.addConnection(c);
       }
     }
 
@@ -919,12 +829,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       c.setLastContact(System.currentTimeMillis());
       try {
         count = c.readAndProcess();
-
-        if (count > 0) {
-          c.setLastContact(System.currentTimeMillis());
-        }
-
       } catch (InterruptedException ieo) {
+        LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
         throw ieo;
       } catch (Exception e) {
         if (LOG.isDebugEnabled()) {
@@ -933,12 +839,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         count = -1; //so that the (count < 0) block is executed
       }
       if (count < 0) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(getName() + ": DISCONNECTING client " + c.toString() +
-              " because read count=" + count +
-              ". Number of active connections: " + numConnections);
-        }
         closeConnection(c);
+        c = null;
+      } else {
+        c.setLastContact(System.currentTimeMillis());
       }
     }
 
@@ -1355,6 +1259,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return null;
     }
 
+    public long getLastContact() {
+      return lastContact;
+    }
+
     /* Return true if the connection has no outstanding rpc */
     private boolean isIdle() {
       return rpcCount.get() == 0;
@@ -1370,10 +1278,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       rpcCount.increment();
     }
 
-    protected boolean timedOut(long currentTime) {
-      return isIdle() && currentTime - lastContact > maxIdleTime;
-    }
-
     private UserGroupInformation getAuthorizedUgi(String authorizedId)
         throws IOException {
       UserGroupInformation authorizedUgi;
@@ -1883,7 +1787,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       }
       // Enforcing the call queue size, this triggers a retry in the client
       // This is a bit late to be doing this check - we have already read in the total request.
-      if ((totalRequestSize + callQueueSize.get()) > maxQueueSize) {
+      if ((totalRequestSize + callQueueSizeInBytes.get()) > maxQueueSizeInBytes) {
         final Call callTooBig =
           new Call(id, this.service, null, null, null, null, this,
             responder, totalRequestSize, null, null, 0);
@@ -1954,7 +1858,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
               totalRequestSize, traceInfo, this.addr, timeout);
 
       if (!scheduler.dispatch(new CallRunner(RpcServer.this, call))) {
-        callQueueSize.add(-1 * call.getSize());
+        callQueueSizeInBytes.add(-1 * call.getSize());
 
         ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
         metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
@@ -2093,12 +1997,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     this.bindAddress = bindAddress;
     this.conf = conf;
     this.socketSendBufferSize = 0;
-    this.maxQueueSize =
-      this.conf.getInt("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
+    // See declaration above for documentation on what this size is.
+    this.maxQueueSizeInBytes =
+      this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
     this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10);
-    this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 1000);
-    this.maxConnectionsToNuke = conf.getInt("hbase.ipc.client.kill.max", 10);
-    this.thresholdIdleConnections = conf.getInt("hbase.ipc.client.idlethreshold", 4000);
     this.purgeTimeout = conf.getLong("hbase.ipc.client.call.purge.timeout",
       2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
     this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME);
@@ -2120,6 +2022,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
     // Create the responder here
     responder = new Responder();
+    connectionManager = new ConnectionManager();
     this.authorize = conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
     this.userProvider = UserProvider.instantiate(conf);
     this.isSecurityEnabled = userProvider.isHBaseSecurityEnabled();
@@ -2177,12 +2080,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   }
 
   protected void closeConnection(Connection connection) {
-    synchronized (connectionList) {
-      if (connectionList.remove(connection)) {
-        numConnections--;
-      }
-    }
-    connection.close();
+    connectionManager.close(connection);
   }
 
   Configuration getConf() {
@@ -2440,7 +2338,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
   @Override
   public void addCallSize(final long diff) {
-    this.callQueueSize.add(diff);
+    this.callQueueSizeInBytes.add(diff);
   }
 
   /**
@@ -2578,6 +2476,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   }
 
   /**
+   * The number of open RPC connections
+   * @return the number of open rpc connections
+   */
+  public int getNumOpenConnections() {
+    return connectionManager.size();
+  }
+
+  /**
    * Returns the username for any user associated with the current RPC
    * request or <code>null</code> if no user is set.
    */
@@ -2695,4 +2601,150 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   public RpcScheduler getScheduler() {
     return scheduler;
   }
+
+  private class ConnectionManager {
+    final private AtomicInteger count = new AtomicInteger();
+    final private Set<Connection> connections;
+
+    final private Timer idleScanTimer;
+    final private int idleScanThreshold;
+    final private int idleScanInterval;
+    final private int maxIdleTime;
+    final private int maxIdleToClose;
+
+    ConnectionManager() {
+      this.idleScanTimer = new Timer("RpcServer idle connection scanner for port " + port, true);
+      this.idleScanThreshold = conf.getInt("hbase.ipc.client.idlethreshold", 4000);
+      this.idleScanInterval =
+          conf.getInt("hbase.ipc.client.connection.idle-scan-interval.ms", 10000);
+      this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 10000);
+      this.maxIdleToClose = conf.getInt("hbase.ipc.client.kill.max", 10);
+      int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
+          HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
+      int maxConnectionQueueSize =
+          handlerCount * conf.getInt("hbase.ipc.server.handler.queue.size", 100);
+      // create a set with concurrency -and- a thread-safe iterator, add 2
+      // for listener and idle closer threads
+      this.connections = Collections.newSetFromMap(
+          new ConcurrentHashMap<Connection,Boolean>(
+              maxConnectionQueueSize, 0.75f, readThreads+2));
+    }
+
+    private boolean add(Connection connection) {
+      boolean added = connections.add(connection);
+      if (added) {
+        count.getAndIncrement();
+      }
+      return added;
+    }
+
+    private boolean remove(Connection connection) {
+      boolean removed = connections.remove(connection);
+      if (removed) {
+        count.getAndDecrement();
+      }
+      return removed;
+    }
+
+    int size() {
+      return count.get();
+    }
+
+    Connection[] toArray() {
+      return connections.toArray(new Connection[0]);
+    }
+
+    Connection register(SocketChannel channel) {
+      Connection connection = new Connection(channel, System.currentTimeMillis());
+      add(connection);
+      if (LOG.isDebugEnabled()) {
+        // Use metric names
+        LOG.debug("Server connection from " + connection +
+            "; numOpenConnections=" + size() +
+            ",  queueSize(bytes)=" + callQueueSizeInBytes.get() +
+            ", numCallsInGeneralQueue=" + scheduler.getGeneralQueueLength() +
+            ", numCallsInPriorityQueue=" + scheduler.getPriorityQueueLength());
+      }
+      return connection;
+    }
+
+    boolean close(Connection connection) {
+      boolean exists = remove(connection);
+      if (exists) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(Thread.currentThread().getName() +
+              ": disconnecting client " + connection +
+              ". Number of active connections: "+ size());
+        }
+        // only close if actually removed to avoid double-closing due
+        // to possible races
+        connection.close();
+      }
+      return exists;
+    }
+
+    // synch'ed to avoid explicit invocation upon OOM from colliding with
+    // timer task firing
+    synchronized void closeIdle(boolean scanAll) {
+      long minLastContact = System.currentTimeMillis() - maxIdleTime;
+      // concurrent iterator might miss new connections added
+      // during the iteration, but that's ok because they won't
+      // be idle yet anyway and will be caught on next scan
+      int closed = 0;
+      for (Connection connection : connections) {
+        // stop if connections dropped below threshold unless scanning all
+        if (!scanAll && size() < idleScanThreshold) {
+          break;
+        }
+        // stop if not scanning all and max connections are closed
+        if (connection.isIdle() &&
+            connection.getLastContact() < minLastContact &&
+            close(connection) &&
+            !scanAll && (++closed == maxIdleToClose)) {
+          break;
+        }
+      }
+    }
+
+    void closeAll() {
+      // use a copy of the connections to be absolutely sure the concurrent
+      // iterator doesn't miss a connection
+      for (Connection connection : toArray()) {
+        close(connection);
+      }
+    }
+
+    void startIdleScan() {
+      scheduleIdleScanTask();
+    }
+
+    void stopIdleScan() {
+      idleScanTimer.cancel();
+    }
+
+    private void scheduleIdleScanTask() {
+      if (!running) {
+        return;
+      }
+      TimerTask idleScanTask = new TimerTask(){
+        @Override
+        public void run() {
+          if (!running) {
+            return;
+          }
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(Thread.currentThread().getName()+": task running");
+          }
+          try {
+            closeIdle(false);
+          } finally {
+            // explicitly reschedule so next execution occurs relative
+            // to the end of this scan, not the beginning
+            scheduleIdleScanTask();
+          }
+        }
+      };
+      idleScanTimer.schedule(idleScanTask, idleScanInterval);
+    }
+  }
 }

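The new ConnectionManager above combines two patterns: a Set backed by a
ConcurrentHashMap, so the listener, the readers, and the idle closer can all
iterate and mutate it without explicit locking, and a TimerTask that
reschedules itself at the end of each run, so the scan interval is measured
from the end of the previous scan rather than its start. A minimal standalone
sketch of the same idea (class and method names here are illustrative, not
part of the patch):

import java.util.Collections;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;

public class IdleScanner<C> {
  // Concurrent set with a thread-safe, weakly consistent iterator.
  private final Set<C> connections =
      Collections.newSetFromMap(new ConcurrentHashMap<C, Boolean>());
  private final Timer timer = new Timer("idle-scanner", true); // daemon
  private final long scanIntervalMs;
  private volatile boolean running = true;

  public IdleScanner(long scanIntervalMs) {
    this.scanIntervalMs = scanIntervalMs;
  }

  public void add(C conn) { connections.add(conn); }
  public void start() { schedule(); }
  public void stop() { running = false; timer.cancel(); }

  private void schedule() {
    if (!running) return;
    timer.schedule(new TimerTask() {
      @Override public void run() {
        try {
          scanOnce();
        } finally {
          // Reschedule relative to the end of this scan, not its start.
          schedule();
        }
      }
    }, scanIntervalMs);
  }

  private void scanOnce() {
    // The iterator may miss connections added mid-scan; that is fine,
    // they cannot be idle yet and are caught on the next pass.
    for (C conn : connections) {
      if (isIdle(conn)) {
        connections.remove(conn);
      }
    }
  }

  protected boolean isIdle(C conn) { return false; } // idle policy goes here
}

Because each run schedules its successor, a slow scan delays the next one
instead of stacking overlapping executions.
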
http://git-wip-us.apache.org/repos/asf/hbase/blob/e0b70c00/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
index 1f496b4..743c5bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
@@ -41,7 +41,7 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
   @Override
   public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
     int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
-		HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
+        HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
 
     return new SimpleRpcScheduler(
       conf,

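Also part of this refactor, callQueueSize became callQueueSizeInBytes: the
server admits a request only while the queued bytes plus the incoming request
fit under maxQueueSizeInBytes, and credits the size back when dispatch fails.
A standalone sketch of that accounting, using
java.util.concurrent.atomic.LongAdder here for the counter (the names below
are illustrative, not the patch's API):

import java.util.concurrent.atomic.LongAdder;

public class CallQueueGate {
  private final LongAdder queuedBytes = new LongAdder();
  private final long maxBytes;

  public CallQueueGate(long maxBytes) { this.maxBytes = maxBytes; }

  /** Try to admit a request; false means the client should back off/retry. */
  boolean admit(long requestSize) {
    // Check-then-add is racy, so the bound is soft; a small overshoot is
    // harmless for backpressure and avoids a lock on the hot path.
    if (queuedBytes.sum() + requestSize > maxBytes) {
      return false;
    }
    queuedBytes.add(requestSize);
    return true;
  }

  /** Credit bytes back when a call completes or cannot be dispatched. */
  void release(long requestSize) {
    queuedBytes.add(-requestSize);
  }
}
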
http://git-wip-us.apache.org/repos/asf/hbase/blob/e0b70c00/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
index ceb945b..45cec78 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
@@ -263,7 +263,7 @@ public abstract class AbstractTestIPC {
       fail("Expected an exception to have been thrown!");
     } catch (Exception e) {
       LOG.info("Caught expected exception: " + e.toString());
-      assertTrue(StringUtils.stringifyException(e).contains("Injected fault"));
+      assertTrue(e.toString(), StringUtils.stringifyException(e).contains("Injected fault"));
     } finally {
       rpcServer.stop();
     }


[39/50] hbase git commit: HBASE-15989 Remove hbase.online.schema.update.enable

Posted by sy...@apache.org.
HBASE-15989 Remove hbase.online.schema.update.enable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d9463bcc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d9463bcc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d9463bcc

Branch: refs/heads/hbase-12439
Commit: d9463bcce0e36bfb67c82acd1d7483f63b2764b7
Parents: d5d9b7d
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Wed Jun 8 13:09:31 2016 -0700
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Wed Jun 8 13:09:31 2016 -0700

----------------------------------------------------------------------
 .../client/replication/ReplicationAdmin.java    | 13 +-----
 .../src/main/resources/hbase-default.xml        |  9 +----
 .../procedure/MasterDDLOperationHelper.java     | 15 -------
 .../master/procedure/ModifyTableProcedure.java  |  5 ---
 .../apache/hadoop/hbase/client/TestAdmin1.java  | 42 --------------------
 .../apache/hadoop/hbase/client/TestAdmin2.java  |  1 -
 .../client/TestCloneSnapshotFromClient.java     |  1 -
 .../hbase/client/TestFromClientSide3.java       |  2 -
 .../client/TestRestoreSnapshotFromClient.java   |  1 -
 .../hbase/io/encoding/TestChangingEncoding.java |  1 -
 .../hbase/master/TestTableLockManager.java      |  1 -
 .../regionserver/TestEncryptionKeyRotation.java |  4 +-
 ...sibilityLabelReplicationWithExpAsString.java |  1 -
 .../TestVisibilityLabelsReplication.java        |  1 -
 ...ibilityLabelsWithDefaultVisLabelService.java |  1 -
 .../TestRestoreFlushSnapshotFromClient.java     |  1 -
 .../src/main/ruby/shell/commands/alter.rb       | 16 ++++----
 .../hadoop/hbase/client/AbstractTestShell.java  |  1 -
 .../hbase/client/rsgroup/TestShellRSGroups.java |  1 -
 src/main/asciidoc/_chapters/hbase-default.adoc  | 10 -----
 20 files changed, 12 insertions(+), 115 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
index a2ad2e7..d062448 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java
@@ -516,9 +516,9 @@ public class ReplicationAdmin implements Closeable {
     if (repPeers == null || repPeers.size() <= 0) {
       throw new IllegalArgumentException("Found no peer cluster for replication.");
     }
-    
+
     final TableName onlyTableNameQualifier = TableName.valueOf(tableName.getQualifierAsString());
-    
+
     for (ReplicationPeer repPeer : repPeers) {
       Map<TableName, List<String>> tableCFMap = repPeer.getTableCFs();
       // TODO Currently peer TableCFs will not include namespace so we need to check only for table
@@ -595,20 +595,11 @@ public class ReplicationAdmin implements Closeable {
       admin = this.connection.getAdmin();
       HTableDescriptor htd = admin.getTableDescriptor(tableName);
       if (isTableRepEnabled(htd) ^ isRepEnabled) {
-        boolean isOnlineSchemaUpdateEnabled =
-            this.connection.getConfiguration()
-                .getBoolean("hbase.online.schema.update.enable", true);
-        if (!isOnlineSchemaUpdateEnabled) {
-          admin.disableTable(tableName);
-        }
         for (HColumnDescriptor hcd : htd.getFamilies()) {
           hcd.setScope(isRepEnabled ? HConstants.REPLICATION_SCOPE_GLOBAL
               : HConstants.REPLICATION_SCOPE_LOCAL);
         }
         admin.modifyTable(tableName, htd);
-        if (!isOnlineSchemaUpdateEnabled) {
-          admin.enableTable(tableName);
-        }
       }
     } finally {
       if (admin != null) {

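With the flag gone, the enabled-table branch of setTableRep reduces to a plain
online modifyTable. A minimal client-side sketch of the same flow (a sketch
only: it assumes a cluster reachable through hbase-site.xml on the classpath,
and 't1' is an example table name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class OnlineScopeChange {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName tableName = TableName.valueOf("t1");
      HTableDescriptor htd = admin.getTableDescriptor(tableName);
      for (HColumnDescriptor hcd : htd.getFamilies()) {
        hcd.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
      }
      // No disableTable/enableTable bracketing: the table stays online
      // while the schema change is applied.
      admin.modifyTable(tableName, htd);
    }
  }
}
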
http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-common/src/main/resources/hbase-default.xml
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/resources/hbase-default.xml b/hbase-common/src/main/resources/hbase-default.xml
index 62a6b62..55ac497 100644
--- a/hbase-common/src/main/resources/hbase-default.xml
+++ b/hbase-common/src/main/resources/hbase-default.xml
@@ -562,7 +562,7 @@ possible configurations would overwhelm and obscure the important.
   <property>
     <name>hbase.regions.slop</name>
     <value>0.001</value>
-    <description>Rebalance if any regionserver has average + (average * slop) regions. 
+    <description>Rebalance if any regionserver has average + (average * slop) regions.
       The default value of this parameter is 0.001 in StochasticLoadBalancer (the default load balancer),
       while the default is 0.2 in other load balancers (i.e., SimpleLoadBalancer).</description>
   </property>
@@ -865,7 +865,7 @@ possible configurations would overwhelm and obscure the important.
     Must be a multiple of 1024 else you will run into
     'java.io.IOException: Invalid HFile block magic' when you go to read from cache.
     If you specify no values here, then you pick up the default bucketsizes set
-    in code (See BucketAllocator#DEFAULT_BUCKET_SIZES). 
+    in code (See BucketAllocator#DEFAULT_BUCKET_SIZES).
   </description>
   </property>
   <property>
@@ -1132,11 +1132,6 @@ possible configurations would overwhelm and obscure the important.
       of servers, so this is most useful for debugging only.</description>
   </property>
   <property>
-    <name>hbase.online.schema.update.enable</name>
-    <value>true</value>
-    <description>Set true to enable online schema changes.</description>
-  </property>
-  <property>
     <name>hbase.table.lock.enable</name>
     <value>true</value>
     <description>Set to true to enable locking the table in zookeeper for schema change operations.

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
index f2ee97f..1214268 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterDDLOperationHelper.java
@@ -56,14 +56,6 @@ public final class MasterDDLOperationHelper {
   private MasterDDLOperationHelper() {}
 
   /**
-   * Check whether online schema change is allowed from config
-   **/
-  public static boolean isOnlineSchemaChangeAllowed(final MasterProcedureEnv env) {
-    return env.getMasterServices().getConfiguration()
-        .getBoolean("hbase.online.schema.update.enable", false);
-  }
-
-  /**
    * Check whether a table is modifiable - exists and either offline or online with config set
    * @param env MasterProcedureEnv
    * @param tableName name of the table
@@ -75,13 +67,6 @@ public final class MasterDDLOperationHelper {
     if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), tableName)) {
       throw new TableNotFoundException(tableName);
     }
-
-    // We only execute this procedure with table online if online schema change config is set.
-    if (!env.getMasterServices().getTableStateManager()
-        .isTableState(tableName, TableState.State.DISABLED)
-        && !MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
-      throw new TableNotDisabledException(tableName);
-    }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index 3f76df3..6c65718 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -300,11 +300,6 @@ public class ModifyTableProcedure
 
     if (env.getMasterServices().getTableStateManager()
         .isTableState(getTableName(), TableState.State.ENABLED)) {
-      // We only execute this procedure with table online if online schema change config is set.
-      if (!MasterDDLOperationHelper.isOnlineSchemaChangeAllowed(env)) {
-        throw new TableNotDisabledException(getTableName());
-      }
-
       if (modifiedHTableDescriptor.getRegionReplication() != unmodifiedHTableDescriptor
           .getRegionReplication()) {
         throw new IOException("REGION_REPLICATION change is not supported for enabled tables");

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index 545fccd..fd55f66 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -85,7 +85,6 @@ public class TestAdmin1 {
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
     TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
     TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
     TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);
@@ -503,8 +502,6 @@ public class TestAdmin1 {
   public void testOnlineChangeTableSchema() throws IOException, InterruptedException {
     final TableName tableName =
         TableName.valueOf("changeTableSchemaOnline");
-    TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean(
-        "hbase.online.schema.update.enable", true);
     HTableDescriptor [] tables = admin.listTables();
     int numTables = tables.length;
     TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
@@ -588,45 +585,6 @@ public class TestAdmin1 {
     assertFalse(this.admin.tableExists(tableName));
   }
 
-  @Test (timeout=300000)
-  public void testShouldFailOnlineSchemaUpdateIfOnlineSchemaIsNotEnabled()
-      throws Exception {
-    final TableName tableName = TableName.valueOf("changeTableSchemaOnlineFailure");
-    TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean(
-        "hbase.online.schema.update.enable", false);
-    HTableDescriptor[] tables = admin.listTables();
-    int numTables = tables.length;
-    TEST_UTIL.createTable(tableName, HConstants.CATALOG_FAMILY).close();
-    tables = this.admin.listTables();
-    assertEquals(numTables + 1, tables.length);
-
-    // FIRST, do htabledescriptor changes.
-    HTableDescriptor htd = this.admin.getTableDescriptor(tableName);
-    // Make a copy and assert copy is good.
-    HTableDescriptor copy = new HTableDescriptor(htd);
-    assertTrue(htd.equals(copy));
-    // Now amend the copy. Introduce differences.
-    long newFlushSize = htd.getMemStoreFlushSize() / 2;
-    if (newFlushSize <=0) {
-      newFlushSize = HTableDescriptor.DEFAULT_MEMSTORE_FLUSH_SIZE / 2;
-    }
-    copy.setMemStoreFlushSize(newFlushSize);
-    final String key = "anyoldkey";
-    assertTrue(htd.getValue(key) == null);
-    copy.setValue(key, key);
-    boolean expectedException = false;
-    try {
-      admin.modifyTable(tableName, copy);
-    } catch (TableNotDisabledException re) {
-      expectedException = true;
-    }
-    assertTrue("Online schema update should not happen.", expectedException);
-
-    // Reset the value for the other tests
-    TEST_UTIL.getMiniHBaseCluster().getMaster().getConfiguration().setBoolean(
-        "hbase.online.schema.update.enable", true);
-  }
-
   protected void verifyRoundRobinDistribution(ClusterConnection c, RegionLocator regionLocator, int
       expectedRegions) throws IOException {
     int numRS = c.getCurrentNrHRS();

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
index ff53c49..d088fc4 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin2.java
@@ -83,7 +83,6 @@ public class TestAdmin2 {
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
     TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
     TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
     TEST_UTIL.getConfiguration().setInt("hbase.client.retries.number", 6);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java
index aeb82f4..65a67d0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCloneSnapshotFromClient.java
@@ -61,7 +61,6 @@ public class TestCloneSnapshotFromClient {
 
   protected static void setupConfiguration() {
     TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
-    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
     TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
     TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
     TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
index a967d97..a918ce6 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide3.java
@@ -70,8 +70,6 @@ public class TestFromClientSide3 {
    */
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
-    TEST_UTIL.getConfiguration().setBoolean(
-        "hbase.online.schema.update.enable", true);
     TEST_UTIL.startMiniCluster(SLAVES);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
index d31df42..a3fc640 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestRestoreSnapshotFromClient.java
@@ -83,7 +83,6 @@ public class TestRestoreSnapshotFromClient {
 
   protected static void setupConf(Configuration conf) {
     TEST_UTIL.getConfiguration().setBoolean(SnapshotManager.HBASE_SNAPSHOT_ENABLED, true);
-    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
     TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
     TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
     TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
index 6359bef..6cf4d68 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/encoding/TestChangingEncoding.java
@@ -103,7 +103,6 @@ public class TestChangingEncoding {
     conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024 * 1024);
     // ((Log4JLogger)RpcServerImplementation.LOG).getLogger().setLevel(Level.TRACE);
     // ((Log4JLogger)RpcClient.LOG).getLogger().setLevel(Level.TRACE);
-    conf.setBoolean("hbase.online.schema.update.enable", true);
     TEST_UTIL.startMiniCluster();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
index 573fdcb..36f505b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
@@ -88,7 +88,6 @@ public class TestTableLockManager {
   private static final CountDownLatch addColumn = new CountDownLatch(1);
 
   public void prepareMiniCluster() throws Exception {
-    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
     TEST_UTIL.startMiniCluster(2);
     TEST_UTIL.createTable(TABLE_NAME, FAMILY);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
index 82be1db..cee64e0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestEncryptionKeyRotation.java
@@ -80,8 +80,6 @@ public class TestEncryptionKeyRotation {
     conf.setInt("hfile.format.version", 3);
     conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
     conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
-    // Enable online schema updates
-    conf.setBoolean("hbase.online.schema.update.enable", true);
 
     // Start the minicluster
     TEST_UTIL.startMiniCluster(1);
@@ -229,7 +227,7 @@ public class TestEncryptionKeyRotation {
       }
     }
   }
-  
+
   private static List<Path> findStorefilePaths(TableName tableName) throws Exception {
     List<Path> paths = new ArrayList<Path>();
     for (Region region:

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
index 18a1088..9483ac9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelReplicationWithExpAsString.java
@@ -80,7 +80,6 @@ public class TestVisibilityLabelReplicationWithExpAsString extends TestVisibilit
     // setup configuration
     conf = HBaseConfiguration.create();
     conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
-    conf.setBoolean("hbase.online.schema.update.enable", true);
     conf.setInt("hfile.format.version", 3);
     conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
     conf.setInt("replication.source.size.capacity", 10240);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
index c9d9530..4ed47b0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsReplication.java
@@ -130,7 +130,6 @@ public class TestVisibilityLabelsReplication {
     // setup configuration
     conf = HBaseConfiguration.create();
     conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
-    conf.setBoolean("hbase.online.schema.update.enable", true);
     conf.setInt("hfile.format.version", 3);
     conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
     conf.setInt("replication.source.size.capacity", 10240);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
index a229bdb..63c08a2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
@@ -67,7 +67,6 @@ public class TestVisibilityLabelsWithDefaultVisLabelService extends TestVisibili
     // setup configuration
     conf = TEST_UTIL.getConfiguration();
     conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, false);
-    conf.setBoolean("hbase.online.schema.update.enable", true);
     VisibilityTestUtil.enableVisiblityLabels(conf);
     conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class,
         ScanLabelGenerator.class);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
index 04fce5c..bf26c69 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/snapshot/TestRestoreFlushSnapshotFromClient.java
@@ -70,7 +70,6 @@ public class TestRestoreFlushSnapshotFromClient {
   }
 
   protected static void setupConf(Configuration conf) {
-    UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
     UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
     UTIL.getConfiguration().setInt("hbase.client.pause", 250);
     UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-shell/src/main/ruby/shell/commands/alter.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/alter.rb b/hbase-shell/src/main/ruby/shell/commands/alter.rb
index 91b3e2e..8d6b6ca 100644
--- a/hbase-shell/src/main/ruby/shell/commands/alter.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/alter.rb
@@ -22,19 +22,17 @@ module Shell
     class Alter < Command
       def help
         return <<-EOF
-Alter a table. If the "hbase.online.schema.update.enable" property is set to
-false, then the table must be disabled (see help 'disable'). If the 
-"hbase.online.schema.update.enable" property is set to true, tables can be 
-altered without disabling them first. Altering enabled tables has caused problems 
-in the past, so use caution and test it before using in production. 
+Alter a table. Tables can be altered without disabling them first.
+Altering enabled tables has caused problems
+in the past, so use caution and test it before using in production.
 
-You can use the alter command to add, 
+You can use the alter command to add,
 modify or delete column families or change table configuration options.
 Column families work in a similar way as the 'create' command. The column family
 specification can either be a name string, or a dictionary with the NAME attribute.
 Dictionaries are described in the output of the 'help' command, with no arguments.
 
-For example, to change or add the 'f1' column family in table 't1' from 
+For example, to change or add the 'f1' column family in table 't1' from
 current value to keep a maximum of 5 cell VERSIONS, do:
 
   hbase> alter 't1', NAME => 'f1', VERSIONS => 5
@@ -48,7 +46,7 @@ To delete the 'f1' column family in table 'ns1:t1', use one of:
   hbase> alter 'ns1:t1', NAME => 'f1', METHOD => 'delete'
   hbase> alter 'ns1:t1', 'delete' => 'f1'
 
-You can also change table-scope attributes like MAX_FILESIZE, READONLY, 
+You can also change table-scope attributes like MAX_FILESIZE, READONLY,
 MEMSTORE_FLUSHSIZE, DURABILITY, etc. These can be put at the end;
 for example, to change the max size of a region to 128MB, do:
 
@@ -85,7 +83,7 @@ You can also set REGION_REPLICATION:
 
 There could be more than one alteration in one command:
 
-  hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 }, 
+  hbase> alter 't1', { NAME => 'f1', VERSIONS => 3 },
    { MAX_FILESIZE => '134217728' }, { METHOD => 'delete', NAME => 'f2' },
    OWNER => 'johndoe', METADATA => { 'mykey' => 'myvalue' }
 EOF

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
index 87d14dd..074b9f7 100644
--- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
@@ -37,7 +37,6 @@ public abstract class AbstractTestShell {
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
     // Start mini cluster
-    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
     TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
     TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
     TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java
index 155bdb4..5f3720e 100644
--- a/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java
+++ b/hbase-shell/src/test/rsgroup/org/apache/hadoop/hbase/client/rsgroup/TestShellRSGroups.java
@@ -54,7 +54,6 @@ public class TestShellRSGroups {
     basePath = System.getProperty("basedir");
 
     // Start mini cluster
-    TEST_UTIL.getConfiguration().setBoolean("hbase.online.schema.update.enable", true);
     TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
     TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
     TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9463bcc/src/main/asciidoc/_chapters/hbase-default.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/hbase-default.adoc b/src/main/asciidoc/_chapters/hbase-default.adoc
index df750e0..7a65446 100644
--- a/src/main/asciidoc/_chapters/hbase-default.adoc
+++ b/src/main/asciidoc/_chapters/hbase-default.adoc
@@ -1585,16 +1585,6 @@ Set to true to cause the hosting server (master or regionserver)
 `true`
 
 
-[[hbase.online.schema.update.enable]]
-*`hbase.online.schema.update.enable`*::
-+
-.Description
-Set true to enable online schema changes.
-+
-.Default
-`true`
-
-
 [[hbase.table.lock.enable]]
 *`hbase.table.lock.enable`*::
 +


[16/50] hbase git commit: HBASE-15939 Two shell test failures on master (Talat and Ted)

Posted by sy...@apache.org.
HBASE-15939 Two shell test failures on master (Talat and Ted)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/72d3f2a8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/72d3f2a8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/72d3f2a8

Branch: refs/heads/hbase-12439
Commit: 72d3f2a86825808af879ed77a9fafe4c42daed6a
Parents: fc890a2
Author: tedyu <yu...@gmail.com>
Authored: Thu Jun 2 15:44:18 2016 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Thu Jun 2 15:44:18 2016 -0700

----------------------------------------------------------------------
 hbase-shell/src/main/ruby/hbase/hbase.rb                         | 2 +-
 .../java/org/apache/hadoop/hbase/client/AbstractTestShell.java   | 4 ++++
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/72d3f2a8/hbase-shell/src/main/ruby/hbase/hbase.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb
index 797d784..852f349 100644
--- a/hbase-shell/src/main/ruby/hbase/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase/hbase.rb
@@ -58,7 +58,7 @@ module Hbase
 
     # Create new one each time
     def table(table, shell)
-      ::Hbase::Table.new(@connection.getTable(table), shell)
+      ::Hbase::Table.new(@connection.getTable(TableName.valueOf(table)), shell)
     end
 
     def replication_admin(formatter)

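The underlying Java API here is that Connection.getTable takes a TableName,
not a raw String, which is why the shell now wraps the argument with
TableName.valueOf. The equivalent in client code (a minimal sketch; the
namespace and table name are examples):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;

public class GetTableExample {
  // Passing a bare String does not compile against the Java client API.
  static Table open(Connection connection, String name) throws IOException {
    return connection.getTable(TableName.valueOf(name));
  }
}
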
http://git-wip-us.apache.org/repos/asf/hbase/blob/72d3f2a8/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
index ddcc1ea..87d14dd 100644
--- a/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
+++ b/hbase-shell/src/test/java/org/apache/hadoop/hbase/client/AbstractTestShell.java
@@ -44,6 +44,10 @@ public abstract class AbstractTestShell {
     TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);
     TEST_UTIL.getConfiguration().setInt("hfile.format.version", 3);
 
+    // Below settings are necessary for task monitor test.
+    TEST_UTIL.getConfiguration().setInt(HConstants.MASTER_INFO_PORT, 0);
+    TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
+    TEST_UTIL.getConfiguration().setBoolean(HConstants.REGIONSERVER_INFO_PORT_AUTO, true);
     // Security setup configuration
     SecureTestUtil.enableSecurity(TEST_UTIL.getConfiguration());
     VisibilityTestUtil.enableVisiblityLabels(TEST_UTIL.getConfiguration());


[06/50] hbase git commit: HBASE-15923 Shell rows counter test fails

Posted by sy...@apache.org.
HBASE-15923 Shell rows counter test fails


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/015f2ef6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/015f2ef6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/015f2ef6

Branch: refs/heads/hbase-12439
Commit: 015f2ef6292df52270df8845ccd244a97deb9c98
Parents: 73ec338
Author: tedyu <yu...@gmail.com>
Authored: Tue May 31 14:21:32 2016 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Tue May 31 14:21:32 2016 -0700

----------------------------------------------------------------------
 hbase-shell/src/test/ruby/hbase/table_test.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/015f2ef6/hbase-shell/src/test/ruby/hbase/table_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/table_test.rb b/hbase-shell/src/test/ruby/hbase/table_test.rb
index d4547b7..a617bc5 100644
--- a/hbase-shell/src/test/ruby/hbase/table_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/table_test.rb
@@ -561,7 +561,7 @@ module Hbase
     define_test "scan with a block should yield rows and return rows counter" do
       rows = {}
       res = @test_table._scan_internal { |row, cells| rows[row] = cells }
-      assert_equal(rows.keys.size, res)
+      assert_equal([rows.keys.size,false], res)
     end
     
     define_test "scan should support COLUMNS with value CONVERTER information" do


[49/50] hbase git commit: HBASE-15946. Eliminate possible security concerns in Store File metrics.

Posted by sy...@apache.org.
HBASE-15946. Eliminate possible security concerns in Store File metrics.

Invoking 'hbase hfile' inside a servlet raises several concerns. This
patch avoids invoking a separate process, and also adds validation that
the file being read is at least inside the HBase root directory.

Signed-off-by: Mikhail Antonov <an...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6da6babe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6da6babe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6da6babe

Branch: refs/heads/hbase-12439
Commit: 6da6babe4faa7b2b16775d3cd5c861e71ef4cf31
Parents: babdedc
Author: Sean Mackrory <ma...@apache.org>
Authored: Tue May 31 10:28:27 2016 -0600
Committer: Mikhail Antonov <an...@apache.org>
Committed: Thu Jun 9 16:08:19 2016 -0700

----------------------------------------------------------------------
 .../hbase/io/hfile/HFilePrettyPrinter.java      | 108 ++++++++++++-------
 .../hbase-webapps/regionserver/storeFile.jsp    |  35 +++---
 2 files changed, 83 insertions(+), 60 deletions(-)
----------------------------------------------------------------------

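The containment check this commit adds to processFile is, in essence, a
string-prefix test against the fully-qualified HBase root directory, with a
fallback that qualifies a bare absolute path using the root directory's
filesystem. A simplified standalone sketch of that logic (the helper name is
illustrative, not the patch's API):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RootDirCheck {
  /** True if {@code file} resolves under {@code rootPath}. */
  static boolean isUnderRoot(Configuration conf, Path rootPath, Path file)
      throws IOException {
    String rootString = rootPath + Path.SEPARATOR;
    if (file.toString().startsWith(rootString)) {
      return true;  // already a fully-qualified URI under the root
    }
    // Might be an absolute path on the same filesystem: qualify it with
    // the root directory's filesystem URI and compare again.
    FileSystem rootFs = rootPath.getFileSystem(conf);
    String qualified = rootFs.getUri().toString() + file.toString();
    return qualified.startsWith(rootString);
  }
}

Returning an exit code instead of calling System.exit, and printing through
the injectable streams set by setPrintStreams, is what makes the printer
callable from the storeFile.jsp servlet rather than as a separate process.
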

http://git-wip-us.apache.org/repos/asf/hbase/blob/6da6babe/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index e9e21fe..36067e5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -1,4 +1,3 @@
-
 /*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -115,6 +114,8 @@ public class HFilePrettyPrinter extends Configured implements Tool {
   private Map<String, List<Path>> mobFileLocations;
   private static final int FOUND_MOB_FILES_CACHE_CAPACITY = 50;
   private static final int MISSING_MOB_FILES_CACHE_CAPACITY = 20;
+  private PrintStream out = System.out;
+  private PrintStream err = System.err;
 
   /**
    * The row which the user wants to specify and print all the KeyValues for.
@@ -161,6 +162,11 @@ public class HFilePrettyPrinter extends Configured implements Tool {
     options.addOptionGroup(files);
   }
 
+  public void setPrintStreams(PrintStream out, PrintStream err) {
+    this.out = out;
+    this.err = err;
+  }
+
   public boolean parseOptions(String args[]) throws ParseException,
       IOException {
     if (args.length == 0) {
@@ -192,7 +198,7 @@ public class HFilePrettyPrinter extends Configured implements Tool {
         row = Bytes.toBytesBinary(key);
         isSeekToRow = true;
       } else {
-        System.err.println("Invalid row is specified.");
+        err.println("Invalid row is specified.");
         System.exit(-1);
       }
     }
@@ -206,17 +212,17 @@ public class HFilePrettyPrinter extends Configured implements Tool {
       String enc = HRegionInfo.encodeRegionName(rn);
       Path regionDir = new Path(tableDir, enc);
       if (verbose)
-        System.out.println("region dir -> " + regionDir);
+        out.println("region dir -> " + regionDir);
       List<Path> regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()),
           regionDir);
       if (verbose)
-        System.out.println("Number of region files found -> "
+        out.println("Number of region files found -> "
             + regionFiles.size());
       if (verbose) {
         int i = 1;
         for (Path p : regionFiles) {
           if (verbose)
-            System.out.println("Found file[" + i++ + "] -> " + p);
+            out.println("Found file[" + i++ + "] -> " + p);
         }
       }
       files.addAll(regionFiles);
@@ -255,27 +261,46 @@ public class HFilePrettyPrinter extends Configured implements Tool {
     // iterate over all files found
     for (Path fileName : files) {
       try {
-        processFile(fileName);
+        int exitCode = processFile(fileName);
+        if (exitCode != 0) {
+          return exitCode;
+        }
       } catch (IOException ex) {
         LOG.error("Error reading " + fileName, ex);
-        System.exit(-2);
+        return -2;
       }
     }
 
     if (verbose || printKey) {
-      System.out.println("Scanned kv count -> " + count);
+      out.println("Scanned kv count -> " + count);
     }
 
     return 0;
   }
 
-  private void processFile(Path file) throws IOException {
+  public int processFile(Path file) throws IOException {
     if (verbose)
-      System.out.println("Scanning -> " + file);
+      out.println("Scanning -> " + file);
+
+    Path rootPath = FSUtils.getRootDir(getConf());
+    String rootString = rootPath + rootPath.SEPARATOR;
+    if (!file.toString().startsWith(rootString)) {
+      // First we see if fully-qualified URI matches the root dir. It might
+      // also be an absolute path in the same filesystem, so we prepend the FS
+      // of the root dir and see if that fully-qualified URI matches.
+      FileSystem rootFS = rootPath.getFileSystem(getConf());
+      String qualifiedFile = rootFS.getUri().toString() + file.toString();
+      if (!qualifiedFile.startsWith(rootString)) {
+        err.println("ERROR, file (" + file +
+            ") is not in HBase's root directory (" + rootString + ")");
+        return -2;
+      }
+    }
+
     FileSystem fs = file.getFileSystem(getConf());
     if (!fs.exists(file)) {
-      System.err.println("ERROR, file doesnt exist: " + file);
-      System.exit(-2);
+      err.println("ERROR, file doesnt exist: " + file);
+      return -2;
     }
 
     HFile.Reader reader = HFile.createReader(fs, file, new CacheConfig(getConf()), getConf());
@@ -306,12 +331,12 @@ public class HFilePrettyPrinter extends Configured implements Tool {
     }
 
     if (printBlockIndex) {
-      System.out.println("Block Index:");
-      System.out.println(reader.getDataBlockIndexReader());
+      out.println("Block Index:");
+      out.println(reader.getDataBlockIndexReader());
     }
 
     if (printBlockHeaders) {
-      System.out.println("Block Headers:");
+      out.println("Block Headers:");
       /*
        * TODO: this same/similar block iteration logic is used in HFileBlock#blockRange and
        * TestLazyDataBlockDecompression. Refactor?
@@ -327,16 +352,17 @@ public class HFilePrettyPrinter extends Configured implements Tool {
         block = reader.readBlock(offset, -1, /* cacheBlock */ false, /* pread */ false,
           /* isCompaction */ false, /* updateCacheMetrics */ false, null, null);
         offset += block.getOnDiskSizeWithHeader();
-        System.out.println(block);
+        out.println(block);
       }
     }
 
     if (printStats) {
       fileStats.finish();
-      System.out.println("Stats:\n" + fileStats);
+      out.println("Stats:\n" + fileStats);
     }
 
     reader.close();
+    return 0;
   }
 
   private void scanKeysValues(Path file, KeyValueStatsCollector fileStats,
@@ -361,24 +387,24 @@ public class HFilePrettyPrinter extends Configured implements Tool {
       }
       // dump key value
       if (printKey) {
-        System.out.print("K: " + cell);
+        out.print("K: " + cell);
         if (printValue) {
-          System.out.print(" V: "
+          out.print(" V: "
               + Bytes.toStringBinary(cell.getValueArray(), cell.getValueOffset(),
                   cell.getValueLength()));
           int i = 0;
           List<Tag> tags = TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(),
               cell.getTagsLength());
           for (Tag tag : tags) {
-            System.out.print(String.format(" T[%d]: %s", i++, tag.toString()));
+            out.print(String.format(" T[%d]: %s", i++, tag.toString()));
           }
         }
-        System.out.println();
+        out.println();
       }
       // check if rows are in order
       if (checkRow && pCell != null) {
         if (CellComparator.COMPARATOR.compareRows(pCell, cell) > 0) {
-          System.err.println("WARNING, previous row is greater then"
+          err.println("WARNING, previous row is greater then"
               + " current row\n\tfilename -> " + file + "\n\tprevious -> "
               + CellUtil.getCellKeyAsString(pCell) + "\n\tcurrent  -> "
               + CellUtil.getCellKeyAsString(cell));
@@ -389,12 +415,12 @@ public class HFilePrettyPrinter extends Configured implements Tool {
         String fam = Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(),
             cell.getFamilyLength());
         if (!file.toString().contains(fam)) {
-          System.err.println("WARNING, filename does not match kv family,"
+          err.println("WARNING, filename does not match kv family,"
               + "\n\tfilename -> " + file + "\n\tkeyvalue -> "
               + CellUtil.getCellKeyAsString(cell));
         }
         if (pCell != null && CellComparator.compareFamilies(pCell, cell) != 0) {
-          System.err.println("WARNING, previous kv has different family"
+          err.println("WARNING, previous kv has different family"
               + " compared to current key\n\tfilename -> " + file
               + "\n\tprevious -> " + CellUtil.getCellKeyAsString(pCell)
               + "\n\tcurrent  -> " + CellUtil.getCellKeyAsString(cell));
@@ -492,32 +518,32 @@ public class HFilePrettyPrinter extends Configured implements Tool {
 
   private void printMeta(HFile.Reader reader, Map<byte[], byte[]> fileInfo)
       throws IOException {
-    System.out.println("Block index size as per heapsize: "
+    out.println("Block index size as per heapsize: "
         + reader.indexSize());
-    System.out.println(asSeparateLines(reader.toString()));
-    System.out.println("Trailer:\n    "
+    out.println(asSeparateLines(reader.toString()));
+    out.println("Trailer:\n    "
         + asSeparateLines(reader.getTrailer().toString()));
-    System.out.println("Fileinfo:");
+    out.println("Fileinfo:");
     for (Map.Entry<byte[], byte[]> e : fileInfo.entrySet()) {
-      System.out.print(FOUR_SPACES + Bytes.toString(e.getKey()) + " = ");
+      out.print(FOUR_SPACES + Bytes.toString(e.getKey()) + " = ");
       if (Bytes.compareTo(e.getKey(), Bytes.toBytes("MAX_SEQ_ID_KEY")) == 0) {
         long seqid = Bytes.toLong(e.getValue());
-        System.out.println(seqid);
+        out.println(seqid);
       } else if (Bytes.compareTo(e.getKey(), Bytes.toBytes("TIMERANGE")) == 0) {
         TimeRangeTracker timeRangeTracker = TimeRangeTracker.getTimeRangeTracker(e.getValue());
-        System.out.println(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax());
+        out.println(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax());
       } else if (Bytes.compareTo(e.getKey(), FileInfo.AVG_KEY_LEN) == 0
           || Bytes.compareTo(e.getKey(), FileInfo.AVG_VALUE_LEN) == 0) {
-        System.out.println(Bytes.toInt(e.getValue()));
+        out.println(Bytes.toInt(e.getValue()));
       } else {
-        System.out.println(Bytes.toStringBinary(e.getValue()));
+        out.println(Bytes.toStringBinary(e.getValue()));
       }
     }
 
     try {
-      System.out.println("Mid-key: " + (CellUtil.getCellKeyAsString(reader.midkey())));
+      out.println("Mid-key: " + (CellUtil.getCellKeyAsString(reader.midkey())));
     } catch (Exception e) {
-      System.out.println ("Unable to retrieve the midkey");
+      out.println ("Unable to retrieve the midkey");
     }
 
     // Printing general bloom information
@@ -526,12 +552,12 @@ public class HFilePrettyPrinter extends Configured implements Tool {
     if (bloomMeta != null)
       bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader);
 
-    System.out.println("Bloom filter:");
+    out.println("Bloom filter:");
     if (bloomFilter != null) {
-      System.out.println(FOUR_SPACES + bloomFilter.toString().replaceAll(
+      out.println(FOUR_SPACES + bloomFilter.toString().replaceAll(
           BloomFilterUtil.STATS_RECORD_SEP, "\n" + FOUR_SPACES));
     } else {
-      System.out.println(FOUR_SPACES + "Not present");
+      out.println(FOUR_SPACES + "Not present");
     }
 
     // Printing delete bloom information
@@ -540,13 +566,13 @@ public class HFilePrettyPrinter extends Configured implements Tool {
     if (bloomMeta != null)
       bloomFilter = BloomFilterFactory.createFromMeta(bloomMeta, reader);
 
-    System.out.println("Delete Family Bloom filter:");
+    out.println("Delete Family Bloom filter:");
     if (bloomFilter != null) {
-      System.out.println(FOUR_SPACES
+      out.println(FOUR_SPACES
           + bloomFilter.toString().replaceAll(BloomFilterUtil.STATS_RECORD_SEP,
               "\n" + FOUR_SPACES));
     } else {
-      System.out.println(FOUR_SPACES + "Not present");
+      out.println(FOUR_SPACES + "Not present");
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/6da6babe/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
index cbbb61f..fe8cfe0 100644
--- a/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/regionserver/storeFile.jsp
@@ -18,20 +18,15 @@
  */
 --%>
 <%@ page contentType="text/html;charset=UTF-8"
-  import="java.util.Collection"
-  import="java.util.Date"
-  import="java.util.List"
   import="java.io.ByteArrayOutputStream"
   import="java.io.PrintStream"
-  import="java.io.BufferedReader"
-  import="java.io.InputStreamReader"
   import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.fs.Path"
   import="org.apache.hadoop.hbase.HBaseConfiguration"
   import="org.apache.hadoop.hbase.io.hfile.HFilePrettyPrinter"
   import="org.apache.hadoop.hbase.regionserver.HRegionServer"
-  import="org.apache.hadoop.hbase.regionserver.Region"
-  import="org.apache.hadoop.hbase.regionserver.Store"
-  import="org.apache.hadoop.hbase.regionserver.StoreFile"%>
+  import="org.apache.hadoop.hbase.regionserver.StoreFile"
+  %>
 <%
   String storeFile = request.getParameter("name");
   HRegionServer rs = (HRegionServer) getServletContext().getAttribute(HRegionServer.REGIONSERVER);
@@ -91,17 +86,19 @@
     <pre>
 <%
    try {
-     ProcessBuilder pb=new ProcessBuilder("hbase", "hfile", "-s", "-f", storeFile);
-     pb.redirectErrorStream(true);
-     Process pr = pb.start();
-     BufferedReader in = new BufferedReader(new InputStreamReader(pr.getInputStream()));
-     String line;
-     while ((line = in.readLine()) != null) {%>
-       <%= line %>
-     <%}
-     pr.waitFor();
-     in.close();
-   }
+     ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+     PrintStream printerOutput = new PrintStream(byteStream);
+     HFilePrettyPrinter printer = new HFilePrettyPrinter();
+     printer.setPrintStreams(printerOutput, printerOutput);
+     printer.setConf(conf);
+     String[] options = {"-s"};
+     printer.parseOptions(options);
+     printer.processFile(new Path(storeFile));
+     String text = byteStream.toString();%>
+     <%=
+       text
+     %>
+   <%}
    catch (Exception e) {%>
      <%= e %>
    <%}


[50/50] hbase git commit: HBASE-16004 Update to Netty 4.1.1

Posted by sy...@apache.org.
HBASE-16004 Update to Netty 4.1.1

Signed-off-by: stack <st...@apache.org>
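
Netty 4.1 moved the read-only accessors of Bootstrap behind a BootstrapConfig object returned by config(), which is what the two call-site changes below track. A minimal illustration, assuming bootstrap is an already-initialized io.netty.bootstrap.Bootstrap:

  // Netty 4.0.x exposed the configured EventLoopGroup directly:
  //   EventLoopGroup group = bootstrap.group();
  // In Netty 4.1.x the no-arg group() getter is deprecated; configuration is
  // read through the BootstrapConfig snapshot instead:
  EventLoopGroup group = bootstrap.config().group();
  group.shutdownGracefully();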


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bd45cf34
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bd45cf34
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bd45cf34

Branch: refs/heads/hbase-12439
Commit: bd45cf34762332a3a51f605798a3e050e7a1e62e
Parents: 6da6bab
Author: Jurriaan Mous <ju...@jurmo.us>
Authored: Fri Jun 10 17:57:42 2016 +0200
Committer: stack <st...@apache.org>
Committed: Fri Jun 10 12:14:05 2016 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java    | 4 ++--
 pom.xml                                                          | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/bd45cf34/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java
index c1ed748..723a234 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcClient.java
@@ -347,13 +347,13 @@ public class AsyncRpcClient extends AbstractRpcClient {
     }
     // do not close global EventLoopGroup.
     if (!useGlobalEventLoopGroup) {
-      bootstrap.group().shutdownGracefully();
+      bootstrap.config().group().shutdownGracefully();
     }
   }
 
   @Override
   public EventLoop getEventExecutor() {
-    return this.bootstrap.group().next();
+    return this.bootstrap.config().group().next();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/bd45cf34/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 0e33ae8..93b7a0e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1254,7 +1254,7 @@
     <clover.version>4.0.3</clover.version>
     <jamon-runtime.version>2.4.1</jamon-runtime.version>
     <jettison.version>1.3.3</jettison.version>
-    <netty.version>4.0.30.Final</netty.version>
+    <netty.version>4.1.1.Final</netty.version>
     <netty.hadoop.version>3.6.2.Final</netty.hadoop.version>
     <joni.version>2.1.2</joni.version>
     <jcodings.version>1.0.8</jcodings.version>


[08/50] hbase git commit: HBASE-15931 Add log for long-running tasks in AsyncProcess

Posted by sy...@apache.org.
HBASE-15931 Add log for long-running tasks in AsyncProcess
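
The new logging is gated by a client-side threshold. A minimal sketch of tuning it; the key name and the default of 10 come from the patch below, while the value 5 is only illustrative:

  // While waiting on outstanding tasks, log which servers still hold tasks
  // once no more than this many remain (and the affected regions once two
  // or fewer remain).
  Configuration conf = HBaseConfiguration.create();
  conf.setInt("hbase.client.threshold.log.details", 5);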


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/53eb27bb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/53eb27bb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/53eb27bb

Branch: refs/heads/hbase-12439
Commit: 53eb27bb60bc736f0c56c7a2facfb6e6ccb91be5
Parents: cbb95cd
Author: Yu Li <li...@apache.org>
Authored: Thu Jun 2 12:00:42 2016 +0800
Committer: Yu Li <li...@apache.org>
Committed: Thu Jun 2 12:00:42 2016 +0800

----------------------------------------------------------------------
 .../hadoop/hbase/client/AsyncProcess.java       | 53 ++++++++++++++++----
 .../hbase/client/BufferedMutatorImpl.java       |  3 +-
 .../hadoop/hbase/client/TestAsyncProcess.java   |  5 +-
 3 files changed, 49 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/53eb27bb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 6f7ba59..812e4bf 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -120,6 +120,12 @@ class AsyncProcess {
    */
   public static final String LOG_DETAILS_FOR_BATCH_ERROR = "hbase.client.log.batcherrors.details";
 
+  private final int thresholdToLogUndoneTaskDetails;
+  private static final String THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS =
+      "hbase.client.threshold.log.details";
+  private static final int DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS = 10;
+  private final int THRESHOLD_TO_LOG_REGION_DETAILS = 2;
+
   /**
    * The context used to wait for results from one submit call.
    * 1) If AsyncProcess is set to track errors globally, and not per call (for HTable puts),
@@ -332,6 +338,10 @@ class AsyncProcess {
     this.rpcCallerFactory = rpcCaller;
     this.rpcFactory = rpcFactory;
     this.logBatchErrorDetails = conf.getBoolean(LOG_DETAILS_FOR_BATCH_ERROR, false);
+
+    this.thresholdToLogUndoneTaskDetails =
+        conf.getInt(THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS,
+          DEFAULT_THRESHOLD_TO_LOG_UNDONE_TASK_DETAILS);
   }
 
   /**
@@ -389,7 +399,7 @@ class AsyncProcess {
     List<Integer> locationErrorRows = null;
     do {
       // Wait until there is at least one slot for a new task.
-      waitForMaximumCurrentTasks(maxTotalConcurrentTasks - 1);
+      waitForMaximumCurrentTasks(maxTotalConcurrentTasks - 1, tableName.getNameAsString());
 
       // Remember the previous decisions about regions or region servers we put in the
       //  final multi.
@@ -1765,18 +1775,19 @@ class AsyncProcess {
   @VisibleForTesting
   /** Waits until all outstanding tasks are done. Used in tests. */
   void waitUntilDone() throws InterruptedIOException {
-    waitForMaximumCurrentTasks(0);
+    waitForMaximumCurrentTasks(0, null);
   }
 
   /** Wait until the async does not have more than max tasks in progress. */
-  private void waitForMaximumCurrentTasks(int max) throws InterruptedIOException {
-    waitForMaximumCurrentTasks(max, tasksInProgress, id);
+  private void waitForMaximumCurrentTasks(int max, String tableName)
+      throws InterruptedIOException {
+    waitForMaximumCurrentTasks(max, tasksInProgress, id, tableName);
   }
 
   // Break out this method so testable
   @VisibleForTesting
-  static void waitForMaximumCurrentTasks(int max, final AtomicLong tasksInProgress, final long id)
-  throws InterruptedIOException {
+  void waitForMaximumCurrentTasks(int max, final AtomicLong tasksInProgress, final long id,
+      String tableName) throws InterruptedIOException {
     long lastLog = EnvironmentEdgeManager.currentTime();
     long currentInProgress, oldInProgress = Long.MAX_VALUE;
     while ((currentInProgress = tasksInProgress.get()) > max) {
@@ -1785,7 +1796,11 @@ class AsyncProcess {
         if (now > lastLog + 10000) {
           lastLog = now;
           LOG.info("#" + id + ", waiting for some tasks to finish. Expected max="
-              + max + ", tasksInProgress=" + currentInProgress);
+              + max + ", tasksInProgress=" + currentInProgress +
+              " hasError=" + hasError() + tableName == null ? "" : ", tableName=" + tableName);
+          if (currentInProgress <= thresholdToLogUndoneTaskDetails) {
+            logDetailsOfUndoneTasks(currentInProgress);
+          }
         }
       }
       oldInProgress = currentInProgress;
@@ -1802,6 +1817,25 @@ class AsyncProcess {
     }
   }
 
+  private void logDetailsOfUndoneTasks(long taskInProgress) {
+    ArrayList<ServerName> servers = new ArrayList<ServerName>();
+    for (Map.Entry<ServerName, AtomicInteger> entry : taskCounterPerServer.entrySet()) {
+      if (entry.getValue().get() > 0) {
+        servers.add(entry.getKey());
+      }
+    }
+    LOG.info("Left over " + taskInProgress + " task(s) are processed on server(s): " + servers);
+    if (taskInProgress <= THRESHOLD_TO_LOG_REGION_DETAILS) {
+      ArrayList<String> regions = new ArrayList<String>();
+      for (Map.Entry<byte[], AtomicInteger> entry : taskCounterPerRegion.entrySet()) {
+        if (entry.getValue().get() > 0) {
+          regions.add(Bytes.toString(entry.getKey()));
+        }
+      }
+      LOG.info("Regions against which left over task(s) are processed: " + regions);
+    }
+  }
+
   /**
    * Only used w/useGlobalErrors ctor argument, for HTable backward compat.
    * @return Whether there were any errors in any request since the last time
@@ -1817,12 +1851,13 @@ class AsyncProcess {
    * failed operations themselves.
    * @param failedRows an optional list into which the rows that failed since the last time
    *        {@link #waitForAllPreviousOpsAndReset(List)} was called, or AP was created, are saved.
+   * @param tableName name of the table
    * @return all the errors since the last time {@link #waitForAllPreviousOpsAndReset(List)}
    *          was called, or AP was created.
    */
   public RetriesExhaustedWithDetailsException waitForAllPreviousOpsAndReset(
-      List<Row> failedRows) throws InterruptedIOException {
-    waitForMaximumCurrentTasks(0);
+      List<Row> failedRows, String tableName) throws InterruptedIOException {
+    waitForMaximumCurrentTasks(0, tableName);
     if (!globalErrors.hasErrors()) {
       return null;
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/53eb27bb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index 2a7effe..e98ad4e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -238,7 +238,8 @@ public class BufferedMutatorImpl implements BufferedMutator {
         while (!buffer.isEmpty()) {
           ap.submit(tableName, buffer, true, null, false);
         }
-        RetriesExhaustedWithDetailsException error = ap.waitForAllPreviousOpsAndReset(null);
+        RetriesExhaustedWithDetailsException error =
+            ap.waitForAllPreviousOpsAndReset(null, tableName.getNameAsString());
         if (error != null) {
           if (listener == null) {
             throw error;

http://git-wip-us.apache.org/repos/asf/hbase/blob/53eb27bb/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index 839a33a..d943316 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -1135,16 +1135,17 @@ public class TestAsyncProcess {
   }
 
   @Test
-  public void testWaitForMaximumCurrentTasks() throws InterruptedException, BrokenBarrierException {
+  public void testWaitForMaximumCurrentTasks() throws Exception {
     final AtomicLong tasks = new AtomicLong(0);
     final AtomicInteger max = new AtomicInteger(0);
     final CyclicBarrier barrier = new CyclicBarrier(2);
+    final AsyncProcess ap = new MyAsyncProcess(createHConnection(), conf);
     Runnable runnable = new Runnable() {
       @Override
       public void run() {
         try {
           barrier.await();
-          AsyncProcess.waitForMaximumCurrentTasks(max.get(), tasks, 1);
+          ap.waitForMaximumCurrentTasks(max.get(), tasks, 1, null);
         } catch (InterruptedIOException e) {
           Assert.fail(e.getMessage());
         } catch (InterruptedException e) {


[28/50] hbase git commit: HBASE-15803 ZooKeeperWatcher's constructor can leak a ZooKeeper instance by throwing ZooKeeperConnectionException when canCreateBaseZNode is true

Posted by sy...@apache.org.
HBASE-15803 ZooKeeperWatcher's constructor can leak a ZooKeeper instance by throwing ZooKeeperConnectionException when canCreateBaseZNode is true
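
The fix is the standard close-on-failure idiom: a constructor that acquires a resource and can still fail afterwards must release that resource on the failure path. Condensed from the patch below:

  this.recoverableZooKeeper = ZKUtil.connect(conf, quorum, pendingWatcher, identifier);
  pendingWatcher.prepare(this);
  if (canCreateBaseZNode) {
    try {
      createBaseZNodes();
    } catch (ZooKeeperConnectionException zce) {
      try {
        this.recoverableZooKeeper.close();   // release the just-opened session
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();  // restore the interrupt status
      }
      throw zce;                             // rethrow the original failure
    }
  }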


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7fd3532d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7fd3532d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7fd3532d

Branch: refs/heads/hbase-12439
Commit: 7fd3532de63d7b1885d6993c11a35c2f85e26631
Parents: 15c03fd
Author: tedyu <yu...@gmail.com>
Authored: Mon Jun 6 18:35:15 2016 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Mon Jun 6 18:35:15 2016 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7fd3532d/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 93828eb..ff3d1c7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -171,7 +171,17 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
     this.recoverableZooKeeper = ZKUtil.connect(conf, quorum, pendingWatcher, identifier);
     pendingWatcher.prepare(this);
     if (canCreateBaseZNode) {
-      createBaseZNodes();
+      try {
+        createBaseZNodes();
+      } catch (ZooKeeperConnectionException zce) {
+        try {
+          this.recoverableZooKeeper.close();
+        } catch (InterruptedException ie) {
+          LOG.debug("Encountered InterruptedException when closing " + this.recoverableZooKeeper);
+          Thread.currentThread().interrupt();
+        }
+        throw zce;
+      }
     }
   }
 


[07/50] hbase git commit: HBASE-15932 Shell test fails due to uninitialized constant

Posted by sy...@apache.org.
HBASE-15932 Shell test fails due to uninitialized constant


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cbb95cd3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cbb95cd3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cbb95cd3

Branch: refs/heads/hbase-12439
Commit: cbb95cd3a9bf9a9f8558560ae58f4061a73f15a8
Parents: 015f2ef
Author: tedyu <yu...@gmail.com>
Authored: Wed Jun 1 10:35:00 2016 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Wed Jun 1 10:35:00 2016 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java   | 2 +-
 hbase-shell/src/main/ruby/hbase.rb                                 | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/cbb95cd3/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 799bf0b..b75e8cd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -64,7 +64,7 @@ public class HColumnDescriptor implements Comparable<HColumnDescriptor> {
   // Version 11 -- add column family level configuration.
   private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11;
 
-  private static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION";
+  public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION";
 
   // These constants are used as FileInfo keys
   public static final String COMPRESSION = "COMPRESSION";

http://git-wip-us.apache.org/repos/asf/hbase/blob/cbb95cd3/hbase-shell/src/main/ruby/hbase.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase.rb b/hbase-shell/src/main/ruby/hbase.rb
index 189fa3a..bc6f37c 100644
--- a/hbase-shell/src/main/ruby/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase.rb
@@ -39,7 +39,7 @@ module HBaseConstants
   NAME = org.apache.hadoop.hbase.HConstants::NAME
   VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS
   IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY
-  IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.HConstants::IN_MEMORY_COMPACTION
+  IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION
   METADATA = org.apache.hadoop.hbase.HConstants::METADATA
   STOPROW = "STOPROW"
   STARTROW = "STARTROW"


[24/50] hbase git commit: HBASE-15849 Simplify the way we handle runtime of commands.

Posted by sy...@apache.org.
HBASE-15849 Simplify the way we handle runtime of commands.
Functions format_simple_command and format_and_return_simple_command are used to print runtimes right now. They are called from within every single command and use Ruby's 'yield' magic. Instead, we can simplify it using the 'command_safe' function: since command_safe wraps all commands, we can simply take the time before and after calling the individual command.
If a command only wants to time a part of its logic, it can set the instance variables start_time and end_time accordingly, which is far simpler to understand and work with than 'yield'.

Change-Id: Ibfacf3593175af22fc4f7d80896dd2f6d7c5dde3
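
The shape of the change is language-agnostic: the wrapper records the timestamps and prints the runtime, and a command that wants a narrower measurement overwrites the endpoints itself. A hypothetical Java rendering of that wrapper (names are illustrative, not HBase API):

  // 'body' stands in for the concrete command; the command does no timing.
  long startNanos = System.nanoTime();
  try {
    body.run();
  } finally {
    double seconds = (System.nanoTime() - startNanos) / 1e9;
    System.out.printf("Took %.4f seconds%n", seconds);
  }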


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/70762faa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/70762faa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/70762faa

Branch: refs/heads/hbase-12439
Commit: 70762faa98e6ad26f10fa8bd9ebc1824fdcb507c
Parents: de1b5ff
Author: Apekshit Sharma <ap...@apache.org>
Authored: Fri Jun 3 14:13:45 2016 -0700
Committer: Apekshit Sharma <ap...@apache.org>
Committed: Fri Jun 3 15:50:03 2016 -0700

----------------------------------------------------------------------
 hbase-shell/src/main/ruby/shell.rb              |  7 ++---
 hbase-shell/src/main/ruby/shell/commands.rb     | 27 +++++++-------------
 .../main/ruby/shell/commands/abort_procedure.rb |  6 +----
 .../src/main/ruby/shell/commands/add_labels.rb  |  4 +--
 .../src/main/ruby/shell/commands/add_peer.rb    |  4 +--
 .../src/main/ruby/shell/commands/alter.rb       |  4 +--
 .../src/main/ruby/shell/commands/alter_async.rb |  4 +--
 .../main/ruby/shell/commands/alter_namespace.rb |  4 +--
 .../src/main/ruby/shell/commands/append.rb      | 10 ++++----
 .../ruby/shell/commands/append_peer_tableCFs.rb |  4 +--
 .../src/main/ruby/shell/commands/assign.rb      |  4 +--
 .../main/ruby/shell/commands/balance_switch.rb  |  6 +----
 .../src/main/ruby/shell/commands/balancer.rb    | 14 +++++-----
 .../ruby/shell/commands/balancer_enabled.rb     |  6 +----
 .../shell/commands/catalogjanitor_enabled.rb    |  6 +----
 .../ruby/shell/commands/catalogjanitor_run.rb   |  4 +--
 .../shell/commands/catalogjanitor_switch.rb     |  6 +----
 .../src/main/ruby/shell/commands/clear_auths.rb |  4 +--
 .../main/ruby/shell/commands/clone_snapshot.rb  |  4 +--
 .../main/ruby/shell/commands/close_region.rb    |  4 +--
 .../src/main/ruby/shell/commands/compact.rb     |  4 +--
 .../src/main/ruby/shell/commands/compact_rs.rb  |  4 +--
 .../src/main/ruby/shell/commands/count.rb       |  4 +--
 .../src/main/ruby/shell/commands/create.rb      |  7 +++--
 .../ruby/shell/commands/create_namespace.rb     |  4 +--
 .../src/main/ruby/shell/commands/delete.rb      | 13 +++++-----
 .../ruby/shell/commands/delete_all_snapshot.rb  |  6 ++---
 .../main/ruby/shell/commands/delete_snapshot.rb |  4 +--
 .../shell/commands/delete_table_snapshots.rb    | 19 +++++++-------
 .../src/main/ruby/shell/commands/deleteall.rb   |  5 ++--
 .../src/main/ruby/shell/commands/describe.rb    |  4 +--
 .../ruby/shell/commands/describe_namespace.rb   |  3 ---
 .../src/main/ruby/shell/commands/disable.rb     |  4 +--
 .../main/ruby/shell/commands/disable_peer.rb    |  4 +--
 .../shell/commands/disable_table_replication.rb |  6 ++---
 .../src/main/ruby/shell/commands/drop.rb        |  4 +--
 .../main/ruby/shell/commands/drop_namespace.rb  |  4 +--
 .../src/main/ruby/shell/commands/enable.rb      |  4 +--
 .../src/main/ruby/shell/commands/enable_peer.rb |  4 +--
 .../shell/commands/enable_table_replication.rb  |  6 ++---
 .../src/main/ruby/shell/commands/exists.rb      |  4 +--
 .../src/main/ruby/shell/commands/flush.rb       |  4 +--
 hbase-shell/src/main/ruby/shell/commands/get.rb |  4 +--
 .../src/main/ruby/shell/commands/get_auths.rb   |  8 +++---
 .../main/ruby/shell/commands/get_peer_config.rb |  7 +++--
 .../src/main/ruby/shell/commands/get_table.rb   |  4 +--
 .../src/main/ruby/shell/commands/grant.rb       |  9 +++----
 .../src/main/ruby/shell/commands/incr.rb        | 12 ++++-----
 .../src/main/ruby/shell/commands/is_disabled.rb |  8 ++----
 .../src/main/ruby/shell/commands/is_enabled.rb  |  6 +----
 .../src/main/ruby/shell/commands/list.rb        |  3 +--
 .../src/main/ruby/shell/commands/list_labels.rb |  8 +++---
 .../main/ruby/shell/commands/list_namespace.rb  |  3 +--
 .../shell/commands/list_namespace_tables.rb     |  3 +--
 .../ruby/shell/commands/list_peer_configs.rb    | 18 ++++++-------
 .../src/main/ruby/shell/commands/list_peers.rb  |  3 +--
 .../main/ruby/shell/commands/list_procedures.rb |  3 +--
 .../src/main/ruby/shell/commands/list_quotas.rb |  3 +--
 .../shell/commands/list_replicated_tables.rb    |  4 +--
 .../main/ruby/shell/commands/list_snapshots.rb  |  3 +--
 .../ruby/shell/commands/list_table_snapshots.rb |  3 +--
 .../main/ruby/shell/commands/locate_region.rb   |  4 +--
 .../main/ruby/shell/commands/major_compact.rb   |  4 +--
 .../main/ruby/shell/commands/merge_region.rb    |  4 +--
 .../src/main/ruby/shell/commands/move.rb        |  4 +--
 .../src/main/ruby/shell/commands/normalize.rb   |  6 +----
 .../ruby/shell/commands/normalizer_enabled.rb   |  6 +----
 .../ruby/shell/commands/normalizer_switch.rb    |  6 +----
 hbase-shell/src/main/ruby/shell/commands/put.rb |  5 ++--
 .../src/main/ruby/shell/commands/remove_peer.rb |  4 +--
 .../ruby/shell/commands/remove_peer_tableCFs.rb |  4 +--
 .../ruby/shell/commands/restore_snapshot.rb     |  4 +--
 .../src/main/ruby/shell/commands/revoke.rb      |  4 +--
 .../src/main/ruby/shell/commands/scan.rb        |  5 ++--
 .../src/main/ruby/shell/commands/set_auths.rb   |  4 +--
 .../ruby/shell/commands/set_peer_tableCFs.rb    |  4 +--
 .../main/ruby/shell/commands/set_visibility.rb  |  4 +--
 .../main/ruby/shell/commands/show_filters.rb    |  1 -
 .../src/main/ruby/shell/commands/snapshot.rb    |  4 +--
 .../src/main/ruby/shell/commands/split.rb       |  4 +--
 .../ruby/shell/commands/splitormerge_enabled.rb |  8 +++---
 .../ruby/shell/commands/splitormerge_switch.rb  |  8 +++---
 .../src/main/ruby/shell/commands/trace.rb       |  4 +--
 .../src/main/ruby/shell/commands/truncate.rb    |  6 ++---
 .../ruby/shell/commands/truncate_preserve.rb    |  6 ++---
 .../src/main/ruby/shell/commands/unassign.rb    |  4 +--
 .../ruby/shell/commands/update_all_config.rb    |  4 +--
 .../main/ruby/shell/commands/update_config.rb   |  4 +--
 .../ruby/shell/commands/update_peer_config.rb   |  4 +--
 .../main/ruby/shell/commands/user_permission.rb |  4 +--
 .../src/main/ruby/shell/commands/wal_roll.rb    |  5 ++--
 hbase-shell/src/main/ruby/shell/formatter.rb    | 11 +++-----
 .../src/test/ruby/shell/formatter_test.rb       |  2 +-
 hbase-shell/src/test/ruby/shell/shell_test.rb   | 12 ++++++---
 hbase-shell/src/test/ruby/test_helper.rb        |  2 +-
 95 files changed, 177 insertions(+), 353 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index fa1f8b8..35626d9 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -81,8 +81,9 @@ module Shell
       self.interactive = interactive
     end
 
-    def hbase_admin
-      @hbase_admin ||= hbase.admin()
+    # Returns Admin class from admin.rb
+    def admin
+      @admin ||= hbase.admin()
     end
 
     def hbase_taskmonitor
@@ -143,7 +144,7 @@ module Shell
     # method_name - name of the method on the command to call. Defaults to just 'command'
     # args - to be passed to the named method
     def internal_command(command, method_name= :command, *args)
-      command_instance(command).command_safe(self.debug,method_name, *args)
+      command_instance(command).command_safe(self.debug, method_name, *args)
     end
 
     def print_banner

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index b9446dd..f86904c 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -29,9 +29,12 @@ module Shell
       # cmd - command name to execute
       # args - arguments to pass to the command
       def command_safe(debug, cmd = :command, *args)
+        # Commands can overwrite start_time to skip time used in some kind of setup.
+        # See count.rb for example.
+        @start_time = Time.now
         # send is internal ruby method to call 'cmd' with *args
         #(everything is a message, so this is just the formal semantics to support that idiom)
-        translate_hbase_exceptions(*args) { send(cmd,*args) }
+        translate_hbase_exceptions(*args) { send(cmd, *args) }
       rescue => e
         rootCause = e
         while rootCause != nil && rootCause.respond_to?(:cause) && rootCause.cause != nil
@@ -48,13 +51,16 @@ module Shell
         else
           raise rootCause
         end
+      ensure
+        # If end_time is not already set by the command, use current time.
+        @end_time ||= Time.now
+        formatter.output_str("Took %.4f seconds" % [@end_time - @start_time])
       end
 
       # Convenience functions to get different admins
-
       # Returns HBase::Admin ruby class.
       def admin
-        @shell.hbase_admin
+        @shell.admin
       end
 
       def taskmonitor
@@ -91,21 +97,6 @@ module Shell
         @formatter ||= ::Shell::Formatter::Console.new
       end
 
-      def format_simple_command
-        now = Time.now
-        yield
-        formatter.header
-        formatter.footer(now)
-      end
-
-      def format_and_return_simple_command
-        now = Time.now
-        ret = yield
-        formatter.header
-        formatter.footer(now)
-        return ret
-      end
-
       def translate_hbase_exceptions(*args)
         yield
       rescue => e

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/abort_procedure.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/abort_procedure.rb b/hbase-shell/src/main/ruby/shell/commands/abort_procedure.rb
index 6f77ab7..e69e133 100644
--- a/hbase-shell/src/main/ruby/shell/commands/abort_procedure.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/abort_procedure.rb
@@ -40,11 +40,7 @@ EOF
       end
 
       def command(proc_id, may_interrupt_if_running=nil)
-        format_simple_command do
-          formatter.row([
-            admin.abort_procedure?(proc_id, may_interrupt_if_running).to_s
-          ])
-        end
+        formatter.row([admin.abort_procedure?(proc_id, may_interrupt_if_running).to_s])
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/add_labels.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/add_labels.rb b/hbase-shell/src/main/ruby/shell/commands/add_labels.rb
index 65a1140..7bde5fb 100644
--- a/hbase-shell/src/main/ruby/shell/commands/add_labels.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/add_labels.rb
@@ -31,9 +31,7 @@ EOF
       end
 
       def command(*args)
-        format_simple_command do
-          visibility_labels_admin.add_labels(args)
-        end
+        visibility_labels_admin.add_labels(args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
index 498f79f..558e86d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/add_peer.rb
@@ -58,9 +58,7 @@ EOF
       end
 
       def command(id, args = {}, peer_tableCFs = nil)
-        format_simple_command do
-          replication_admin.add_peer(id, args, peer_tableCFs)
-        end
+        replication_admin.add_peer(id, args, peer_tableCFs)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/alter.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/alter.rb b/hbase-shell/src/main/ruby/shell/commands/alter.rb
index 2c3aa6f..91b3e2e 100644
--- a/hbase-shell/src/main/ruby/shell/commands/alter.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/alter.rb
@@ -92,9 +92,7 @@ EOF
       end
 
       def command(table, *args)
-        format_simple_command do
-          admin.alter(table, true, *args)
-        end
+        admin.alter(table, true, *args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/alter_async.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/alter_async.rb b/hbase-shell/src/main/ruby/shell/commands/alter_async.rb
index bddff01..e0f6deb 100644
--- a/hbase-shell/src/main/ruby/shell/commands/alter_async.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/alter_async.rb
@@ -56,9 +56,7 @@ EOF
       end
 
       def command(table, *args)
-        format_simple_command do
-          admin.alter(table, false, *args)
-        end
+        admin.alter(table, false, *args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb b/hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb
index a16e10d..0051c7f 100644
--- a/hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/alter_namespace.rb
@@ -35,9 +35,7 @@ EOF
       end
 
       def command(namespace, *args)
-        format_simple_command do
-          admin.alter_namespace(namespace, *args)
-        end
+        admin.alter_namespace(namespace, *args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/append.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/append.rb b/hbase-shell/src/main/ruby/shell/commands/append.rb
index a0ef36d..93a4317 100644
--- a/hbase-shell/src/main/ruby/shell/commands/append.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/append.rb
@@ -35,14 +35,14 @@ t to table 't1', the corresponding command would be:
 EOF
       end
 
-      def command(table, row, column, value, args={})
-        append(table(table), row, column, value, args)
+      def command(table_name, row, column, value, args={})
+        table = table(table_name)
+        @start_time = Time.now
+        append(table, row, column, value, args)
       end
 
       def append(table, row, column, value, args={})
-      	format_simple_command do
-        	table._append_internal(row, column, value, args)
-        end
+        table._append_internal(row, column, value, args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/append_peer_tableCFs.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/append_peer_tableCFs.rb b/hbase-shell/src/main/ruby/shell/commands/append_peer_tableCFs.rb
index 753067a..fe34c5c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/append_peer_tableCFs.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/append_peer_tableCFs.rb
@@ -32,9 +32,7 @@ EOF
       end
 
       def command(id, table_cfs)
-        format_simple_command do
-          replication_admin.append_peer_tableCFs(id, table_cfs)
-        end
+        replication_admin.append_peer_tableCFs(id, table_cfs)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/assign.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/assign.rb b/hbase-shell/src/main/ruby/shell/commands/assign.rb
index 448a546..1220bf1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/assign.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/assign.rb
@@ -32,9 +32,7 @@ EOF
       end
 
       def command(region_name)
-        format_simple_command do
-          admin.assign(region_name)
-        end
+        admin.assign(region_name)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/balance_switch.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/balance_switch.rb b/hbase-shell/src/main/ruby/shell/commands/balance_switch.rb
index 4d7778d..107d267 100644
--- a/hbase-shell/src/main/ruby/shell/commands/balance_switch.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/balance_switch.rb
@@ -31,11 +31,7 @@ EOF
       end
 
       def command(enableDisable)
-        format_simple_command do
-          formatter.row([
-            admin.balance_switch(enableDisable)? "true" : "false"
-          ])
-        end
+        formatter.row([admin.balance_switch(enableDisable)? "true" : "false"])
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/balancer.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/balancer.rb b/hbase-shell/src/main/ruby/shell/commands/balancer.rb
index ee53ca0..a7490a5 100644
--- a/hbase-shell/src/main/ruby/shell/commands/balancer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/balancer.rb
@@ -38,15 +38,13 @@ EOF
       end
 
       def command(force=nil)
-        format_simple_command do
-          formatter.row([
-            if force.nil?
-              admin.balancer("false")? "true": "false"
-            elsif force == "force"
-              admin.balancer("true")? "true": "false"
-            end
-          ])
+        force_balancer = 'false'
+        if force == 'force'
+          force_balancer = 'true'
+        elsif !force.nil?
+          raise ArgumentError, "Invalid argument #{force}."
         end
+        formatter.row([admin.balancer(force_balancer)? "true": "false"])
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb b/hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb
index 3b2f5c6..6a75ab5 100644
--- a/hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/balancer_enabled.rb
@@ -30,11 +30,7 @@ EOF
       end
 
       def command()
-        format_simple_command do
-          formatter.row([
-            admin.balancer_enabled?.to_s
-          ])
-        end
+        formatter.row([admin.balancer_enabled?.to_s])
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb b/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb
index b310c3a..fdeb67e 100644
--- a/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_enabled.rb
@@ -29,11 +29,7 @@ EOF
       end
 
       def command()
-        format_simple_command do
-          formatter.row([
-            admin.catalogjanitor_enabled()? "true" : "false"
-          ])
-        end
+        formatter.row([admin.catalogjanitor_enabled()? "true" : "false"])
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb b/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb
index 03426cb..638a18f 100644
--- a/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_run.rb
@@ -28,9 +28,7 @@ Catalog janitor command to run the (garbage collection) scan from command line.
 EOF
       end
       def command()
-        format_simple_command do
-          admin.catalogjanitor_run()
-        end
+        admin.catalogjanitor_run()
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb b/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb
index fce1925..d2d8e58 100644
--- a/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/catalogjanitor_switch.rb
@@ -30,11 +30,7 @@ EOF
       end
 
       def command(enableDisable)
-        format_simple_command do
-          formatter.row([
-            admin.catalogjanitor_switch(enableDisable)? "true" : "false"
-          ])
-        end
+        formatter.row([admin.catalogjanitor_switch(enableDisable)? "true" : "false"])
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/clear_auths.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/clear_auths.rb b/hbase-shell/src/main/ruby/shell/commands/clear_auths.rb
index 8553fa6..be56d5d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/clear_auths.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/clear_auths.rb
@@ -31,9 +31,7 @@ EOF
       end
 
       def command(user, *args)
-        format_simple_command do
-          visibility_labels_admin.clear_auths(user, args)
-        end
+        visibility_labels_admin.clear_auths(user, args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb
index 0498c8e..c57e87f 100644
--- a/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/clone_snapshot.rb
@@ -32,9 +32,7 @@ EOF
       end
 
       def command(snapshot_name, table)
-        format_simple_command do
-          admin.clone_snapshot(snapshot_name, table)
-        end
+        admin.clone_snapshot(snapshot_name, table)
       end
 
       def handle_exceptions(cause, *args)

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/close_region.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/close_region.rb b/hbase-shell/src/main/ruby/shell/commands/close_region.rb
index ef1c99c..3d01911 100644
--- a/hbase-shell/src/main/ruby/shell/commands/close_region.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/close_region.rb
@@ -51,9 +51,7 @@ EOF
       end
 
       def command(region_name, server = nil)
-        format_simple_command do
-          admin.close_region(region_name, server)
-        end
+        admin.close_region(region_name, server)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/compact.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/compact.rb b/hbase-shell/src/main/ruby/shell/commands/compact.rb
index e267821..1607536 100644
--- a/hbase-shell/src/main/ruby/shell/commands/compact.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/compact.rb
@@ -44,9 +44,7 @@ module Shell
       end
 
       def command(table_or_region_name, family = nil, type = "NORMAL")
-        format_simple_command do
-          admin.compact(table_or_region_name, family, type)
-        end
+        admin.compact(table_or_region_name, family, type)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/compact_rs.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/compact_rs.rb b/hbase-shell/src/main/ruby/shell/commands/compact_rs.rb
index 0ecdd21..588b6fe 100644
--- a/hbase-shell/src/main/ruby/shell/commands/compact_rs.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/compact_rs.rb
@@ -34,9 +34,7 @@ module Shell
       end
 
       def command(regionserver, major = false)
-        format_simple_command do
-          admin.compact_regionserver(regionserver, major)
-        end
+        admin.compactRegionserver(regionserver, major)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/count.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/count.rb b/hbase-shell/src/main/ruby/shell/commands/count.rb
index 225005e..36250a6 100644
--- a/hbase-shell/src/main/ruby/shell/commands/count.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/count.rb
@@ -61,12 +61,12 @@ EOF
         }.merge(params)
 
         # Call the counter method
-        now = Time.now
+        @start_time = Time.now
         formatter.header
         count = table._count_internal(params['INTERVAL'].to_i, params['CACHE'].to_i) do |cnt, row|
           formatter.row([ "Current count: #{cnt}, row: #{row}" ])
         end
-        formatter.footer(now, count)
+        formatter.footer(count)
         return count
       end
     end
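
After this change, commands stop threading the start time through a local:
they assign @start_time themselves, and formatter.footer receives only the
row count (plus an optional staleness flag). A hedged sketch of the new
call-site shape, where do_work stands in for the real table call:

    def command(table_name)
      @start_time = Time.now        # picked up by the surrounding shell framework
      count = do_work(table_name)   # stand-in for table._count_internal(...)
      formatter.footer(count)       # new-style footer: no start-time argument
    end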

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/create.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/create.rb b/hbase-shell/src/main/ruby/shell/commands/create.rb
index ab149bf..c237ca9 100644
--- a/hbase-shell/src/main/ruby/shell/commands/create.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/create.rb
@@ -62,10 +62,9 @@ EOF
       end
 
       def command(table, *args)
-        format_simple_command do
-          ret = admin.create(table, *args)
-        end
-        #and then return the table you just created
+        admin.create(table, *args)
+        @end_time = Time.now
+        #and then return the table just created
         table(table)
       end
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/create_namespace.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/create_namespace.rb b/hbase-shell/src/main/ruby/shell/commands/create_namespace.rb
index adb6897..d478fc1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/create_namespace.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/create_namespace.rb
@@ -32,9 +32,7 @@ EOF
       end
 
       def command(namespace, *args)
-        format_simple_command do
-          admin.create_namespace(namespace, *args)
-        end
+        admin.create_namespace(namespace, *args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/delete.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/delete.rb b/hbase-shell/src/main/ruby/shell/commands/delete.rb
index dcb8341..bce6625 100644
--- a/hbase-shell/src/main/ruby/shell/commands/delete.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/delete.rb
@@ -40,16 +40,15 @@ t to table 't1', the corresponding command would be:
 EOF
       end
 
-      def command(table, row, column, 
-      				timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
+      def command(table, row, column,
+                  timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
         delete(table(table), row, column, timestamp, args)
       end
 
-      def delete(table, row, column, 
-      				timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
-        format_simple_command do
-          table._delete_internal(row, column, timestamp, args)
-        end
+      def delete(table, row, column,
+                 timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
+        @start_time = Time.now
+        table._delete_internal(row, column, timestamp, args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/delete_all_snapshot.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/delete_all_snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/delete_all_snapshot.rb
index bc07259..5a77f73 100644
--- a/hbase-shell/src/main/ruby/shell/commands/delete_all_snapshot.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/delete_all_snapshot.rb
@@ -41,9 +41,9 @@ EOF
         answer = gets.chomp unless count == 0
         puts "No snapshots matched the regex #{regex.to_s}" if count == 0
         return unless answer =~ /y.*/i
-        format_simple_command do
-          admin.delete_all_snapshot(regex)
-        end
+        @start_time = Time.now
+        admin.delete_all_snapshot(regex)
+        @end_time = Time.now
         list = admin.list_snapshot(regex)
         leftOverSnapshotCount = list.size
         successfullyDeleted = count - leftOverSnapshotCount

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb
index b8c3791..48059b0 100644
--- a/hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/delete_snapshot.rb
@@ -28,9 +28,7 @@ EOF
       end
 
       def command(snapshot_name)
-        format_simple_command do
-          admin.delete_snapshot(snapshot_name)
-        end
+        admin.delete_snapshot(snapshot_name)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/delete_table_snapshots.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/delete_table_snapshots.rb b/hbase-shell/src/main/ruby/shell/commands/delete_table_snapshots.rb
index dc4e0a2..89936f1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/delete_table_snapshots.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/delete_table_snapshots.rb
@@ -51,16 +51,15 @@ EOF
         puts "No snapshots matched the table name regular expression #{tableNameregex.to_s} and the snapshot name regular expression #{snapshotNameRegex.to_s}" if count == 0
         return unless answer =~ /y.*/i
 
-        format_simple_command do
-          list.each do |deleteSnapshot|
-            begin
-              admin.delete_snapshot(deleteSnapshot.getName)
-              puts "Successfully deleted snapshot: #{deleteSnapshot.getName}"
-              puts "\n"
-            rescue RuntimeError
-              puts "Failed to delete snapshot: #{deleteSnapshot.getName}, due to below exception,\n" + $!
-              puts "\n"
-            end
+        @start_time = Time.now
+        list.each do |deleteSnapshot|
+          begin
+            admin.delete_snapshot(deleteSnapshot.getName)
+            puts "Successfully deleted snapshot: #{deleteSnapshot.getName}"
+            puts "\n"
+          rescue RuntimeError
+            puts "Failed to delete snapshot: #{deleteSnapshot.getName}, due to below exception,\n" + $!
+            puts "\n"
           end
         end
       end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/deleteall.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/deleteall.rb b/hbase-shell/src/main/ruby/shell/commands/deleteall.rb
index e6118c9..2965403 100644
--- a/hbase-shell/src/main/ruby/shell/commands/deleteall.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/deleteall.rb
@@ -48,9 +48,8 @@ EOF
 
       def deleteall(table, row, column = nil,
                     timestamp = org.apache.hadoop.hbase.HConstants::LATEST_TIMESTAMP, args = {})
-        format_simple_command do
-          table._deleteall_internal(row, column, timestamp, args)
-        end
+        @start_time = Time.now
+        table._deleteall_internal(row, column, timestamp, args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/describe.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/describe.rb b/hbase-shell/src/main/ruby/shell/commands/describe.rb
index bfa16cd..37938a7 100644
--- a/hbase-shell/src/main/ruby/shell/commands/describe.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/describe.rb
@@ -33,8 +33,6 @@ EOF
       end
 
       def command(table)
-        now = Time.now
-
         column_families = admin.get_column_families(table)
 
         formatter.header(["Table " + table.to_s + " is " + if admin.enabled?(table) then "ENABLED" else "DISABLED" end])
@@ -43,7 +41,7 @@ EOF
         column_families.each do |column_family|
           formatter.row([ column_family.to_s ], true)
         end
-        formatter.footer(now)
+        formatter.footer()
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb b/hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb
index cf135da..ebd9bd2 100644
--- a/hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/describe_namespace.rb
@@ -28,13 +28,10 @@ EOF
       end
 
       def command(namespace)
-        now = Time.now
-
         desc = admin.describe_namespace(namespace)
 
         formatter.header([ "DESCRIPTION" ], [ 64 ])
         formatter.row([ desc ], true, [ 64 ])
-        formatter.footer(now)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/disable.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/disable.rb b/hbase-shell/src/main/ruby/shell/commands/disable.rb
index 79bcd86..6695002 100644
--- a/hbase-shell/src/main/ruby/shell/commands/disable.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/disable.rb
@@ -29,9 +29,7 @@ EOF
       end
 
       def command(table)
-        format_simple_command do
-          admin.disable(table)
-        end
+        admin.disable(table)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/disable_peer.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/disable_peer.rb b/hbase-shell/src/main/ruby/shell/commands/disable_peer.rb
index 416545b..c193f13 100644
--- a/hbase-shell/src/main/ruby/shell/commands/disable_peer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/disable_peer.rb
@@ -32,9 +32,7 @@ EOF
       end
 
       def command(id)
-        format_simple_command do
-          replication_admin.disable_peer(id)
-        end
+        replication_admin.disable_peer(id)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb b/hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb
index 5bf9667..a020d81 100644
--- a/hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/disable_table_replication.rb
@@ -30,10 +30,8 @@ EOF
       end
 
       def command(table_name)
-        format_simple_command do
-          replication_admin.disable_tablerep(table_name)
-        end
-        puts "The replication swith of table '#{table_name}' successfully disabled"
+        replication_admin.disable_tablerep(table_name)
+        puts "Replication of table '#{table_name}' successfully disabled."
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/drop.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/drop.rb b/hbase-shell/src/main/ruby/shell/commands/drop.rb
index fc7b134..3f7332c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/drop.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/drop.rb
@@ -29,9 +29,7 @@ EOF
       end
 
       def command(table)
-        format_simple_command do
-          admin.drop(table)
-        end
+        admin.drop(table)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb b/hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb
index b030d27..392f247 100644
--- a/hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/drop_namespace.rb
@@ -27,9 +27,7 @@ EOF
       end
 
       def command(namespace)
-        format_simple_command do
-          admin.drop_namespace(namespace)
-        end
+        admin.drop_namespace(namespace)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/enable.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/enable.rb b/hbase-shell/src/main/ruby/shell/commands/enable.rb
index deeb70c..5d21219 100644
--- a/hbase-shell/src/main/ruby/shell/commands/enable.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/enable.rb
@@ -29,9 +29,7 @@ EOF
       end
 
       def command(table)
-        format_simple_command do
-          admin.enable(table)
-        end
+        admin.enable(table)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/enable_peer.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/enable_peer.rb b/hbase-shell/src/main/ruby/shell/commands/enable_peer.rb
index 55136ff..5f1a727 100644
--- a/hbase-shell/src/main/ruby/shell/commands/enable_peer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/enable_peer.rb
@@ -32,9 +32,7 @@ EOF
       end
 
       def command(id)
-        format_simple_command do
-          replication_admin.enable_peer(id)
-        end
+        replication_admin.enable_peer(id)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb b/hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb
index 15e3133..e4e2fc1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/enable_table_replication.rb
@@ -30,10 +30,8 @@ EOF
       end
 
       def command(table_name)
-        format_simple_command do
-          replication_admin.enable_tablerep(table_name)
-        end
-        puts "The replication swith of table '#{table_name}' successfully enabled"
+        replication_admin.enable_tablerep(table_name)
+        puts "The replication of table '#{table_name}' successfully enabled"
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/exists.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/exists.rb b/hbase-shell/src/main/ruby/shell/commands/exists.rb
index bacf6c9..7a64813 100644
--- a/hbase-shell/src/main/ruby/shell/commands/exists.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/exists.rb
@@ -29,11 +29,9 @@ EOF
       end
 
       def command(table)
-        format_simple_command do
-          formatter.row([
+        formatter.row([
             "Table #{table} " + (admin.exists?(table.to_s) ? "does exist" : "does not exist")
           ])
-        end
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/flush.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/flush.rb b/hbase-shell/src/main/ruby/shell/commands/flush.rb
index 2aefec5..13963e1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/flush.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/flush.rb
@@ -32,9 +32,7 @@ EOF
       end
 
       def command(table_or_region_name)
-        format_simple_command do
-          admin.flush(table_or_region_name)
-        end
+        admin.flush(table_or_region_name)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/get.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get.rb b/hbase-shell/src/main/ruby/shell/commands/get.rb
index b8bfd52..8191c22 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get.rb
@@ -81,14 +81,14 @@ EOF
       end
 
       def get(table, row, *args)
-        now = Time.now
+        @start_time = Time.now
         formatter.header(["COLUMN", "CELL"])
 
         count, is_stale = table._get_internal(row, *args) do |column, value|
           formatter.row([ column, value ])
         end
 
-        formatter.footer(now, count, is_stale)
+        formatter.footer(count, is_stale)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/get_auths.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_auths.rb b/hbase-shell/src/main/ruby/shell/commands/get_auths.rb
index 1b758ef..04b486b 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_auths.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_auths.rb
@@ -31,11 +31,9 @@ EOF
       end
 
       def command(user)
-        format_simple_command do
-          list = visibility_labels_admin.get_auths(user)
-          list.each do |auths|
-            formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(auths.toByteArray)])
-          end  
+        list = visibility_labels_admin.get_auths(user)
+        list.each do |auths|
+          formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(auths.toByteArray)])
         end
       end
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb b/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
index ee02229..3da6bdf 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
@@ -25,10 +25,9 @@ module Shell
       end
 
       def command(id)
-          peer_config = replication_admin.get_peer_config(id)
-          format_simple_command do
-            format_peer_config(peer_config)
-          end
+        peer_config = replication_admin.get_peer_config(id)
+        @start_time = Time.now
+        format_peer_config(peer_config)
       end
 
       def format_peer_config(peer_config)

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/get_table.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_table.rb b/hbase-shell/src/main/ruby/shell/commands/get_table.rb
index 43e7c1a..2270f43 100644
--- a/hbase-shell/src/main/ruby/shell/commands/get_table.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/get_table.rb
@@ -38,9 +38,7 @@ EOF
       end
 
       def command(table, *args)
-        format_and_return_simple_command do
-          table(table)
-        end
+        table(table)
       end
     end
   end
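
get_table.rb (and trace.rb further down) drop the return-value variant of the
wrapper. Assuming it mirrored format_simple_command while handing the block's
result back to the caller, it plausibly read as follows (a sketch, not the
actual source):

    # Hypothetical reconstruction of the removed helper.
    def format_and_return_simple_command
      now = Time.now
      ret = yield             # e.g. table(table); the shell returns this value
      formatter.footer(now)
      ret
    end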

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/grant.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/grant.rb b/hbase-shell/src/main/ruby/shell/commands/grant.rb
index a4e4547..d6f848b 100644
--- a/hbase-shell/src/main/ruby/shell/commands/grant.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/grant.rb
@@ -74,9 +74,8 @@ EOF
               end
             end
           end
-          format_simple_command do
-            security_admin.grant(user, permissions, table_name, family, qualifier)
-          end
+          @start_time = Time.now
+          security_admin.grant(user, permissions, table_name, family, qualifier)
 
         elsif args[1].kind_of?(Hash)
 
@@ -92,7 +91,7 @@ EOF
           raise(ArgumentError, "Scanner specification is not a Hash") unless scan.kind_of?(Hash)
 
           t = table(table_name)
-          now = Time.now
+          @start_time = Time.now
           scanner = t._get_scanner(scan)
           count = 0
           iter = scanner.iterator
@@ -106,7 +105,7 @@ EOF
             end
             count += 1
           end
-          formatter.footer(now, count)
+          formatter.footer(count)
 
         else
           raise(ArgumentError, "Second argument should be a String or Hash")

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/incr.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/incr.rb b/hbase-shell/src/main/ruby/shell/commands/incr.rb
index d223a45..318fac3 100644
--- a/hbase-shell/src/main/ruby/shell/commands/incr.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/incr.rb
@@ -49,13 +49,11 @@ EOF
       end
 
       def incr(table, row, column, value = nil, args={})
-      	format_simple_command do
-          if cnt = table._incr_internal(row, column, value, args)
-            puts "COUNTER VALUE = #{cnt}"
-          else
-            puts "No counter found at specified coordinates"
-          end
-      	end
+        if cnt = table._incr_internal(row, column, value, args)
+          puts "COUNTER VALUE = #{cnt}"
+        else
+          puts "No counter found at specified coordinates"
+        end
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/is_disabled.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/is_disabled.rb b/hbase-shell/src/main/ruby/shell/commands/is_disabled.rb
index 6da7046..6a914e3 100644
--- a/hbase-shell/src/main/ruby/shell/commands/is_disabled.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/is_disabled.rb
@@ -29,12 +29,8 @@ EOF
       end
 
       def command(table)
-        format_simple_command do
-          formatter.row([
-            admin.disabled?(table)? "true" : "false"
-          ])
-        end
-      end
+        formatter.row([admin.disabled?(table)? "true" : "false"])
+      end
     end
   end
 end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb b/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb
index 960ade7..d8fb2ab 100644
--- a/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/is_enabled.rb
@@ -29,11 +29,7 @@ EOF
       end
 
       def command(table)
-        format_simple_command do
-          formatter.row([
-            admin.enabled?(table)? "true" : "false"
-          ])
-        end
+        formatter.row([admin.enabled?(table)? "true" : "false"])
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list.rb b/hbase-shell/src/main/ruby/shell/commands/list.rb
index dce0ae2..f7a0987 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list.rb
@@ -33,7 +33,6 @@ EOF
       end
 
       def command(regex = ".*")
-        now = Time.now
         formatter.header([ "TABLE" ])
 
         list = admin.list(regex)
@@ -41,7 +40,7 @@ EOF
           formatter.row([ table ])
         end
 
-        formatter.footer(now, list.size)
+        formatter.footer(list.size)
         return list
       end
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list_labels.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_labels.rb b/hbase-shell/src/main/ruby/shell/commands/list_labels.rb
index 6c7f991..6b730b2 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_labels.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_labels.rb
@@ -32,11 +32,9 @@ EOF
       end
 
       def command(regex = ".*")
-        format_simple_command do
-          list = visibility_labels_admin.list_labels(regex)
-          list.each do |label|
-            formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(label.toByteArray)])
-          end
+        list = visibility_labels_admin.list_labels(regex)
+        list.each do |label|
+          formatter.row([org.apache.hadoop.hbase.util.Bytes::toStringBinary(label.toByteArray)])
         end
       end
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list_namespace.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_namespace.rb b/hbase-shell/src/main/ruby/shell/commands/list_namespace.rb
index 5d25604..63aeac1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_namespace.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_namespace.rb
@@ -31,7 +31,6 @@ EOF
       end
 
       def command(regex = ".*")
-        now = Time.now
         formatter.header([ "NAMESPACE" ])
 
         list = admin.list_namespace(regex)
@@ -39,7 +38,7 @@ EOF
           formatter.row([ table ])
         end
 
-        formatter.footer(now, list.size)
+        formatter.footer(list.size)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb b/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb
index 29e1812..9db090e 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_namespace_tables.rb
@@ -30,7 +30,6 @@ EOF
       end
 
       def command(namespace)
-        now = Time.now
         formatter.header([ "TABLE" ])
 
         list = admin.list_namespace_tables(namespace)
@@ -38,7 +37,7 @@ EOF
           formatter.row([ table ])
         end
 
-        formatter.footer(now, list.size)
+        formatter.footer(list.size)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb b/hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb
index fc6e4a7..153e0ce 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peer_configs.rb
@@ -25,16 +25,14 @@ module Shell
       end
 
       def command
-        format_simple_command do
-          peer_configs = replication_admin.list_peer_configs
-          unless peer_configs.nil?
-            peer_configs.each do |peer_config_entry|
-              peer_id = peer_config_entry[0]
-              peer_config = peer_config_entry[1]
-              formatter.row(["PeerId", peer_id])
-              GetPeerConfig.new(@shell).format_peer_config(peer_config)
-              formatter.row([" "])
-            end
+        peer_configs = replication_admin.list_peer_configs
+        unless peer_configs.nil?
+          peer_configs.each do |peer_config_entry|
+            peer_id = peer_config_entry[0]
+            peer_config = peer_config_entry[1]
+            formatter.row(["PeerId", peer_id])
+            GetPeerConfig.new(@shell).format_peer_config(peer_config)
+            formatter.row([" "])
           end
         end
       end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index cc1be04..c5c3397 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -30,7 +30,6 @@ EOF
       end
 
       def command()
-        now = Time.now
         peers = replication_admin.list_peers
 
         formatter.header(["PEER_ID", "CLUSTER_KEY", "STATE", "TABLE_CFS"])
@@ -41,7 +40,7 @@ EOF
           formatter.row([ e.key, e.value, state, tableCFs ])
         end
 
-        formatter.footer(now)
+        formatter.footer()
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list_procedures.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_procedures.rb b/hbase-shell/src/main/ruby/shell/commands/list_procedures.rb
index f407547..83e08c1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_procedures.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_procedures.rb
@@ -29,7 +29,6 @@ EOF
       end
 
       def command()
-        now = Time.now
         formatter.header([ "Id", "Name", "State", "Start_Time", "Last_Update" ])
 
         list = admin.list_procedures()
@@ -39,7 +38,7 @@ EOF
           formatter.row([ proc.getProcId, proc.getProcName, proc.getProcState, start_time, last_update ])
         end
 
-        formatter.footer(now, list.size)
+        formatter.footer(list.size)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list_quotas.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_quotas.rb b/hbase-shell/src/main/ruby/shell/commands/list_quotas.rb
index 682bb71..604d833 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_quotas.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_quotas.rb
@@ -37,7 +37,6 @@ EOF
       end
 
       def command(args = {})
-        now = Time.now
         formatter.header(["OWNER", "QUOTAS"])
 
         #actually do the scanning
@@ -45,7 +44,7 @@ EOF
           formatter.row([ row, cells ])
         end
 
-        formatter.footer(now, count)
+        formatter.footer(count)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb b/hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb
index 0db1d83..142adfc 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_replicated_tables.rb
@@ -31,8 +31,6 @@ EOF
       end
 
       def command(regex = ".*")
-        now = Time.now
-
         formatter.header([ "TABLE:COLUMNFAMILY", "ReplicationType" ], [ 32 ])
         list = replication_admin.list_replicated_tables(regex)
         list.each do |e|
@@ -43,7 +41,7 @@ EOF
           end
           formatter.row([e.get(org.apache.hadoop.hbase.client.replication.ReplicationAdmin::TNAME) + ":" + e.get(org.apache.hadoop.hbase.client.replication.ReplicationAdmin::CFNAME), replicateType], true, [32])
         end
-        formatter.footer(now)
+        formatter.footer()
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb b/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb
index 4e68802..bc91737 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_snapshots.rb
@@ -34,7 +34,6 @@ EOF
       end
 
       def command(regex = ".*")
-        now = Time.now
         formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"])
 
         list = admin.list_snapshot(regex)
@@ -43,7 +42,7 @@ EOF
           formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ])
         end
 
-        formatter.footer(now, list.size)
+        formatter.footer(list.size)
         return list.map { |s| s.getName() }
       end
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/list_table_snapshots.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_table_snapshots.rb b/hbase-shell/src/main/ruby/shell/commands/list_table_snapshots.rb
index 3a32e9e..1efcc17 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_table_snapshots.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_table_snapshots.rb
@@ -39,7 +39,6 @@ EOF
       end
 
       def command(tableNameRegex, snapshotNameRegex = ".*")
-        now = Time.now
         formatter.header([ "SNAPSHOT", "TABLE + CREATION TIME"])
 
         list = admin.list_table_snapshots(tableNameRegex, snapshotNameRegex)
@@ -48,7 +47,7 @@ EOF
           formatter.row([ snapshot.getName, snapshot.getTable + " (" + creation_time + ")" ])
         end
 
-        formatter.footer(now, list.size)
+        formatter.footer(list.size)
         return list.map { |s| s.getName() }
       end
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/locate_region.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/locate_region.rb b/hbase-shell/src/main/ruby/shell/commands/locate_region.rb
index b1e8c7b..a2815d6 100644
--- a/hbase-shell/src/main/ruby/shell/commands/locate_region.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/locate_region.rb
@@ -30,14 +30,12 @@ EOF
       end
 
       def command(table, row_key)
-        now = Time.now
-
         region_location = admin.locate_region(table, row_key)
         hri = region_location.getRegionInfo()
 
         formatter.header([ "HOST", "REGION" ])
         formatter.row([region_location.getHostnamePort(), hri.toString()])
-        formatter.footer(now, 1)
+        formatter.footer(1)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/major_compact.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/major_compact.rb b/hbase-shell/src/main/ruby/shell/commands/major_compact.rb
index 5efd895..1af6c64 100644
--- a/hbase-shell/src/main/ruby/shell/commands/major_compact.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/major_compact.rb
@@ -44,9 +44,7 @@ module Shell
       end
 
       def command(table_or_region_name, family = nil, type = "NORMAL")
-        format_simple_command do
-          admin.major_compact(table_or_region_name, family, type)
-        end
+        admin.majorCompact(table_or_region_name, family, type)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/merge_region.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/merge_region.rb b/hbase-shell/src/main/ruby/shell/commands/merge_region.rb
index 6afa2e5..63f7159 100644
--- a/hbase-shell/src/main/ruby/shell/commands/merge_region.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/merge_region.rb
@@ -40,9 +40,7 @@ EOF
       end
 
       def command(encoded_region_a_name, encoded_region_b_name, force = 'false')
-        format_simple_command do
-          admin.merge_region(encoded_region_a_name, encoded_region_b_name, force)
-        end
+        admin.merge_region(encoded_region_a_name, encoded_region_b_name, force)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/move.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/move.rb b/hbase-shell/src/main/ruby/shell/commands/move.rb
index e6b2828..24816f3 100644
--- a/hbase-shell/src/main/ruby/shell/commands/move.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/move.rb
@@ -38,9 +38,7 @@ EOF
       end
 
       def command(encoded_region_name, server_name = nil)
-        format_simple_command do
-          admin.move(encoded_region_name, server_name)
-        end
+        admin.move(encoded_region_name, server_name)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/normalize.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/normalize.rb b/hbase-shell/src/main/ruby/shell/commands/normalize.rb
index 7e6302c..0a61227 100644
--- a/hbase-shell/src/main/ruby/shell/commands/normalize.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/normalize.rb
@@ -33,11 +33,7 @@ EOF
       end
 
       def command()
-        format_simple_command do
-          formatter.row([
-            admin.normalize()? "true": "false"
-          ])
-        end
+        formatter.row([admin.normalize()? "true": "false"])
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/normalizer_enabled.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/normalizer_enabled.rb b/hbase-shell/src/main/ruby/shell/commands/normalizer_enabled.rb
index 1121b25..d39b777 100644
--- a/hbase-shell/src/main/ruby/shell/commands/normalizer_enabled.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/normalizer_enabled.rb
@@ -30,11 +30,7 @@ EOF
       end
 
       def command()
-        format_simple_command do
-          formatter.row([
-            admin.normalizer_enabled?.to_s
-          ])
-        end
+        formatter.row([admin.normalizer_enabled?.to_s])
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb b/hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb
index 6d959c4..7a12b71 100644
--- a/hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/normalizer_switch.rb
@@ -32,11 +32,7 @@ EOF
       end
 
       def command(enableDisable)
-        format_simple_command do
-          formatter.row([
-            admin.normalizer_switch(enableDisable)? "true" : "false"
-          ])
-        end
+        formatter.row([admin.normalizer_switch(enableDisable)? "true" : "false"])
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/put.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/put.rb b/hbase-shell/src/main/ruby/shell/commands/put.rb
index 2b47a4d..39f9fea 100644
--- a/hbase-shell/src/main/ruby/shell/commands/put.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/put.rb
@@ -45,9 +45,8 @@ EOF
       end
 
       def put(table, row, column, value, timestamp = nil, args = {})
-        format_simple_command do
-          table._put_internal(row, column, value, timestamp, args)
-        end
+        @start_time = Time.now
+        table._put_internal(row, column, value, timestamp, args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/remove_peer.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/remove_peer.rb b/hbase-shell/src/main/ruby/shell/commands/remove_peer.rb
index 5ae5786..bc9d6ab 100644
--- a/hbase-shell/src/main/ruby/shell/commands/remove_peer.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/remove_peer.rb
@@ -30,9 +30,7 @@ EOF
       end
 
       def command(id)
-        format_simple_command do
-          replication_admin.remove_peer(id)
-        end
+        replication_admin.remove_peer(id)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/remove_peer_tableCFs.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/remove_peer_tableCFs.rb b/hbase-shell/src/main/ruby/shell/commands/remove_peer_tableCFs.rb
index 70bc9b5..adfb85d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/remove_peer_tableCFs.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/remove_peer_tableCFs.rb
@@ -33,9 +33,7 @@ EOF
       end
 
       def command(id, table_cfs)
-        format_simple_command do
-          replication_admin.remove_peer_tableCFs(id, table_cfs)
-        end
+        replication_admin.remove_peer_tableCFs(id, table_cfs)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb
index 4d53171..2471e1b 100644
--- a/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/restore_snapshot.rb
@@ -32,9 +32,7 @@ EOF
       end
 
       def command(snapshot_name)
-        format_simple_command do
-          admin.restore_snapshot(snapshot_name)
-        end
+        admin.restore_snapshot(snapshot_name)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/revoke.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/revoke.rb b/hbase-shell/src/main/ruby/shell/commands/revoke.rb
index 4a0d5ff..bcf60e9 100644
--- a/hbase-shell/src/main/ruby/shell/commands/revoke.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/revoke.rb
@@ -39,9 +39,7 @@ EOF
       end
 
       def command(user, table_name=nil, family=nil, qualifier=nil)
-        format_simple_command do
-          security_admin.revoke(user, table_name, family, qualifier)
-        end
+        security_admin.revoke(user, table_name, family, qualifier)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/scan.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/scan.rb b/hbase-shell/src/main/ruby/shell/commands/scan.rb
index 381acef..b3cc5c8 100644
--- a/hbase-shell/src/main/ruby/shell/commands/scan.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/scan.rb
@@ -104,16 +104,17 @@ EOF
 
       #internal command that actually does the scanning
       def scan(table, args = {})
-        now = Time.now
         formatter.header(["ROW", "COLUMN+CELL"])
 
         scan = table._hash_to_scan(args)
         #actually do the scanning
+        @start_time = Time.now
         count, is_stale = table._scan_internal(args, scan) do |row, cells|
           formatter.row([ row, cells ])
         end
+        @end_time = Time.now
 
-        formatter.footer(now, count, is_stale)
+        formatter.footer(count, is_stale)
         # if scan metrics were enabled, print them after the results
         if (scan != nil && scan.isScanMetricsEnabled())
           formatter.scan_metrics(scan.getScanMetrics(), args["METRICS"])
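
scan is one of the few commands that records both endpoints of the timed
region. The same bracketing pattern in plain Ruby, with slow_scan standing in
for table._scan_internal:

    @start_time = Time.now
    rows = slow_scan()          # the region being measured
    @end_time = Time.now
    puts format('%.4f seconds elapsed', @end_time - @start_time)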

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/set_auths.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_auths.rb b/hbase-shell/src/main/ruby/shell/commands/set_auths.rb
index 4a52eb0..5663ec3 100644
--- a/hbase-shell/src/main/ruby/shell/commands/set_auths.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/set_auths.rb
@@ -31,9 +31,7 @@ EOF
       end
 
       def command(user, *args)
-        format_simple_command do
-          visibility_labels_admin.set_auths(user, args)
-        end
+        visibility_labels_admin.set_auths(user, args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb b/hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb
index fb7fae5..b2e823c 100644
--- a/hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb
@@ -41,9 +41,7 @@ module Shell
       end
 
       def command(id, peer_table_cfs = nil)
-        format_simple_command do
-          replication_admin.set_peer_tableCFs(id, peer_table_cfs)
-        end
+        replication_admin.set_peer_tableCFs(id, peer_table_cfs)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/set_visibility.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_visibility.rb b/hbase-shell/src/main/ruby/shell/commands/set_visibility.rb
index 2c77d8b..058ccf2 100644
--- a/hbase-shell/src/main/ruby/shell/commands/set_visibility.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/set_visibility.rb
@@ -51,7 +51,7 @@ EOF
 
       def command(table, visibility, scan)
         t = table(table)
-        now = Time.now
+        @start_time = Time.now
         scanner = t._get_scanner(scan)
         count = 0
         iter = scanner.iterator
@@ -65,7 +65,7 @@ EOF
           end
           count += 1
         end
-        formatter.footer(now, count)
+        formatter.footer(count)
       end
 
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/show_filters.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/show_filters.rb b/hbase-shell/src/main/ruby/shell/commands/show_filters.rb
index cdbd9ed..5ff0be4 100644
--- a/hbase-shell/src/main/ruby/shell/commands/show_filters.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/show_filters.rb
@@ -36,7 +36,6 @@ EOF
       end
 
       def command( )
-        now = Time.now
         parseFilter = ParseFilter.new
         supportedFilters = parseFilter.getSupportedFilters
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/snapshot.rb b/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
index 15bf298..fd37d07 100644
--- a/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/snapshot.rb
@@ -29,9 +29,7 @@ EOF
       end
 
       def command(table, snapshot_name, *args)
-        format_simple_command do
-          admin.snapshot(table, snapshot_name, *args)
-        end
+        admin.snapshot(table, snapshot_name, *args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/split.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/split.rb b/hbase-shell/src/main/ruby/shell/commands/split.rb
index 9dc424f..9e6ec6a 100644
--- a/hbase-shell/src/main/ruby/shell/commands/split.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/split.rb
@@ -34,9 +34,7 @@ EOF
       end
 
       def command(table_or_region_name, split_point = nil)
-        format_simple_command do
-          admin.split(table_or_region_name, split_point)
-        end
+        admin.split(table_or_region_name, split_point)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb b/hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb
index 7da7564..5a13871 100644
--- a/hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/splitormerge_enabled.rb
@@ -30,11 +30,9 @@ EOF
       end
 
       def command(switch_type)
-        format_simple_command do
-          formatter.row(
-            [admin.splitormerge_enabled(switch_type) ? 'true' : 'false']
-          )
-        end
+        formatter.row(
+          [admin.splitormerge_enabled(switch_type) ? 'true' : 'false']
+        )
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb b/hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb
index f4c2858..73dc82d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/splitormerge_switch.rb
@@ -32,11 +32,9 @@ EOF
       end
 
       def command(switch_type, enabled)
-        format_simple_command do
-          formatter.row(
-            [admin.splitormerge_switch(switch_type, enabled) ? 'true' : 'false']
-          )
-        end
+        formatter.row(
+          [admin.splitormerge_switch(switch_type, enabled) ? 'true' : 'false']
+        )
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/trace.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/trace.rb b/hbase-shell/src/main/ruby/shell/commands/trace.rb
index 5e00930..d838979 100644
--- a/hbase-shell/src/main/ruby/shell/commands/trace.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/trace.rb
@@ -48,9 +48,7 @@ EOF
       end
 
       def command(startstop="status", spanname="HBaseShell")
-        format_and_return_simple_command do 
-          trace(startstop, spanname)
-        end
+        trace(startstop, spanname)
       end
 
       def trace(startstop, spanname)

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/truncate.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/truncate.rb b/hbase-shell/src/main/ruby/shell/commands/truncate.rb
index b7812fb..3f888c6 100644
--- a/hbase-shell/src/main/ruby/shell/commands/truncate.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/truncate.rb
@@ -27,10 +27,8 @@ EOF
       end
 
       def command(table)
-        format_simple_command do
-          puts "Truncating '#{table}' table (it may take a while):"
-          admin.truncate(table) { |log| puts " - #{log}" }
-        end
+        puts "Truncating '#{table}' table (it may take a while):"
+        admin.truncate(table) { |log| puts " - #{log}" }
       end
 
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb b/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb
index 918b232..fcce5e5 100644
--- a/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/truncate_preserve.rb
@@ -27,10 +27,8 @@ EOF
       end
 
       def command(table)
-        format_simple_command do
-          puts "Truncating '#{table}' table (it may take a while):"
-          admin.truncate_preserve(table) { |log| puts " - #{log}" }
-        end
+        puts "Truncating '#{table}' table (it may take a while):"
+        admin.truncate_preserve(table) { |log| puts " - #{log}" }
       end
 
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/unassign.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/unassign.rb b/hbase-shell/src/main/ruby/shell/commands/unassign.rb
index 5eea71f..b69971f 100644
--- a/hbase-shell/src/main/ruby/shell/commands/unassign.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/unassign.rb
@@ -36,9 +36,7 @@ EOF
       end
 
       def command(region_name, force = 'false')
-        format_simple_command do
-          admin.unassign(region_name, force)
-        end
+        admin.unassign(region_name, force)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/update_all_config.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/update_all_config.rb b/hbase-shell/src/main/ruby/shell/commands/update_all_config.rb
index 05295b7..cb6852f 100644
--- a/hbase-shell/src/main/ruby/shell/commands/update_all_config.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/update_all_config.rb
@@ -30,9 +30,7 @@ EOF
       end
 
       def command()
-        format_simple_command do
-          admin.update_all_config()
-        end
+        admin.update_all_config()
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/update_config.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/update_config.rb b/hbase-shell/src/main/ruby/shell/commands/update_config.rb
index 9f65fdd..3617bb3 100644
--- a/hbase-shell/src/main/ruby/shell/commands/update_config.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/update_config.rb
@@ -31,9 +31,7 @@ EOF
       end
 
       def command(serverName)
-        format_simple_command do
-          admin.update_config(serverName)
-        end
+        admin.update_config(serverName)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/update_peer_config.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/update_peer_config.rb b/hbase-shell/src/main/ruby/shell/commands/update_peer_config.rb
index 5d721fd..c09acc2 100644
--- a/hbase-shell/src/main/ruby/shell/commands/update_peer_config.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/update_peer_config.rb
@@ -40,9 +40,7 @@ To update TABLE_CFs, see the append_peer_tableCFs and remove_peer_tableCFs comma
       end
 
       def command(id, args = {})
-        format_simple_command do
-          replication_admin.update_peer_config(id, args)
-        end
+        replication_admin.update_peer_config(id, args)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/user_permission.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/user_permission.rb b/hbase-shell/src/main/ruby/shell/commands/user_permission.rb
index 71b98f3..4b5d3ff 100644
--- a/hbase-shell/src/main/ruby/shell/commands/user_permission.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/user_permission.rb
@@ -40,16 +40,14 @@ EOF
       end
 
       def command(table_regex=nil)
-        #format_simple_command do
         #admin.user_permission(table_regex)
-        now = Time.now
         formatter.header(["User", "Namespace,Table,Family,Qualifier:Permission"])
 
         count = security_admin.user_permission(table_regex) do |user, permission|
           formatter.row([ user, permission])
         end
 
-        formatter.footer(now, count)
+        formatter.footer(count)
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/commands/wal_roll.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/wal_roll.rb b/hbase-shell/src/main/ruby/shell/commands/wal_roll.rb
index 0fe1870..a94e9e1 100644
--- a/hbase-shell/src/main/ruby/shell/commands/wal_roll.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/wal_roll.rb
@@ -30,11 +30,10 @@ EOF
       end
 
       def command(server_name)
-        format_simple_command do
-          admin.wal_roll(server_name)
-        end
+        admin.wal_roll(server_name)
       end
     end
+
     #TODO remove old HLog version
     class HlogRoll < WalRoll
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/main/ruby/shell/formatter.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/formatter.rb b/hbase-shell/src/main/ruby/shell/formatter.rb
index 6e598fb..2f800f6 100644
--- a/hbase-shell/src/main/ruby/shell/formatter.rb
+++ b/hbase-shell/src/main/ruby/shell/formatter.rb
@@ -64,7 +64,7 @@ module Shell
 
         # Print a string
         if args.is_a?(String)
-          output(args)
+          output_str(args)
           @out.puts
           return
         end
@@ -162,7 +162,7 @@ module Shell
         return str
       end
 
-      def output(str)
+      def output_str(str)
         output(@max_width, str)
       end
 
@@ -177,15 +177,12 @@ module Shell
         end
       end
 
-      def footer(start_time = nil, row_count = nil, is_stale = false)
-        return unless start_time
+      def footer(row_count = nil, is_stale = false)
         row_count ||= @row_count
        # Output the row count
-        @out.print("%d row(s) in %.4f seconds" % [row_count, Time.now - start_time])
+        @out.puts("%d row(s)" % [row_count])
         if is_stale == true
           @out.puts(" (possible stale results) ")
-        else
-          @out.puts("")
         end
       end
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/test/ruby/shell/formatter_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/shell/formatter_test.rb b/hbase-shell/src/test/ruby/shell/formatter_test.rb
index 7010479..540dd09 100644
--- a/hbase-shell/src/test/ruby/shell/formatter_test.rb
+++ b/hbase-shell/src/test/ruby/shell/formatter_test.rb
@@ -63,6 +63,6 @@ class ShellFormatterTest < Test::Unit::TestCase
   end
 
  define_test "Formatter#footer should work" do
-    formatter.footer(Time.now - 5)
+    formatter.footer()
   end
 end

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/test/ruby/shell/shell_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/shell/shell_test.rb b/hbase-shell/src/test/ruby/shell/shell_test.rb
index f37f60c..ab150a5 100644
--- a/hbase-shell/src/test/ruby/shell/shell_test.rb
+++ b/hbase-shell/src/test/ruby/shell/shell_test.rb
@@ -26,12 +26,12 @@ class ShellTest < Test::Unit::TestCase
     @shell = Shell::Shell.new(@hbase)
   end
 
-  define_test "Shell::Shell#hbase_admin should return an admin instance" do
-    assert_kind_of(Hbase::Admin, @shell.hbase_admin)
+  define_test "Shell::Shell#admin should return an admin instance" do
+    assert_kind_of(Hbase::Admin, @shell.admin)
   end
 
-  define_test "Shell::Shell#hbase_admin should cache admin instances" do
-    assert_same(@shell.hbase_admin, @shell.hbase_admin)
+  define_test "Shell::Shell#admin should cache admin instances" do
+    assert_same(@shell.admin, @shell.admin)
   end
 
   #-------------------------------------------------------------------------------
@@ -44,6 +44,10 @@ class ShellTest < Test::Unit::TestCase
     assert_not_same(@shell.hbase_table('hbase:meta'), @shell.hbase_table('hbase:meta'))
   end
 
+  define_test "Shell::Shell#hbase attribute is a HBase instance" do
+    assert_kind_of(Hbase::Hbase, @shell.hbase)
+  end
+
   #-------------------------------------------------------------------------------
 
   define_test "Shell::Shell#export_commands should export command methods to specified object" do

http://git-wip-us.apache.org/repos/asf/hbase/blob/70762faa/hbase-shell/src/test/ruby/test_helper.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/test_helper.rb b/hbase-shell/src/test/ruby/test_helper.rb
index 11645d5..0b3c420 100644
--- a/hbase-shell/src/test/ruby/test_helper.rb
+++ b/hbase-shell/src/test/ruby/test_helper.rb
@@ -55,7 +55,7 @@ module Hbase
     end
 
     def admin
-      @shell.hbase_admin
+      @shell.admin
     end
 
     def taskmonitor


[10/50] hbase git commit: HBASE-15933 NullPointerException may be thrown from SimpleRegionNormalizer#getRegionSize()

Posted by sy...@apache.org.
HBASE-15933 NullPointerException may be thrown from SimpleRegionNormalizer#getRegionSize()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cfe868d5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cfe868d5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cfe868d5

Branch: refs/heads/hbase-12439
Commit: cfe868d56eeb0367c2fcf4a18a1d06c57abb7e54
Parents: a0f49c9
Author: tedyu <yu...@gmail.com>
Authored: Thu Jun 2 01:55:28 2016 -0700
Committer: tedyu <yu...@gmail.com>
Committed: Thu Jun 2 01:55:28 2016 -0700

----------------------------------------------------------------------
 .../hbase/master/normalizer/SimpleRegionNormalizer.java      | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/cfe868d5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
index 583f873..d209eb7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
@@ -140,7 +140,9 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
     for (int i = 0; i < tableRegions.size(); i++) {
       HRegionInfo hri = tableRegions.get(i);
       long regionSize = getRegionSize(hri);
-      totalSizeMb += regionSize;
+      if (regionSize > 0) {
+        totalSizeMb += regionSize;
+      }
     }
 
     double avgRegionSize = totalSizeMb / (double) tableRegions.size();
@@ -204,6 +206,10 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
       getRegionServerOfRegion(hri);
     RegionLoad regionLoad = masterServices.getServerManager().getLoad(sn).
       getRegionsLoad().get(hri.getRegionName());
+    if (regionLoad == null) {
+      LOG.debug(hri.getRegionNameAsString() + " was not found in RegionsLoad");
+      return -1;
+    }
     return regionLoad.getStorefileSizeMB();
   }
 }
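
Distilled, the fix above replaces a possible NullPointerException with a -1 sentinel that the caller filters out. A minimal, self-contained Java sketch of that pattern (the class and method names below are illustrative stand-ins, not the HBase API):

import java.util.Map;

final class RegionSizeSketch {
  // Return -1 when no load record exists, mirroring the new RegionsLoad guard.
  static long sizeMbOrSentinel(Map<String, Long> regionLoads, String regionName) {
    Long sizeMb = regionLoads.get(regionName);
    if (sizeMb == null) {
      return -1;
    }
    return sizeMb;
  }

  // Mirror the new "regionSize > 0" check: unknown sizes never pollute the total.
  static long totalMb(Map<String, Long> regionLoads, Iterable<String> regionNames) {
    long total = 0;
    for (String name : regionNames) {
      long size = sizeMbOrSentinel(regionLoads, name);
      if (size > 0) {
        total += size;
      }
    }
    return total;
  }
}

With the guard in place, the average-size computation stays safe even while a region's load has not yet been reported to the master.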


[25/50] hbase git commit: HBASE-15174 Client Public API should not have PB objects in 2.0 (Ram)

Posted by sy...@apache.org.
HBASE-15174 Client Public API should not have PB objects in 2.0 (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b21c56e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b21c56e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b21c56e7

Branch: refs/heads/hbase-12439
Commit: b21c56e7958652ca6e6daf04642eb51abaf2b3d7
Parents: 70762fa
Author: Ramkrishna <ra...@intel.com>
Authored: Mon Jun 6 10:11:38 2016 +0530
Committer: Ramkrishna <ra...@intel.com>
Committed: Mon Jun 6 10:11:38 2016 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/RegionLoad.java     |   1 +
 .../org/apache/hadoop/hbase/ServerLoad.java     |   2 +
 .../hadoop/hbase/client/RegionLoadStats.java    |   4 +-
 .../hbase/TestInterfaceAudienceAnnotations.java | 152 +++++++++++++++++++
 .../org/apache/hadoop/hbase/util/Triple.java    |   4 +
 5 files changed, 161 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b21c56e7/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index 5bf2ec7..b5852d4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -38,6 +38,7 @@ public class RegionLoad {
 
   protected ClusterStatusProtos.RegionLoad regionLoadPB;
 
+  @InterfaceAudience.Private
   public RegionLoad(ClusterStatusProtos.RegionLoad regionLoadPB) {
     this.regionLoadPB = regionLoadPB;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b21c56e7/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 1ddcc20..3ea59db 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -57,6 +57,7 @@ public class ServerLoad {
   private long totalCompactingKVs = 0;
   private long currentCompactedKVs = 0;
 
+  @InterfaceAudience.Private
   public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) {
     this.serverLoad = serverLoad;
     for (ClusterStatusProtos.RegionLoad rl: serverLoad.getRegionLoadsList()) {
@@ -81,6 +82,7 @@ public class ServerLoad {
   // NOTE: Function name cannot start with "get" because then an OpenDataException is thrown because
   // HBaseProtos.ServerLoad cannot be converted to an open data type(see HBASE-5967).
   /* @return the underlying ServerLoad protobuf object */
+  @InterfaceAudience.Private
   public ClusterStatusProtos.ServerLoad obtainServerLoadPB() {
     return serverLoad;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b21c56e7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java
index 443026f..bfdb216 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionLoadStats.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.hbase.client;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
 /**
  * POJO representing region server load
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class RegionLoadStats {
   int memstoreLoad;
   int heapOccupancy;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b21c56e7/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
index 0e0fbb0..426b6a7 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java
@@ -20,7 +20,11 @@ package org.apache.hadoop.hbase;
 
 import java.io.IOException;
 import java.lang.annotation.Annotation;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
 import java.lang.reflect.Modifier;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Set;
 
 import org.apache.commons.logging.Log;
@@ -28,6 +32,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.Triple;
 import org.apache.hadoop.hbase.ClassFinder.And;
 import org.apache.hadoop.hbase.ClassFinder.FileNameFilter;
 import org.apache.hadoop.hbase.ClassFinder.Not;
@@ -59,6 +65,7 @@ import org.junit.experimental.categories.Category;
 @Category(SmallTests.class)
 public class TestInterfaceAudienceAnnotations {
 
+  private static final String HBASE_PROTOBUF = "org.apache.hadoop.hbase.protobuf.generated";
   private static final Log LOG = LogFactory.getLog(TestInterfaceAudienceAnnotations.class);
 
   /** Selects classes with generated in their package name */
@@ -180,6 +187,28 @@ public class TestInterfaceAudienceAnnotations {
         c.equals(InterfaceStability.Evolving.class);
   }
 
+  private boolean isInterfacePrivateMethod(Method m) {
+    if(m.getDeclaredAnnotations().length > 0) {
+      for(Annotation ann : m.getDeclaredAnnotations()) {
+        if(ann.annotationType().equals(InterfaceAudience.Private.class)) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  private boolean isInterfacePrivateConstructor(Constructor<?> c) {
+    if(c.getDeclaredAnnotations().length > 0) {
+      for(Annotation ann : c.getDeclaredAnnotations()) {
+        if(ann.annotationType().equals(InterfaceAudience.Private.class)) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
   /** Selects classes that are declared public */
   class PublicClassFilter implements ClassFinder.ClassFilter {
     @Override
@@ -299,4 +328,127 @@ public class TestInterfaceAudienceAnnotations {
         + "have @InterfaceStability annotation as well",
       0, classes.size());
   }
+
+  @Test
+  public void testProtosInReturnTypes() throws ClassNotFoundException, IOException, LinkageError {
+    Set<Class<?>> classes = findPublicClasses();
+    List<Pair<Class<?>, Method>> protosReturnType = new ArrayList<Pair<Class<?>, Method>>();
+    for (Class<?> clazz : classes) {
+      findProtoInReturnType(clazz, protosReturnType);
+    }
+    if (protosReturnType.size() != 0) {
+      LOG.info("These are the methods that have Protos as the return type");
+      for (Pair<Class<?>, Method> pair : protosReturnType) {
+        LOG.info(pair.getFirst().getName() + " " + pair.getSecond().getName() + " "
+            + pair.getSecond().getReturnType().getName());
+      }
+    }
+
+    Assert.assertEquals("Public exposed methods should not have protos in return type", 0,
+      protosReturnType.size());
+  }
+
+  private Set<Class<?>> findPublicClasses()
+      throws ClassNotFoundException, IOException, LinkageError {
+    ClassFinder classFinder =
+        new ClassFinder(new And(new MainCodeResourcePathFilter(), new TestFileNameFilter()),
+            new Not((FileNameFilter) new TestFileNameFilter()),
+            new And(new PublicClassFilter(), new Not(new TestClassFilter()),
+                new Not(new GeneratedClassFilter()),
+                new InterfaceAudiencePublicAnnotatedClassFilter()));
+    Set<Class<?>> classes = classFinder.findClasses(false);
+    return classes;
+  }
+
+  @Test
+  public void testProtosInParamTypes() throws ClassNotFoundException, IOException, LinkageError {
+    Set<Class<?>> classes = findPublicClasses();
+    List<Triple<Class<?>, Method, Class<?>>> protosParamType =
+        new ArrayList<Triple<Class<?>, Method, Class<?>>>();
+    for (Class<?> clazz : classes) {
+      findProtoInParamType(clazz, protosParamType);
+    }
+
+    if (protosParamType.size() != 0) {
+      LOG.info("These are the methods that have Protos as the param type");
+      for (Triple<Class<?>, Method, Class<?>> pair : protosParamType) {
+        LOG.info(pair.getFirst().getName() + " " + pair.getSecond().getName() + " "
+            + pair.getThird().getName());
+      }
+    }
+
+    Assert.assertEquals("Public exposed methods should not have protos in param type", 0,
+      protosParamType.size());
+  }
+
+  @Test
+  public void testProtosInConstructors() throws ClassNotFoundException, IOException, LinkageError {
+    Set<Class<?>> classes = findPublicClasses();
+    List<Class<?>> classList = new ArrayList<Class<?>>();
+    for (Class<?> clazz : classes) {
+      Constructor<?>[] constructors = clazz.getConstructors();
+      for (Constructor<?> cons : constructors) {
+        if (!isInterfacePrivateConstructor(cons)) {
+          Class<?>[] parameterTypes = cons.getParameterTypes();
+          for (Class<?> param : parameterTypes) {
+            if (param.getName().contains(HBASE_PROTOBUF)) {
+              classList.add(clazz);
+              break;
+            }
+          }
+        }
+      }
+    }
+
+    if (classList.size() != 0) {
+      LOG.info("These are the classes that have Protos in the constructor");
+      for (Class<?> clazz : classList) {
+        LOG.info(clazz.getName());
+      }
+    }
+
+    Assert.assertEquals("Public exposed classes should not have protos in constructors", 0,
+      classList.size());
+  }
+
+  private void findProtoInReturnType(Class<?> clazz,
+      List<Pair<Class<?>, Method>> protosReturnType) {
+    Pair<Class<?>, Method> returnTypePair = new Pair<Class<?>, Method>();
+    Method[] methods = clazz.getMethods();
+    returnTypePair.setFirst(clazz);
+    for (Method method : methods) {
+      if (clazz.isInterface() || method.getModifiers() == Modifier.PUBLIC) {
+        if (!isInterfacePrivateMethod(method)) {
+          Class<?> returnType = method.getReturnType();
+          if (returnType.getName().contains(HBASE_PROTOBUF)) {
+            returnTypePair.setSecond(method);
+            protosReturnType.add(returnTypePair);
+            continue;
+          }
+        }
+      }
+    }
+  }
+
+  private void findProtoInParamType(Class<?> clazz,
+      List<Triple<Class<?>, Method, Class<?>>> protosParamType) {
+    Triple<Class<?>, Method, Class<?>> paramType = new Triple<Class<?>, Method, Class<?>>();
+    Method[] methods = clazz.getMethods();
+    paramType.setFirst(clazz);
+    for (Method method : methods) {
+      if (clazz.isInterface() || method.getModifiers() == Modifier.PUBLIC) {
+        if (!isInterfacePrivateMethod(method)) {
+          Class<?>[] parameters = method.getParameterTypes();
+          for (Class<?> param : parameters) {
+            if (param.getName().contains(HBASE_PROTOBUF)) {
+              paramType.setSecond(method);
+              paramType.setThird(param);
+              protosParamType.add(paramType);
+              break;
+            }
+          }
+        }
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b21c56e7/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java
index 1438ab7..1de6bee 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Triple.java
@@ -28,6 +28,10 @@ public class Triple<A, B, C> {
   private A first;
   private B second;
   private C third;
+  // default constructor
+  public Triple() {
+
+  }
 
   public Triple(A first, B second, C third) {
     this.first = first;
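
The new test reduces to a reflection scan over the public API surface. A minimal sketch of the core idea (a simplified stand-in, not the committed test class):

import java.lang.reflect.Method;

public class ProtoLeakSketch {
  private static final String HBASE_PROTOBUF = "org.apache.hadoop.hbase.protobuf.generated";

  // Flag any public method whose return type lives in the generated-protobuf package.
  public static boolean returnsProto(Class<?> clazz) {
    for (Method method : clazz.getMethods()) {
      if (method.getReturnType().getName().contains(HBASE_PROTOBUF)) {
        return true;
      }
    }
    return false;
  }
}

The committed version extends the same scan to method parameters and constructors, and honors @InterfaceAudience.Private annotations as an escape hatch for members that may legitimately expose PB types.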


[42/50] hbase git commit: Revert "HBASE-15994 Allow selection of RpcSchedulers Adds logging by the RpcExecutors of their run configs Changes the default RpcSchedulerFactory from SimpleRpcSchedulerFactory.class to RpcSchedulerFactoryImpl.class. RpcSchedul

Posted by sy...@apache.org.
Revert "HBASE-15994 Allow selection of RpcSchedulers. Adds logging by the RpcExecutors of their run configs. Changes the default RpcSchedulerFactory from SimpleRpcSchedulerFactory.class to RpcSchedulerFactoryImpl.class. RpcSchedulerFactoryImpl.class defaults to using SimpleRpcSchedulerFactory.class and the SimpleRpcScheduler, as has been the default up to now, unless you set "hbase.region.server.rpc.scheduler.class" to org.apache.hadoop.hbase.ipc.FifoRpcScheduler"

This reverts commit 3ac4a57fd205e1909c874cabd7fda9fb176f3f0f.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/407aa4d4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/407aa4d4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/407aa4d4

Branch: refs/heads/hbase-12439
Commit: 407aa4d4963eb0d4aa3e0950babc8cec31367e11
Parents: 3ac4a57
Author: stack <st...@apache.org>
Authored: Wed Jun 8 20:22:50 2016 -0700
Committer: stack <st...@apache.org>
Committed: Wed Jun 8 20:22:50 2016 -0700

----------------------------------------------------------------------
 .../hbase/ipc/BalancedQueueRpcExecutor.java     |  4 --
 .../hadoop/hbase/ipc/FifoRpcScheduler.java      |  5 --
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java    | 54 +++++++--------
 .../regionserver/FifoRpcSchedulerFactory.java   | 47 -------------
 .../hbase/regionserver/RpcSchedulerFactory.java |  4 +-
 .../regionserver/SimpleRpcSchedulerFactory.java |  6 +-
 .../regionserver/TestRpcSchedulerFactory.java   | 71 --------------------
 7 files changed, 30 insertions(+), 161 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/407aa4d4/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
index 3505221..e4205eb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
@@ -22,8 +22,6 @@ import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -38,7 +36,6 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
 @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX })
 @InterfaceStability.Evolving
 public class BalancedQueueRpcExecutor extends RpcExecutor {
-  private static final Log LOG = LogFactory.getLog(BalancedQueueRpcExecutor.class);
 
   protected final List<BlockingQueue<CallRunner>> queues;
   private final QueueBalancer balancer;
@@ -65,7 +62,6 @@ public class BalancedQueueRpcExecutor extends RpcExecutor {
     queues = new ArrayList<BlockingQueue<CallRunner>>(numQueues);
     this.balancer = getBalancer(numQueues);
     initializeQueues(numQueues, queueClass, initargs);
-    LOG.debug(name + " queues=" + numQueues + " handlerCount=" + handlerCount);
   }
 
   protected void initializeQueues(final int numQueues,

http://git-wip-us.apache.org/repos/asf/hbase/blob/407aa4d4/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
index 70d903a..ee36f3f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DaemonThreadFactory;
 
@@ -34,7 +32,6 @@ import java.util.concurrent.atomic.AtomicInteger;
  * This can be used for HMaster, where no prioritization is needed.
  */
 public class FifoRpcScheduler extends RpcScheduler {
-  private static final Log LOG = LogFactory.getLog(FifoRpcScheduler.class);
   private final int handlerCount;
   private final int maxQueueLength;
   private final AtomicInteger queueSize = new AtomicInteger(0);
@@ -44,8 +41,6 @@ public class FifoRpcScheduler extends RpcScheduler {
     this.handlerCount = handlerCount;
     this.maxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH,
         handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
-    LOG.info("Using " + this.getClass().getSimpleName() + " as user call queue; handlerCount=" +
-        handlerCount + "; maxQueueLength=" + maxQueueLength);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/407aa4d4/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
index d9d61c1..431aeeb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
@@ -34,11 +34,8 @@ import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.util.BoundedPriorityBlockingQueue;
 
 /**
- * The default scheduler. Configurable. Maintains isolated handler pools for general ('default'),
- * high-priority ('priority'), and replication ('replication') requests. Default behavior is to
- * balance the requests across handlers. Add configs to enable balancing by read vs writes, etc.
- * See below article for explanation of options.
- * @see <a href="http://blog.cloudera.com/blog/2014/12/new-in-cdh-5-2-improvements-for-running-multiple-workloads-on-a-single-hbase-cluster/">Overview on Request Queuing</a>
+ * A scheduler that maintains isolated handler pools for general,
+ * high-priority, and replication requests.
  */
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
@@ -52,8 +49,7 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs
   public static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY =
       "hbase.ipc.server.callqueue.handler.factor";
 
-  /** If set to 'deadline', the default, uses a priority queue and deprioritizes long-running scans
-   */
+  /** If set to 'deadline', uses a priority queue and deprioritize long-running scans */
   public static final String CALL_QUEUE_TYPE_CONF_KEY = "hbase.ipc.server.callqueue.type";
   public static final String CALL_QUEUE_TYPE_CODEL_CONF_VALUE = "codel";
   public static final String CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE = "deadline";
@@ -194,58 +190,54 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs
 
     float callQueuesHandlersFactor = conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0);
     int numCallQueues = Math.max(1, (int)Math.round(handlerCount * callQueuesHandlersFactor));
-    LOG.info("Using " + callQueueType + " as user call queue; numCallQueues=" + numCallQueues +
-        "; callQReadShare=" + callqReadShare + ", callQScanShare=" + callqScanShare);
+
+    LOG.info("Using " + callQueueType + " as user call queue, count=" + numCallQueues);
+
     if (numCallQueues > 1 && callqReadShare > 0) {
       // multiple read/write queues
-      if (isDeadlineQueueType(callQueueType)) {
+      if (callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE)) {
         CallPriorityComparator callPriority = new CallPriorityComparator(conf, this.priority);
-        callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount, numCallQueues,
+        callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount, numCallQueues,
             callqReadShare, callqScanShare, maxQueueLength, conf, abortable,
             BoundedPriorityBlockingQueue.class, callPriority);
       } else if (callQueueType.equals(CALL_QUEUE_TYPE_CODEL_CONF_VALUE)) {
         Object[] callQueueInitArgs = {maxQueueLength, codelTargetDelay, codelInterval,
           codelLifoThreshold, numGeneralCallsDropped, numLifoModeSwitches};
-        callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount,
+        callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount,
           numCallQueues, callqReadShare, callqScanShare,
           AdaptiveLifoCoDelCallQueue.class, callQueueInitArgs,
           AdaptiveLifoCoDelCallQueue.class, callQueueInitArgs);
       } else {
-        callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount, numCallQueues,
+        callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount, numCallQueues,
           callqReadShare, callqScanShare, maxQueueLength, conf, abortable);
       }
     } else {
       // multiple queues
-      if (isDeadlineQueueType(callQueueType)) {
+      if (callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE)) {
         CallPriorityComparator callPriority = new CallPriorityComparator(conf, this.priority);
-        callExecutor =
-          new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount, numCallQueues,
-            conf, abortable, BoundedPriorityBlockingQueue.class, maxQueueLength, callPriority);
+        callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount, numCallQueues,
+          conf, abortable, BoundedPriorityBlockingQueue.class, maxQueueLength, callPriority);
       } else if (callQueueType.equals(CALL_QUEUE_TYPE_CODEL_CONF_VALUE)) {
-        callExecutor =
-          new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount, numCallQueues,
-            conf, abortable, AdaptiveLifoCoDelCallQueue.class, maxQueueLength,
-            codelTargetDelay, codelInterval, codelLifoThreshold,
-            numGeneralCallsDropped, numLifoModeSwitches);
+        callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount, numCallQueues,
+          conf, abortable, AdaptiveLifoCoDelCallQueue.class, maxQueueLength,
+          codelTargetDelay, codelInterval, codelLifoThreshold,
+          numGeneralCallsDropped, numLifoModeSwitches);
       } else {
-        callExecutor = new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount,
+        callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount,
             numCallQueues, maxQueueLength, conf, abortable);
       }
     }
+
     // Create 2 queues to help priorityExecutor be more scalable.
     this.priorityExecutor = priorityHandlerCount > 0 ?
-      new BalancedQueueRpcExecutor("BalancedQ.priority", priorityHandlerCount, 2,
-          maxPriorityQueueLength):
-      null;
+        new BalancedQueueRpcExecutor("Priority", priorityHandlerCount, 2, maxPriorityQueueLength) :
+        null;
+
    this.replicationExecutor =
-     replicationHandlerCount > 0 ? new BalancedQueueRpcExecutor("BalancedQ.replication",
+     replicationHandlerCount > 0 ? new BalancedQueueRpcExecutor("Replication",
        replicationHandlerCount, 1, maxQueueLength, conf, abortable) : null;
   }
 
-  private static boolean isDeadlineQueueType(final String callQueueType) {
-    return callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE);
-  }
-
   public SimpleRpcScheduler(
 	      Configuration conf,
 	      int handlerCount,

http://git-wip-us.apache.org/repos/asf/hbase/blob/407aa4d4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
deleted file mode 100644
index f4b51ba..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
-import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
-import org.apache.hadoop.hbase.ipc.PriorityFunction;
-import org.apache.hadoop.hbase.ipc.RpcScheduler;
-
-/**
- * Factory to use when you want to use the {@link FifoRpcScheduler}
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class FifoRpcSchedulerFactory implements RpcSchedulerFactory {
-  @Override
-  public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
-    int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
-      HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
-    return new FifoRpcScheduler(conf, handlerCount);
-  }
-
-  @Deprecated
-  @Override
-  public RpcScheduler create(Configuration conf, PriorityFunction priority) {
-    return create(conf, priority, null);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/407aa4d4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
index 7bc59da..f554781 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.ipc.RpcScheduler;
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
 public interface RpcSchedulerFactory {
+
   /**
    * Constructs a {@link org.apache.hadoop.hbase.ipc.RpcScheduler}.
    */
@@ -38,4 +39,5 @@ public interface RpcSchedulerFactory {
 
   @Deprecated
   RpcScheduler create(Configuration conf, PriorityFunction priority);
-}
\ No newline at end of file
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/407aa4d4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
index 92462c8..743c5bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
@@ -27,11 +27,11 @@ import org.apache.hadoop.hbase.ipc.PriorityFunction;
 import org.apache.hadoop.hbase.ipc.RpcScheduler;
 import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler;
 
-/** Constructs a {@link SimpleRpcScheduler}.
- */
+/** Constructs a {@link SimpleRpcScheduler}. */
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
 public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
+
   @Override
   @Deprecated
   public RpcScheduler create(Configuration conf, PriorityFunction priority) {
@@ -42,6 +42,7 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
   public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
     int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
         HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
+
     return new SimpleRpcScheduler(
       conf,
       handlerCount,
@@ -53,4 +54,5 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
       server,
       HConstants.QOS_THRESHOLD);
   }
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/407aa4d4/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java
deleted file mode 100644
index 9366c54..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.regionserver;
-
-import static org.junit.Assert.assertTrue;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CategoryBasedTimeout;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
-import org.apache.hadoop.hbase.ipc.RpcScheduler;
-import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Before;
-import org.junit.ClassRule;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
-
-/**
- * A silly test that does nothing but make sure an rpcscheduler factory makes what it says
- * it is going to make.
- */
-@Category(SmallTests.class)
-public class TestRpcSchedulerFactory {
-  @Rule public TestName testName = new TestName();
-  @ClassRule public static TestRule timeout =
-      CategoryBasedTimeout.forClass(TestRpcSchedulerFactory.class);
-  private Configuration conf;
-
-  @Before
-  public void setUp() throws Exception {
-    this.conf = HBaseConfiguration.create();
-  }
-
-  @Test
-  public void testRWQ() {
-    // Set some configs just to see how it changes the scheduler. Can't assert the settings had
-    // an effect. Just eyeball the log.
-    this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_READ_SHARE_CONF_KEY, 0.5);
-    this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.5);
-    this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_SCAN_SHARE_CONF_KEY, 0.5);
-    RpcSchedulerFactory factory = new SimpleRpcSchedulerFactory();
-    RpcScheduler rpcScheduler = factory.create(this.conf, null, null);
-    assertTrue(rpcScheduler.getClass().equals(SimpleRpcScheduler.class));
-  }
-
-  @Test
-  public void testFifo() {
-    RpcSchedulerFactory factory = new FifoRpcSchedulerFactory();
-    RpcScheduler rpcScheduler = factory.create(this.conf, null, null);
-    assertTrue(rpcScheduler.getClass().equals(FifoRpcScheduler.class));
-  }
-}
\ No newline at end of file
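
For context, the change being reverted let operators pick the RPC scheduler through configuration. A hypothetical sketch of that usage, with the key and class names taken from the message above (the setting is inert once this revert lands):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SchedulerSelectionSketch {
  // Build a configuration that would have selected the FIFO scheduler under the
  // reverted change; after this revert the key is simply ignored.
  public static Configuration fifoSchedulerConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.region.server.rpc.scheduler.class",
        "org.apache.hadoop.hbase.ipc.FifoRpcScheduler");
    return conf;
  }
}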


[33/50] hbase git commit: HBASE-15981 Remove references to disabling table in docs around stripe and date-tiered compactions

Posted by sy...@apache.org.
HBASE-15981 Remove references to disabling table in docs around stripe and date-tiered compactions

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2da090f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2da090f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2da090f9

Branch: refs/heads/hbase-12439
Commit: 2da090f9a3ca42b07f342035c952ce0465c67265
Parents: 1125215
Author: Bryan Beaudreault <bb...@hubspot.com>
Authored: Tue Jun 7 15:31:54 2016 -0400
Committer: stack <st...@apache.org>
Committed: Tue Jun 7 13:10:35 2016 -0700

----------------------------------------------------------------------
 src/main/asciidoc/_chapters/architecture.adoc | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/2da090f9/src/main/asciidoc/_chapters/architecture.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/architecture.adoc b/src/main/asciidoc/_chapters/architecture.adoc
index faa1230..9f59cd5 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -2119,7 +2119,7 @@ This is not necessary on new tables.
 [[ops.date.tiered.config]]
 ====== Configuring Date Tiered Compaction
 
-Each of the settings for date tiered compaction should be configured at the table or column family, after disabling the table.
+Each of the settings for date tiered compaction should be configured at the table or column family level.
 If you use HBase shell, the general command pattern is as follows:
 
 [source,sql]
@@ -2199,7 +2199,6 @@ You can enable stripe compaction for a table or a column family, by setting its
 You also need to set the `hbase.hstore.blockingStoreFiles` to a high number, such as 100 (rather than the default value of 10).
 
 .Procedure: Enable Stripe Compaction
-. If the table already exists, disable the table.
 . Run one of following commands in the HBase shell.
   Replace the table name `orders_table` with the name of your table.
 +
@@ -2215,7 +2214,6 @@ create 'orders_table', 'blobs_cf', CONFIGURATION => {'hbase.hstore.engine.class'
 . Enable the table.
 
 .Procedure: Disable Stripe Compaction
-. Disable the table.
 . Set the `hbase.hstore.engine.class` option to either nil or `org.apache.hadoop.hbase.regionserver.DefaultStoreEngine`.
   Either option has the same effect.
 +
@@ -2232,7 +2230,7 @@ This is not necessary on new tables.
 [[ops.stripe.config]]
 ====== Configuring Stripe Compaction
 
-Each of the settings for stripe compaction should be configured at the table or column family, after disabling the table.
+Each of the settings for stripe compaction should be configured at the table or column family level.
 If you use HBase shell, the general command pattern is as follows:
 
 [source,sql]
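
Since the disable/enable dance is gone from the docs, a rough Java equivalent of the shell commands the chapter describes may help. This is only a sketch against the HColumnDescriptor API of that era; the StripeStoreEngine class name is an assumption not spelled out in the hunks above:

import org.apache.hadoop.hbase.HColumnDescriptor;

public class StripeCompactionConfigSketch {
  // Apply stripe-compaction settings at the column-family level; per the doc
  // change above, the table no longer needs to be disabled first.
  public static HColumnDescriptor stripedFamily() {
    HColumnDescriptor cf = new HColumnDescriptor("blobs_cf");
    cf.setConfiguration("hbase.hstore.engine.class",
        "org.apache.hadoop.hbase.regionserver.StripeStoreEngine");
    // The docs recommend raising the blocking store-file limit, e.g. to 100.
    cf.setConfiguration("hbase.hstore.blockingStoreFiles", "100");
    return cf;
  }
}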


[12/50] hbase git commit: HBASE-15938 submit-patch.py: Don't crash if there are tests with same name. Refactor: Split out flaky dashboard html template to separate file. (Apekshit)

Posted by sy...@apache.org.
HBASE-15938 submit-patch.py: Don't crash if there are tests with same name. Refactor: Split out flaky dashboard html template to separate file. (Apekshit)

Change-Id: Ie5875bdefbf886984a57dfc85661be2ac9592a7b

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9593a9f3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9593a9f3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9593a9f3

Branch: refs/heads/hbase-12439
Commit: 9593a9f39663e454a24f16c907e00e19cb65b903
Parents: 4ffea77
Author: Apekshit <ap...@gmail.com>
Authored: Wed Jun 1 19:12:50 2016 -0700
Committer: stack <st...@apache.org>
Committed: Thu Jun 2 08:55:42 2016 -0700

----------------------------------------------------------------------
 dev-support/findHangingTests.py           |  14 ++-
 dev-support/flaky-dashboard-template.html | 122 +++++++++++++++++++++++++
 dev-support/report-flakies.py             | 112 +----------------------
 3 files changed, 137 insertions(+), 111 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9593a9f3/dev-support/findHangingTests.py
----------------------------------------------------------------------
diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py
index 9ef8708..28f4895 100755
--- a/dev-support/findHangingTests.py
+++ b/dev-support/findHangingTests.py
@@ -46,14 +46,22 @@ def get_bad_tests(console_url):
         result1 = re.match("^Running org.apache.hadoop.hbase.(\w*\.)*(\w*)", line)
         if result1:
             test_case = result1.group(2)
-            hanging_tests.add(test_case)
-            all_tests.add(test_case)
+            if test_case in all_tests:
+                print("ERROR! Multiple tests with same name '{}'. Might get wrong results "
+                      "for this test.".format(test_case))
+            else:
+                hanging_tests.add(test_case)
+                all_tests.add(test_case)
         result2 = re.match("^Tests run:.*- in org.apache.hadoop.hbase.(\w*\.)*(\w*)", line)
         if result2:
             test_case = result2.group(2)
-            hanging_tests.remove(test_case)
             if "FAILURE!" in line:
                 failed_tests.add(test_case)
+            if test_case not in hanging_tests:
+                print("ERROR! No test '{}' found in hanging_tests. Might get wrong results "
+                      "for this test.".format(test_case))
+            else:
+                hanging_tests.remove(test_case)
         result3 = re.match("^\s+(\w*).*\sTestTimedOut", line)
         if result3:
             test_case = result3.group(1)

http://git-wip-us.apache.org/repos/asf/hbase/blob/9593a9f3/dev-support/flaky-dashboard-template.html
----------------------------------------------------------------------
diff --git a/dev-support/flaky-dashboard-template.html b/dev-support/flaky-dashboard-template.html
new file mode 100644
index 0000000..77dfc86
--- /dev/null
+++ b/dev-support/flaky-dashboard-template.html
@@ -0,0 +1,122 @@
+<!--
+ - Licensed to the Apache Software Foundation (ASF) under one
+ - or more contributor license agreements.  See the NOTICE file
+ - distributed with this work for additional information
+ - regarding copyright ownership.  The ASF licenses this file
+ - to you under the Apache License, Version 2.0 (the
+ - "License"); you may not use this file except in compliance
+ - with the License.  You may obtain a copy of the License at
+ -
+ -     http://www.apache.org/licenses/LICENSE-2.0
+ -
+ - Unless required by applicable law or agreed to in writing, software
+ - distributed under the License is distributed on an "AS IS" BASIS,
+ - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ - See the License for the specific language governing permissions and
+ - limitations under the License.
+ -->
+<!DOCTYPE html>
+<html>
+<head>
+    <title>Apache HBase Flaky Dashboard</title>
+    <style type="text/css">
+        table {
+            table-layout: fixed;
+        }
+        th {
+            font-size: 15px;
+        }
+        td {
+            font-size: 18px;
+            vertical-align: text-top;
+            overflow: hidden;
+            white-space: nowrap;
+        }
+        .show_hide_button {
+            font-size: 100%;
+            padding: .5em 1em;
+            border: 0 rgba(0,0,0,0);
+            border-radius: 10px;
+        }
+    </style>
+</head>
+<body>
+<p>
+    <img style="vertical-align:middle; display:inline-block;" height="80px"
+         src="https://hbase.apache.org/images/hbase_logo_with_orca_large.png">
+    &nbsp;&nbsp;&nbsp;&nbsp;
+              <span style="font-size:50px; vertical-align:middle; display:inline-block;">
+                  Apache HBase Flaky Tests Dashboard
+              </span>
+</p>
+<br><br>
+{% set counter = 0 %}
+{% for url in results %}
+{% set result = results[url] %}
+{# Dedup ids since test names may duplicate across urls #}
+{% set counter = counter + 1 %}
+                <span style="font-size:20px; font-weight:bold;">Job : {{ url |e }}
+                <a href="{{ url |e }}" style="text-decoration:none;">&#x1f517;</a></span>
+<br/><br/>
+<table>
+    <tr>
+        <th width="400px">Test Name</th>
+        <th width="150px">Flakyness</th>
+        <th width="200px">Failed/Timeout/Hanging</th>
+        <th>Run Ids</th>
+    </tr>
+    {% for test in result %}
+    {% set all = result[test]['all'] %}
+    {% set failed = result[test]['failed'] %}
+    {% set timeout = result[test]['timeout'] %}
+    {% set hanging = result[test]['hanging'] %}
+    {% set success = all.difference(failed).difference(hanging) %}
+    <tr>
+        <td>{{ test |e }}</td>
+        {% set flakyness =
+        (failed|length + hanging|length) * 100 / all|length %}
+        {% if flakyness == 100 %}
+        <td align="middle" style="background-color:#FF9999;">
+            {% else %}
+        <td align="middle">
+            {% endif %}
+            {{ "{:.1f}% ({} / {})".format(
+            flakyness, failed|length + hanging|length, all|length) }}
+        </td>
+        <td align="middle">
+            {{ failed|length }} / {{ timeout|length }} / {{ hanging|length }}
+        </td>
+        <td>
+            {% set id = "details_" ~ test ~ "_" ~ counter  %}
+            <button class="show_hide_button" onclick="toggle('{{ id }}')">
+                show/hide</button>
+            <br/>
+            <div id="{{ id }}"
+                 style="display: none; width:500px; white-space: normal">
+                {% macro print_run_ids(url, run_ids) -%}
+                {% for i in run_ids %}
+                <a href="{{ url }}/{{ i }}">{{ i }}</a>&nbsp;
+                {% endfor %}
+                {%- endmacro %}
+                Failed : {{ print_run_ids(url, failed) }}<br/>
+                Timed Out : {{ print_run_ids(url, timeout) }}<br/>
+                Hanging : {{ print_run_ids(url, hanging) }}<br/>
+                Succeeded : {{ print_run_ids(url, success) }}
+            </div>
+        </td>
+    </tr>
+    {% endfor %}
+</table>
+<br><br><br>
+{% endfor %}
+<script type="text/javascript">
+    function toggle(id) {
+        if (document.getElementById(id).style["display"] == "none") {
+            document.getElementById(id).style["display"]  = "block";
+        } else {
+            document.getElementById(id).style["display"] = "none";
+        }
+    }
+</script>
+</body>
+</html>

http://git-wip-us.apache.org/repos/asf/hbase/blob/9593a9f3/dev-support/report-flakies.py
----------------------------------------------------------------------
diff --git a/dev-support/report-flakies.py b/dev-support/report-flakies.py
index c0d16c7..676eca3 100755
--- a/dev-support/report-flakies.py
+++ b/dev-support/report-flakies.py
@@ -22,6 +22,7 @@
 import argparse
 import findHangingTests
 from jinja2 import Template
+import os
 import logging
 import requests
 
@@ -177,114 +178,9 @@ if args.mvn:
     with open("./failed", "w") as file:
         file.write(",".join(all_failed_tests))
 
-
-template = Template("""
-    <!DOCTYPE html>
-    <html>
-        <head>
-        <title>Apache HBase Flaky Dashboard</title>
-        <style type="text/css">
-            table {
-                table-layout: fixed;
-            }
-            th {
-                font-size: 15px;
-            }
-            td {
-                font-size: 18px;
-                vertical-align: text-top;
-                overflow: hidden;
-                white-space: nowrap;
-            }
-            .show_hide_button {
-                font-size: 100%;
-                padding: .5em 1em;
-                border: 0 rgba(0,0,0,0);
-                border-radius: 10px;
-            }
-        </style>
-        </head>
-        <body>
-            <p>
-              <img style="vertical-align:middle; display:inline-block;" height="80px"
-                   src="https://hbase.apache.org/images/hbase_logo_with_orca_large.png">
-              &nbsp;&nbsp;&nbsp;&nbsp;
-              <span style="font-size:50px; vertical-align:middle; display:inline-block;">
-                  Apache HBase Flaky Tests Dashboard
-              </span>
-            </p>
-            <br><br>
-            {% set counter = 0 %}
-            {% for url in results %}
-                {% set result = results[url] %}
-                {# Dedup ids since test names may duplicate across urls #}
-                {% set counter = counter + 1 %}
-                <span style="font-size:20px; font-weight:bold;">Job : {{ url |e }}
-                <a href="{{ url |e }}" style="text-decoration:none;">&#x1f517;</a></span>
-                <br/><br/>
-                <table>
-                    <tr>
-                        <th width="400px">Test Name</th>
-                        <th width="150px">Flakyness</th>
-                        <th width="200px">Failed/Timeout/Hanging</th>
-                        <th>Run Ids</th>
-                    </tr>
-                    {% for test in result %}
-                        {% set all = result[test]['all'] %}
-                        {% set failed = result[test]['failed'] %}
-                        {% set timeout = result[test]['timeout'] %}
-                        {% set hanging = result[test]['hanging'] %}
-                        {% set success = all.difference(failed).difference(hanging) %}
-                        <tr>
-                            <td>{{ test |e }}</td>
-                            {% set flakyness =
-                                (failed|length + hanging|length) * 100 / all|length %}
-                            {% if flakyness == 100 %}
-                                <td align="middle" style="background-color:#FF9999;">
-                            {% else %}
-                                <td align="middle">
-                            {% endif %}
-                                    {{ "{:.1f}% ({} / {})".format(
-                                        flakyness, failed|length + hanging|length, all|length) }}
-                                </td>
-                            <td align="middle">
-                                {{ failed|length }} / {{ timeout|length }} / {{ hanging|length }}
-                            </td>
-                            <td>
-                                {% set id = "details_" ~ test ~ "_" ~ counter  %}
-                                <button class="show_hide_button" onclick="toggle('{{ id }}')">
-                                    show/hide</button>
-                                <br/>
-                                <div id="{{ id }}"
-                                    style="display: none; width:500px; white-space: normal">
-                                {% macro print_run_ids(url, run_ids) -%}
-                                    {% for i in run_ids %}
-                                        <a href="{{ url }}/{{ i }}">{{ i }}</a>&nbsp;
-                                    {% endfor %}
-                                {%- endmacro %}
-                                    Failed : {{ print_run_ids(url, failed) }}<br/>
-                                    Timed Out : {{ print_run_ids(url, timeout) }}<br/>
-                                    Hanging : {{ print_run_ids(url, hanging) }}<br/>
-                                    Succeeded : {{ print_run_ids(url, success) }}
-                                </div>
-                            </td>
-                        </tr>
-                    {% endfor %}
-                </table>
-                <br><br><br>
-            {% endfor %}
-            <script type="text/javascript">
-                function toggle(id) {
-                    if (document.getElementById(id).style["display"] == "none") {
-                        document.getElementById(id).style["display"]  = "block";
-                    } else {
-                        document.getElementById(id).style["display"] = "none";
-                    }
-                }
-            </script>
-        </body>
-    </html>
-    """)
+dev_support_dir = os.path.dirname(os.path.abspath(__file__))
+with open(os.path.join(dev_support_dir, "flaky-dashboard-template.html"), "r") as f:
+    template = Template(f.read())
 
 with open("dashboard.html", "w") as f:
     f.write(template.render(results=url_to_bad_test_results))


[03/50] hbase git commit: Remove the hbasecon banner logo

Posted by sy...@apache.org.
Remove the hbasecon banner logo


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c80e2326
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c80e2326
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c80e2326

Branch: refs/heads/hbase-12439
Commit: c80e23264204c651ec7bd3e0fbf294ac728359e6
Parents: 5ea2f09
Author: stack <st...@apache.org>
Authored: Tue May 31 10:14:40 2016 -0700
Committer: stack <st...@apache.org>
Committed: Tue May 31 10:14:40 2016 -0700

----------------------------------------------------------------------
 src/main/site/site.xml | 5 -----
 1 file changed, 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/c80e2326/src/main/site/site.xml
----------------------------------------------------------------------
diff --git a/src/main/site/site.xml b/src/main/site/site.xml
index 6d4de53..fb237bb 100644
--- a/src/main/site/site.xml
+++ b/src/main/site/site.xml
@@ -43,13 +43,8 @@
     </fluidoSkin>
   </custom>
   <bannerLeft>
-    <name>hbasecon2016</name>
-    <!--
     <height>0</height>
     <width>0</width>
-    -->
-    <src>images/hbasecon2016-stacked.png</src>
-    <href>http://hbasecon.com/</href>
   </bannerLeft>
   <bannerRight>
     <name>Apache HBase</name>


[05/50] hbase git commit: HBASE-15907 updates for HBase Shell pre-splitting docs

Posted by sy...@apache.org.
HBASE-15907 updates for HBase Shell pre-splitting docs

(cherry picked from commit 01adec574d9ccbdd6183466cb8ee6b43935d69ca)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/73ec3385
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/73ec3385
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/73ec3385

Branch: refs/heads/hbase-12439
Commit: 73ec33856d0ee2ac1e058c6f7e1ccffa4476fbc0
Parents: eb64cd9
Author: Ronan Stokes <rs...@cloudera.com>
Authored: Mon May 30 23:52:43 2016 -0700
Committer: Misty Stanley-Jones <ms...@cloudera.com>
Committed: Tue May 31 13:52:46 2016 -0700

----------------------------------------------------------------------
 src/main/asciidoc/_chapters/performance.adoc | 19 ++++++-
 src/main/asciidoc/_chapters/shell.adoc       | 62 +++++++++++++++++++++++
 2 files changed, 79 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/73ec3385/src/main/asciidoc/_chapters/performance.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/performance.adoc b/src/main/asciidoc/_chapters/performance.adoc
index a0c00ae..5f27640 100644
--- a/src/main/asciidoc/_chapters/performance.adoc
+++ b/src/main/asciidoc/_chapters/performance.adoc
@@ -499,7 +499,7 @@ For bulk imports, this means that all clients will write to the same region unti
 A useful pattern to speed up the bulk import process is to pre-create empty regions.
 Be somewhat conservative in this, because too-many regions can actually degrade performance.
 
-There are two different approaches to pre-creating splits.
+There are two different approaches to pre-creating splits using the HBase API.
 The first approach is to rely on the default `Admin` strategy (which is implemented in `Bytes.split`)...
 
 [source,java]
@@ -511,7 +511,7 @@ int numberOfRegions = ...;  // # of regions to create
 admin.createTable(table, startKey, endKey, numberOfRegions);
 ----
 
-And the other approach is to define the splits yourself...
+And the other approach, using the HBase API, is to define the splits yourself...
 
 [source,java]
 ----
@@ -519,8 +519,23 @@ byte[][] splits = ...;   // create your own splits
 admin.createTable(table, splits);
 ----
 
+You can achieve a similar effect using the HBase Shell to create tables by specifying split options. 
+
+[source]
+----
+# create table with specific split points
+hbase>create 't1','f1',SPLITS => ['\x10\x00', '\x20\x00', '\x30\x00', '\x40\x00']
+
+# create table with four regions based on random bytes keys
+hbase>create 't2','f1', { NUMREGIONS => 4 , SPLITALGO => 'UniformSplit' }
+
+# create table with five regions based on hex keys
+hbase>create 't3','f1', { NUMREGIONS => 5, SPLITALGO => 'HexStringSplit' }
+----
+
 See <<rowkey.regionsplits>> for issues related to understanding your keyspace and pre-creating regions.
 See <<manual_region_splitting_decisions,manual region splitting decisions>>  for discussion on manually pre-splitting regions.
+See <<tricks.pre-split>> for more details on using the HBase Shell to pre-split tables.
 
 [[def.log.flush]]
 ===  Table Creation: Deferred Log Flush

http://git-wip-us.apache.org/repos/asf/hbase/blob/73ec3385/src/main/asciidoc/_chapters/shell.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/shell.adoc b/src/main/asciidoc/_chapters/shell.adoc
index a4237fd..8f1f59b 100644
--- a/src/main/asciidoc/_chapters/shell.adoc
+++ b/src/main/asciidoc/_chapters/shell.adoc
@@ -352,6 +352,68 @@ hbase(main):022:0> Date.new(1218920189000).toString() => "Sat Aug 16 20:56:29 UT
 
 To output in a format that is exactly like that of the HBase log format will take a little messing with link:http://download.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html[SimpleDateFormat].
 
+[[tricks.pre-split]]
+=== Pre-splitting tables with the HBase Shell
+You can use a variety of options to pre-split tables when creating them via the HBase Shell `create` command.
+
+The simplest approach is to specify an array of split points when creating the table. Note that when string literals are used as split points, they define split points based on the underlying byte representation of the string. So when specifying a split point of '10', we are actually specifying the byte split point '\x31\x30'.
+
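+For example, a quick Ruby one-liner (runnable in plain Ruby or the JRuby that backs the shell) shows the bytes behind the string literal '10':
+
+[source,ruby]
+----
+# print the byte split point corresponding to the string literal '10'
+puts '10'.bytes.map { |b| format('\x%02X', b) }.join
+# prints: \x31\x30
+----
+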
+The split points will define `n+1` regions, where `n` is the number of split points. The lowest region will contain all keys from the lowest possible key up to, but not including, the first split point key.
+The next region will contain keys from the first split point up to, but not including, the next split point key.
+This will continue for all split points up to the last. The last region will be defined from the last split point up to the maximum possible key.
+
+[source]
+----
+hbase>create 't1','f',SPLITS => ['10','20','30']
+----
+
+In the above example, the table 't1' will be created with column family 'f', pre-split to four regions. Note that the first region will contain all keys from '\x00' up to, but not including, the split point '\x31\x30' (as '\x31' is the ASCII code for '1' and '\x30' for '0').
+
+You can also pass the split points in a file, using the following variation. In this example, the splits are read from a file at the given path on the local filesystem. Each line in the file specifies a split point key.
+
+[source]
+----
+hbase>create 't14','f',SPLITS_FILE=>'splits.txt'
+----
+
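+For illustration, such a `splits.txt` (the keys below are made-up examples) could contain:
+
+[source]
+----
+10
+20
+30
+----
+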
+The other options are to automatically compute splits based on a desired number of regions and a splitting algorithm.
+HBase supplies algorithms for splitting the key range based on uniform splits or based on hexadecimal keys, but you can provide your own splitting algorithm to subdivide the key range.
+
+[source]
+----
+# create table with four regions based on random bytes keys
+hbase>create 't2','f1', { NUMREGIONS => 4 , SPLITALGO => 'UniformSplit' }
+
+# create table with five regions based on hex keys
+hbase>create 't3','f1', { NUMREGIONS => 5, SPLITALGO => 'HexStringSplit' }
+----
+
+As the HBase Shell is effectively a Ruby environment, you can use simple Ruby scripts to compute splits algorithmically.
+
+[source]
+----
+# generate splits for long (Ruby fixnum) key range from start to end key
+hbase(main):070:0> def gen_splits(start_key,end_key,num_regions)
+hbase(main):071:1>   results=[]
+hbase(main):072:1>   range=end_key-start_key
+hbase(main):073:1>   incr=(range/num_regions).floor
+hbase(main):074:1>   for i in 1 .. num_regions-1
+hbase(main):075:2>     results.push([i*incr+start_key].pack("N"))
+hbase(main):076:2>   end
+hbase(main):077:1>   return results
+hbase(main):078:1> end
+hbase(main):079:0>
+hbase(main):080:0> splits=gen_splits(1,2000000,10)
+=> ["\000\003\r@", "\000\006\032\177", "\000\t'\276", "\000\f4\375", "\000\017B<", "\000\022O{", "\000\025\\\272", "\000\030i\371", "\000\ew8"]
+hbase(main):081:0> create 'test_splits','f',SPLITS=>splits
+0 row(s) in 0.2670 seconds
+
+=> Hbase::Table - test_splits
+----
+
+Note that the HBase Shell command `truncate` effectively drops and recreates the table with default options, which discards any pre-splitting.
+If you need to truncate a pre-split table, you must drop and recreate the table explicitly to re-specify custom split options, as sketched below.
+
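+For example, a minimal sketch of re-creating a pre-split table (the table name and split points here are illustrative):
+
+[source]
+----
+hbase>disable 't1'
+hbase>drop 't1'
+hbase>create 't1','f',SPLITS => ['10','20','30']
+----
+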
 === Debug
 
 ==== Shell debug switch


[44/50] hbase git commit: HBASE-15975 logic in TestHTableDescriptor#testAddCoprocessorWithSpecStr is wrong (Huaxiang Sun)

Posted by sy...@apache.org.
HBASE-15975 logic in TestHTableDescriptor#testAddCoprocessorWithSpecStr is wrong (Huaxiang Sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/41cc2155
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/41cc2155
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/41cc2155

Branch: refs/heads/hbase-12439
Commit: 41cc215544deb5f1f6cf522efd19229da0c99dd2
Parents: 031b745
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Wed Jun 8 22:42:07 2016 -0700
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Wed Jun 8 22:42:50 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/TestHTableDescriptor.java       | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/41cc2155/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
index 680f2c1..70380e6 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java
@@ -55,35 +55,36 @@ public class TestHTableDescriptor {
   public void testAddCoprocessorWithSpecStr() throws IOException {
     HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME);
     String cpName = "a.b.c.d";
-    boolean expected = false;
     try {
       htd.addCoprocessorWithSpec(cpName);
+      fail();
     } catch (IllegalArgumentException iae) {
-      expected = true;
+      // Expected as cpName is invalid
     }
-    if (!expected) fail();
+
     // Try minimal spec.
     try {
       htd.addCoprocessorWithSpec("file:///some/path" + "|" + cpName);
+      fail();
     } catch (IllegalArgumentException iae) {
-      expected = false;
+      // Expected to be invalid
     }
-    if (expected) fail();
+
     // Try more spec.
     String spec = "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2";
     try {
       htd.addCoprocessorWithSpec(spec);
     } catch (IllegalArgumentException iae) {
-      expected = false;
+      fail();
     }
-    if (expected) fail();
+
     // Try double add of same coprocessor
     try {
       htd.addCoprocessorWithSpec(spec);
+      fail();
     } catch (IOException ioe) {
-      expected = true;
+      // Expect that the coprocessor already exists
     }
-    if (!expected) fail();
   }
 
   @Test


[47/50] hbase git commit: HBASE-15925 provide default values for hadoop compat module related properties that match default hadoop profile.

Posted by sy...@apache.org.
HBASE-15925 provide default values for hadoop compat module related properties that match default hadoop profile.

Signed-off-by: Mikhail Antonov <an...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/108d39a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/108d39a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/108d39a7

Branch: refs/heads/hbase-12439
Commit: 108d39a7277bdcbc8b4bc9c50bab457a86a71971
Parents: 55a04b7
Author: Sean Busbey <bu...@apache.org>
Authored: Thu Jun 9 13:30:45 2016 -0500
Committer: Sean Busbey <bu...@apache.org>
Committed: Thu Jun 9 16:29:28 2016 -0500

----------------------------------------------------------------------
 pom.xml | 7 +++++++
 1 file changed, 7 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/108d39a7/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 6652b78..0e33ae8 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1212,6 +1212,13 @@
     <!-- Dependencies -->
     <hadoop-two.version>2.7.1</hadoop-two.version>
     <hadoop-three.version>3.0.0-SNAPSHOT</hadoop-three.version>
+    <!-- These must be defined here for downstream build tools that don't look at profiles.
+         They ought to match the values found in our default hadoop profile, which is
+         currently "hadoop-2.0". See HBASE-15925 for more info. -->
+    <hadoop.version>${hadoop-two.version}</hadoop.version>
+    <compat.module>hbase-hadoop2-compat</compat.module>
+    <assembly.file>src/main/assembly/hadoop-two-compat.xml</assembly.file>
+    <!-- end HBASE-15925 default hadoop compatibility values -->
     <commons-cli.version>1.2</commons-cli.version>
     <commons-codec.version>1.9</commons-codec.version>
     <!-- pretty outdated -->


[04/50] hbase git commit: HBASE-15917 Addendum. Fix bug in report-flakies.py where hanging tests are not being added to flaky list. (Apekshit) ADDENDUM #2!

Posted by sy...@apache.org.
HBASE-15917 Addendum. Fix bug in report-flakies.py where hanging tests are not being added to flaky list. (Apekshit)
ADDENDUM #2!

Change-Id: I9c55932d0f9e65b72ec8d3ae714144536b2bfe0a

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eb64cd9d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eb64cd9d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eb64cd9d

Branch: refs/heads/hbase-12439
Commit: eb64cd9dd13ba297539c409989c63e800cb378a1
Parents: c80e232
Author: Apekshit <ap...@gmail.com>
Authored: Tue May 31 02:29:40 2016 -0700
Committer: stack <st...@apache.org>
Committed: Tue May 31 10:16:40 2016 -0700

----------------------------------------------------------------------
 dev-support/report-flakies.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/eb64cd9d/dev-support/report-flakies.py
----------------------------------------------------------------------
diff --git a/dev-support/report-flakies.py b/dev-support/report-flakies.py
index bdc88dc..c0d16c7 100755
--- a/dev-support/report-flakies.py
+++ b/dev-support/report-flakies.py
@@ -161,7 +161,7 @@ for url_max_build in expanded_urls:
     print ""
 
 
-all_bad_tests = all_timeout_tests.union(all_failed_tests)
+all_bad_tests = all_hanging_tests.union(all_failed_tests)
 if args.mvn:
     includes = ",".join(all_bad_tests)
     with open("./includes", "w") as inc_file:


[19/50] hbase git commit: HBASE-15845 Changes: - Renaming hbase.rb to hbase_constants.rb because there are two hbase.rb files right now which is confusing. - Remove omnipresence of formatter object since it is kind of a use-and-throw class. Commands shou

Posted by sy...@apache.org.
HBASE-15845 Changes:
- Rename hbase.rb to hbase_constants.rb because there are two hbase.rb files right now, which is confusing.
- Remove omnipresence of the formatter object, since it is essentially a use-and-throw class. Commands should create
  an instance, use it to format the output, and discard it.
- Some refactoring

Change-Id: If9ea9873904e0a39d199a6aa10e23864b86a2f09


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bdb46f01
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bdb46f01
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bdb46f01

Branch: refs/heads/hbase-12439
Commit: bdb46f01b9645a0aa4e9827a7e627de76cd7983b
Parents: f0c159b
Author: Apekshit <ap...@gmail.com>
Authored: Mon Dec 28 14:50:50 2015 -0800
Committer: Apekshit Sharma <ap...@apache.org>
Committed: Fri Jun 3 13:37:38 2016 -0700

----------------------------------------------------------------------
 bin/hirb.rb                                     |   8 +-
 hbase-shell/src/main/ruby/hbase.rb              | 109 -------------------
 hbase-shell/src/main/ruby/hbase/admin.rb        |  82 ++++++--------
 hbase-shell/src/main/ruby/hbase/hbase.rb        |  36 +++---
 hbase-shell/src/main/ruby/hbase/quotas.rb       |   3 +-
 .../src/main/ruby/hbase/replication_admin.rb    |   3 +-
 .../src/main/ruby/hbase/rsgroup_admin.rb        |   3 +-
 hbase-shell/src/main/ruby/hbase/security.rb     |   3 +-
 hbase-shell/src/main/ruby/hbase/table.rb        |   2 +-
 hbase-shell/src/main/ruby/hbase/taskmonitor.rb  |   3 +-
 .../src/main/ruby/hbase/visibility_labels.rb    |   5 +-
 hbase-shell/src/main/ruby/hbase_constants.rb    | 109 +++++++++++++++++++
 hbase-shell/src/main/ruby/shell.rb              |  25 ++---
 hbase-shell/src/main/ruby/shell/commands.rb     |   9 +-
 hbase-shell/src/test/ruby/hbase/admin_test.rb   |   3 +-
 hbase-shell/src/test/ruby/hbase/hbase_test.rb   |  11 +-
 .../test/ruby/hbase/replication_admin_test.rb   |   3 +-
 .../src/test/ruby/hbase/security_admin_test.rb  |   3 +-
 hbase-shell/src/test/ruby/hbase/table_test.rb   |   2 +-
 .../src/test/ruby/hbase/taskmonitor_test.rb     |   2 +-
 .../ruby/hbase/visibility_labels_admin_test.rb  |   3 +-
 .../src/test/ruby/shell/commands_test.rb        |   2 +-
 .../src/test/ruby/shell/noninteractive_test.rb  |   6 +-
 .../src/test/ruby/shell/rsgroup_shell_test.rb   |  18 ++-
 hbase-shell/src/test/ruby/shell/shell_test.rb   |   6 +-
 hbase-shell/src/test/ruby/test_helper.rb        |  10 +-
 26 files changed, 218 insertions(+), 251 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/bin/hirb.rb
----------------------------------------------------------------------
diff --git a/bin/hirb.rb b/bin/hirb.rb
index 94b5cdb..d0295d6 100644
--- a/bin/hirb.rb
+++ b/bin/hirb.rb
@@ -115,7 +115,7 @@ org.apache.log4j.Logger.getLogger("org.apache.zookeeper").setLevel(log_level)
 org.apache.log4j.Logger.getLogger("org.apache.hadoop.hbase").setLevel(log_level)
 
 # Require HBase now after setting log levels
-require 'hbase'
+require 'hbase_constants'
 
 # Load hbase shell
 require 'shell'
@@ -123,15 +123,11 @@ require 'shell'
 # Require formatter
 require 'shell/formatter'
 
-# Presume console format.
-# Formatter takes an :output_stream parameter, if you don't want STDOUT.
-@formatter = Shell::Formatter::Console.new
-
 # Setup the HBase module.  Create a configuration.
 @hbase = Hbase::Hbase.new
 
 # Setup console
-@shell = Shell::Shell.new(@hbase, @formatter, interactive)
+@shell = Shell::Shell.new(@hbase, interactive)
 @shell.debug = @shell_debug
 
 # Add commands to this namespace

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase.rb b/hbase-shell/src/main/ruby/hbase.rb
deleted file mode 100644
index bc6f37c..0000000
--- a/hbase-shell/src/main/ruby/hbase.rb
+++ /dev/null
@@ -1,109 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# HBase ruby classes.
-# Has wrapper classes for org.apache.hadoop.hbase.client.Admin
-# and for org.apache.hadoop.hbase.client.Table.  Classes take
-# Formatters on construction and outputs any results using
-# Formatter methods.  These classes are only really for use by
-# the hirb.rb HBase Shell script; they don't make much sense elsewhere.
-# For example, the exists method on Admin class prints to the formatter
-# whether the table exists and returns nil regardless.
-include Java
-
-include_class('java.lang.Integer') {|package,name| "J#{name}" }
-include_class('java.lang.Long') {|package,name| "J#{name}" }
-include_class('java.lang.Boolean') {|package,name| "J#{name}" }
-
-module HBaseConstants
-  COLUMN = "COLUMN"
-  COLUMNS = "COLUMNS"
-  TIMESTAMP = "TIMESTAMP"
-  TIMERANGE = "TIMERANGE"
-  NAME = org.apache.hadoop.hbase.HConstants::NAME
-  VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS
-  IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY
-  IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION
-  METADATA = org.apache.hadoop.hbase.HConstants::METADATA
-  STOPROW = "STOPROW"
-  STARTROW = "STARTROW"
-  ROWPREFIXFILTER = "ROWPREFIXFILTER"
-  ENDROW = STOPROW
-  RAW = "RAW"
-  LIMIT = "LIMIT"
-  METHOD = "METHOD"
-  MAXLENGTH = "MAXLENGTH"
-  CACHE_BLOCKS = "CACHE_BLOCKS"
-  ALL_METRICS = "ALL_METRICS"
-  METRICS = "METRICS"
-  REVERSED = "REVERSED"
-  REPLICATION_SCOPE = "REPLICATION_SCOPE"
-  INTERVAL = 'INTERVAL'
-  CACHE = 'CACHE'
-  FILTER = 'FILTER'
-  SPLITS = 'SPLITS'
-  SPLITS_FILE = 'SPLITS_FILE'
-  SPLITALGO = 'SPLITALGO'
-  NUMREGIONS = 'NUMREGIONS'
-  REGION_REPLICATION = 'REGION_REPLICATION'
-  REGION_REPLICA_ID = 'REGION_REPLICA_ID'
-  CONFIGURATION = org.apache.hadoop.hbase.HConstants::CONFIGURATION
-  ATTRIBUTES="ATTRIBUTES"
-  VISIBILITY="VISIBILITY"
-  AUTHORIZATIONS = "AUTHORIZATIONS"
-  SKIP_FLUSH = 'SKIP_FLUSH'
-  CONSISTENCY = "CONSISTENCY"
-  USER = 'USER'
-  TABLE = 'TABLE'
-  NAMESPACE = 'NAMESPACE'
-  TYPE = 'TYPE'
-  NONE = 'NONE'
-  VALUE = 'VALUE'
-  ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME'
-  CLUSTER_KEY = 'CLUSTER_KEY'
-  TABLE_CFS = 'TABLE_CFS'
-  CONFIG = 'CONFIG'
-  DATA = 'DATA'
-
-  # Load constants from hbase java API
-  def self.promote_constants(constants)
-    # The constants to import are all in uppercase
-    constants.each do |c|
-      next if c =~ /DEFAULT_.*/ || c != c.upcase
-      next if eval("defined?(#{c})")
-      eval("#{c} = '#{c}'")
-    end
-  end
-
-  promote_constants(org.apache.hadoop.hbase.HColumnDescriptor.constants)
-  promote_constants(org.apache.hadoop.hbase.HTableDescriptor.constants)
-end
-
-# Include classes definition
-require 'hbase/hbase'
-require 'hbase/admin'
-require 'hbase/taskmonitor'
-require 'hbase/table'
-require 'hbase/quotas'
-require 'hbase/replication_admin'
-require 'hbase/security'
-require 'hbase/visibility_labels'
-require 'hbase/rsgroup_admin'
-
-include HBaseQuotasConstants

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 35039af..f32376d 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -31,10 +31,10 @@ module Hbase
   class Admin
     include HBaseConstants
 
-    def initialize(admin, formatter)
-      @admin = admin
-      @connection = @admin.getConnection()
-      @formatter = formatter
+    def initialize(connection)
+      @connection = connection
+      # Java Admin instance
+      @admin = @connection.getAdmin
     end
 
     def close
@@ -309,12 +309,6 @@ module Hbase
     end
 
     #----------------------------------------------------------------------------------------------
-    # Parse arguments and update HTableDescriptor accordingly
-    def parse_htd_args(htd, arg)
-      htd.setNormalizationEnabled(JBoolean.valueOf(arg.delete(NORMALIZATION_ENABLED))) if arg[NORMALIZATION_ENABLED]
-    end
-
-    #----------------------------------------------------------------------------------------------
     # Creates a table
     def create(table_name, *args)
       # Fail if table name is not a string
@@ -392,24 +386,7 @@ module Hbase
         end
 
         # Done with splits; apply formerly-table_att parameters.
-        htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER]
-        htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE]
-        htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY]
-        htd.setCompactionEnabled(JBoolean.valueOf(arg[COMPACTION_ENABLED])) if arg[COMPACTION_ENABLED]
-        htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
-        # DEFERRED_LOG_FLUSH is deprecated and was replaced by DURABILITY.  To keep backward compatible, it still exists.
-        # However, it has to be set before DURABILITY so that DURABILITY could overwrite if both args are set
-        if arg.include?(DEFERRED_LOG_FLUSH)
-          if arg.delete(DEFERRED_LOG_FLUSH).to_s.upcase == "TRUE"
-            htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("ASYNC_WAL"))
-          else
-            htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("SYNC_WAL"))
-          end
-        end
-        htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY]
-        parse_htd_args(htd, arg)
-        set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
-        set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
+        update_htd_from_arg(htd, arg)
 
         arg.each_key do |ignored_key|
           puts("An argument ignored (unknown or overridden): %s" % [ ignored_key ])
@@ -653,26 +630,7 @@ module Hbase
         end
 
         # 3) Some args for the table, optionally with METHOD => table_att (deprecated)
-        raise(ArgumentError, "NAME argument in an unexpected place") if name
-        htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER]
-        htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE]
-        htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY]
-        htd.setCompactionEnabled(JBoolean.valueOf(arg[COMPACTION_ENABLED])) if arg[COMPACTION_ENABLED]
-        parse_htd_args(htd, arg)
-        htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
-        # DEFERRED_LOG_FLUSH is deprecated and was replaced by DURABILITY.  To keep backward compatible, it still exists.
-        # However, it has to be set before DURABILITY so that DURABILITY could overwrite if both args are set
-        if arg.include?(DEFERRED_LOG_FLUSH)
-          if arg.delete(DEFERRED_LOG_FLUSH).to_s.upcase == "TRUE"
-            htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("ASYNC_WAL"))
-          else
-            htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("SYNC_WAL"))
-          end
-        end
-        htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY]
-        htd.setRegionReplication(JInteger.valueOf(arg.delete(REGION_REPLICATION))) if arg[REGION_REPLICATION]
-        set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
-        set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
+        update_htd_from_arg(htd, arg)
 
         # set a coprocessor attribute
         valid_coproc_keys = []
@@ -764,7 +722,7 @@ module Hbase
             rLoadSink = sl.getReplicationLoadSink()
             rSinkString << " AgeOfLastAppliedOp=" + rLoadSink.getAgeOfLastAppliedOp().to_s
             rSinkString << ", TimeStampsOfLastAppliedOp=" +
-			    (java.util.Date.new(rLoadSink.getTimeStampsOfLastAppliedOp())).toString()
+                (java.util.Date.new(rLoadSink.getTimeStampsOfLastAppliedOp())).toString()
             rLoadSourceList = sl.getReplicationLoadSourceList()
             index = 0
             while index < rLoadSourceList.size()
@@ -773,7 +731,7 @@ module Hbase
               rSourceString << ", AgeOfLastShippedOp=" + rLoadSource.getAgeOfLastShippedOp().to_s
               rSourceString << ", SizeOfLogQueue=" + rLoadSource.getSizeOfLogQueue().to_s
               rSourceString << ", TimeStampsOfLastShippedOp=" +
-			      (java.util.Date.new(rLoadSource.getTimeStampOfLastShippedOp())).toString()
+                  (java.util.Date.new(rLoadSource.getTimeStampOfLastShippedOp())).toString()
               rSourceString << ", Replication Lag=" + rLoadSource.getReplicationLag().to_s
               index = index + 1
             end
@@ -1186,5 +1144,29 @@ module Hbase
     def list_procedures()
       @admin.listProcedures()
     end
+
+    # Parse arguments and update HTableDescriptor accordingly
+    def update_htd_from_arg(htd, arg)
+      htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER]
+      htd.setMaxFileSize(JLong.valueOf(arg.delete(MAX_FILESIZE))) if arg[MAX_FILESIZE]
+      htd.setReadOnly(JBoolean.valueOf(arg.delete(READONLY))) if arg[READONLY]
+      htd.setCompactionEnabled(JBoolean.valueOf(arg[COMPACTION_ENABLED])) if arg[COMPACTION_ENABLED]
+      htd.setNormalizationEnabled(
+        JBoolean.valueOf(arg[NORMALIZATION_ENABLED])) if arg[NORMALIZATION_ENABLED]
+      htd.setMemStoreFlushSize(JLong.valueOf(arg.delete(MEMSTORE_FLUSHSIZE))) if arg[MEMSTORE_FLUSHSIZE]
+      # DEFERRED_LOG_FLUSH is deprecated and was replaced by DURABILITY.  To keep backward compatible, it still exists.
+      # However, it has to be set before DURABILITY so that DURABILITY could overwrite if both args are set
+      if arg.include?(DEFERRED_LOG_FLUSH)
+        if arg.delete(DEFERRED_LOG_FLUSH).to_s.upcase == "TRUE"
+          htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("ASYNC_WAL"))
+        else
+          htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf("SYNC_WAL"))
+        end
+      end
+      htd.setDurability(org.apache.hadoop.hbase.client.Durability.valueOf(arg.delete(DURABILITY))) if arg[DURABILITY]
+      htd.setRegionReplication(JInteger.valueOf(arg.delete(REGION_REPLICATION))) if arg[REGION_REPLICATION]
+      set_user_metadata(htd, arg.delete(METADATA)) if arg[METADATA]
+      set_descriptor_config(htd, arg.delete(CONFIGURATION)) if arg[CONFIGURATION]
+    end
   end
 end

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase/hbase.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/hbase.rb b/hbase-shell/src/main/ruby/hbase/hbase.rb
index 852f349..bc5a31d 100644
--- a/hbase-shell/src/main/ruby/hbase/hbase.rb
+++ b/hbase-shell/src/main/ruby/hbase/hbase.rb
@@ -18,6 +18,8 @@
 #
 
 include Java
+java_import org.apache.hadoop.hbase.client.ConnectionFactory
+java_import org.apache.hadoop.hbase.HBaseConfiguration
 
 require 'hbase/admin'
 require 'hbase/table'
@@ -35,25 +37,25 @@ module Hbase
       if config
         self.configuration = config
       else
-        self.configuration = org.apache.hadoop.hbase.HBaseConfiguration.create
+        self.configuration = HBaseConfiguration.create
         # Turn off retries in hbase and ipc.  Human doesn't want to wait on N retries.
         configuration.setInt("hbase.client.retries.number", 7)
         configuration.setInt("hbase.ipc.client.connect.max.retries", 3)
       end
-      @connection = org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(
-          self.configuration)
+      @connection = ConnectionFactory.createConnection(self.configuration)
     end
 
-    def admin(formatter)
-      ::Hbase::Admin.new(@connection.getAdmin, formatter)
+    # Returns ruby's Admin class from admin.rb
+    def admin()
+      ::Hbase::Admin.new(@connection)
     end
 
-    def rsgroup_admin(formatter)
-      ::Hbase::RSGroupAdmin.new(@connection, formatter)
+    def rsgroup_admin()
+      ::Hbase::RSGroupAdmin.new(@connection)
     end
 
-    def taskmonitor(formatter)
-      ::Hbase::TaskMonitor.new(configuration, formatter)
+    def taskmonitor()
+      ::Hbase::TaskMonitor.new(configuration)
     end
 
     # Create new one each time
@@ -61,20 +63,20 @@ module Hbase
       ::Hbase::Table.new(@connection.getTable(TableName.valueOf(table)), shell)
     end
 
-    def replication_admin(formatter)
-      ::Hbase::RepAdmin.new(configuration, formatter)
+    def replication_admin()
+      ::Hbase::RepAdmin.new(configuration)
     end
 
-    def security_admin(formatter)
-      ::Hbase::SecurityAdmin.new(@connection.getAdmin, formatter)
+    def security_admin()
+      ::Hbase::SecurityAdmin.new(@connection.getAdmin)
     end
 
-    def visibility_labels_admin(formatter)
-      ::Hbase::VisibilityLabelsAdmin.new(@connection.getAdmin, formatter)
+    def visibility_labels_admin()
+      ::Hbase::VisibilityLabelsAdmin.new(@connection.getAdmin)
     end
 
-    def quotas_admin(formatter)
-      ::Hbase::QuotasAdmin.new(@connection.getAdmin, formatter)
+    def quotas_admin()
+      ::Hbase::QuotasAdmin.new(@connection.getAdmin)
     end
 
     def shutdown

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase/quotas.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/quotas.rb b/hbase-shell/src/main/ruby/hbase/quotas.rb
index 0be428d..bf2dc63 100644
--- a/hbase-shell/src/main/ruby/hbase/quotas.rb
+++ b/hbase-shell/src/main/ruby/hbase/quotas.rb
@@ -36,9 +36,8 @@ end
 
 module Hbase
   class QuotasAdmin
-    def initialize(admin, formatter)
+    def initialize(admin)
       @admin = admin
-      @formatter = formatter
     end
 
     def close

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase/replication_admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index e91a4f7..7eae7af 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -31,10 +31,9 @@ module Hbase
   class RepAdmin
     include HBaseConstants
 
-    def initialize(configuration, formatter)
+    def initialize(configuration)
       @replication_admin = ReplicationAdmin.new(configuration)
       @configuration = configuration
-      @formatter = formatter
     end
 
     #----------------------------------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb b/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
index 51a4efb..c654f23 100644
--- a/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/rsgroup_admin.rb
@@ -28,9 +28,8 @@ module Hbase
   class RSGroupAdmin
     include HBaseConstants
 
-    def initialize(connection, formatter)
+    def initialize(connection)
       @admin = org.apache.hadoop.hbase.rsgroup.RSGroupAdmin.newClient(connection)
-      @formatter = formatter
     end
 
     def close

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase/security.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/security.rb b/hbase-shell/src/main/ruby/hbase/security.rb
index 7a31851..55519ed 100644
--- a/hbase-shell/src/main/ruby/hbase/security.rb
+++ b/hbase-shell/src/main/ruby/hbase/security.rb
@@ -24,10 +24,9 @@ module Hbase
   class SecurityAdmin
     include HBaseConstants
 
-    def initialize(admin, formatter)
+    def initialize(admin)
       @admin = admin
       @connection = @admin.getConnection()
-      @formatter = formatter
     end
 
     def close

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase/table.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb b/hbase-shell/src/main/ruby/hbase/table.rb
index 6cb5dcb..e64b4ee 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -296,7 +296,7 @@ EOF
       # Parse arguments
       #
       unless args.kind_of?(Hash)
-        raise ArgumentError, "Failed parse of of #{args.inspect}, #{args.class}"
+        raise ArgumentError, "Failed parse of #{args.inspect}, #{args.class}"
       end
 
       # Get maxlength parameter if passed

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase/taskmonitor.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/taskmonitor.rb b/hbase-shell/src/main/ruby/hbase/taskmonitor.rb
index df93848..d312558 100644
--- a/hbase-shell/src/main/ruby/hbase/taskmonitor.rb
+++ b/hbase-shell/src/main/ruby/hbase/taskmonitor.rb
@@ -71,9 +71,8 @@ module Hbase
     end
 
 
-    def initialize(configuration, formatter)
+    def initialize(configuration)
       @conf = configuration
-      @formatter = formatter
       @conn = org.apache.hadoop.hbase.client.ConnectionFactory.createConnection(@conf)
       @admin = @conn.getAdmin()
     end

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase/visibility_labels.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/visibility_labels.rb b/hbase-shell/src/main/ruby/hbase/visibility_labels.rb
index 98bfb2c..8e6c93c 100644
--- a/hbase-shell/src/main/ruby/hbase/visibility_labels.rb
+++ b/hbase-shell/src/main/ruby/hbase/visibility_labels.rb
@@ -24,9 +24,8 @@ java_import org.apache.hadoop.hbase.util.Bytes
 module Hbase
   class VisibilityLabelsAdmin
 
-    def initialize(admin, formatter)
+    def initialize(admin)
       @admin = admin
-      @formatter = formatter
       @connection = @admin.getConnection()
     end
 
@@ -41,7 +40,7 @@ module Hbase
         labels = [ args ].flatten.compact
       end
       if labels.size() == 0
-      	raise(ArgumentError, "Arguments cannot be null")
+        raise(ArgumentError, "Arguments cannot be null")
       end
 
       begin

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/hbase_constants.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb
new file mode 100644
index 0000000..bc6f37c
--- /dev/null
+++ b/hbase-shell/src/main/ruby/hbase_constants.rb
@@ -0,0 +1,109 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# HBase ruby classes.
+# Has wrapper classes for org.apache.hadoop.hbase.client.Admin
+# and for org.apache.hadoop.hbase.client.Table.  Classes take
+# Formatters on construction and outputs any results using
+# Formatter methods.  These classes are only really for use by
+# the hirb.rb HBase Shell script; they don't make much sense elsewhere.
+# For example, the exists method on Admin class prints to the formatter
+# whether the table exists and returns nil regardless.
+include Java
+
+include_class('java.lang.Integer') {|package,name| "J#{name}" }
+include_class('java.lang.Long') {|package,name| "J#{name}" }
+include_class('java.lang.Boolean') {|package,name| "J#{name}" }
+
+module HBaseConstants
+  COLUMN = "COLUMN"
+  COLUMNS = "COLUMNS"
+  TIMESTAMP = "TIMESTAMP"
+  TIMERANGE = "TIMERANGE"
+  NAME = org.apache.hadoop.hbase.HConstants::NAME
+  VERSIONS = org.apache.hadoop.hbase.HConstants::VERSIONS
+  IN_MEMORY = org.apache.hadoop.hbase.HConstants::IN_MEMORY
+  IN_MEMORY_COMPACTION = org.apache.hadoop.hbase.HColumnDescriptor::IN_MEMORY_COMPACTION
+  METADATA = org.apache.hadoop.hbase.HConstants::METADATA
+  STOPROW = "STOPROW"
+  STARTROW = "STARTROW"
+  ROWPREFIXFILTER = "ROWPREFIXFILTER"
+  ENDROW = STOPROW
+  RAW = "RAW"
+  LIMIT = "LIMIT"
+  METHOD = "METHOD"
+  MAXLENGTH = "MAXLENGTH"
+  CACHE_BLOCKS = "CACHE_BLOCKS"
+  ALL_METRICS = "ALL_METRICS"
+  METRICS = "METRICS"
+  REVERSED = "REVERSED"
+  REPLICATION_SCOPE = "REPLICATION_SCOPE"
+  INTERVAL = 'INTERVAL'
+  CACHE = 'CACHE'
+  FILTER = 'FILTER'
+  SPLITS = 'SPLITS'
+  SPLITS_FILE = 'SPLITS_FILE'
+  SPLITALGO = 'SPLITALGO'
+  NUMREGIONS = 'NUMREGIONS'
+  REGION_REPLICATION = 'REGION_REPLICATION'
+  REGION_REPLICA_ID = 'REGION_REPLICA_ID'
+  CONFIGURATION = org.apache.hadoop.hbase.HConstants::CONFIGURATION
+  ATTRIBUTES="ATTRIBUTES"
+  VISIBILITY="VISIBILITY"
+  AUTHORIZATIONS = "AUTHORIZATIONS"
+  SKIP_FLUSH = 'SKIP_FLUSH'
+  CONSISTENCY = "CONSISTENCY"
+  USER = 'USER'
+  TABLE = 'TABLE'
+  NAMESPACE = 'NAMESPACE'
+  TYPE = 'TYPE'
+  NONE = 'NONE'
+  VALUE = 'VALUE'
+  ENDPOINT_CLASSNAME = 'ENDPOINT_CLASSNAME'
+  CLUSTER_KEY = 'CLUSTER_KEY'
+  TABLE_CFS = 'TABLE_CFS'
+  CONFIG = 'CONFIG'
+  DATA = 'DATA'
+
+  # Load constants from hbase java API
+  def self.promote_constants(constants)
+    # The constants to import are all in uppercase
+    constants.each do |c|
+      next if c =~ /DEFAULT_.*/ || c != c.upcase
+      next if eval("defined?(#{c})")
+      eval("#{c} = '#{c}'")
+    end
+  end
+
+  promote_constants(org.apache.hadoop.hbase.HColumnDescriptor.constants)
+  promote_constants(org.apache.hadoop.hbase.HTableDescriptor.constants)
+end
+
+# Include classes definition
+require 'hbase/hbase'
+require 'hbase/admin'
+require 'hbase/taskmonitor'
+require 'hbase/table'
+require 'hbase/quotas'
+require 'hbase/replication_admin'
+require 'hbase/security'
+require 'hbase/visibility_labels'
+require 'hbase/rsgroup_admin'
+
+include HBaseQuotasConstants
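
For readers skimming the new hbase_constants.rb above: `promote_constants` turns every plain-uppercase Java constant name into a same-named Ruby string constant via `eval`. The same effect can be achieved with `Module#const_set`; a standalone sketch of that variant (the module and constant names are illustrative, not HBase's):

    module Consts
      def self.promote_constants(names)
        names.each do |c|
          next if c =~ /DEFAULT_.*/ || c != c.upcase  # only plain uppercase names
          next if const_defined?(c)                   # skip names already defined
          const_set(c, c.to_s)                        # CONSTANT = 'CONSTANT'
        end
      end
    end

    Consts.promote_constants(%w[FOO DEFAULT_FOO bar])
    puts Consts::FOO   # => FOO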

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 1927333..fa1f8b8 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -70,25 +70,23 @@ module Shell
   #----------------------------------------------------------------------
   class Shell
     attr_accessor :hbase
-    attr_accessor :formatter
     attr_accessor :interactive
     alias interactive? interactive
 
     @debug = false
     attr_accessor :debug
 
-    def initialize(hbase, formatter, interactive=true)
+    def initialize(hbase, interactive=true)
       self.hbase = hbase
-      self.formatter = formatter
       self.interactive = interactive
     end
 
     def hbase_admin
-      @hbase_admin ||= hbase.admin(formatter)
+      @hbase_admin ||= hbase.admin()
     end
 
     def hbase_taskmonitor
-      @hbase_taskmonitor ||= hbase.taskmonitor(formatter)
+      @hbase_taskmonitor ||= hbase.taskmonitor()
     end
 
     def hbase_table(name)
@@ -96,23 +94,23 @@ module Shell
     end
 
     def hbase_replication_admin
-      @hbase_replication_admin ||= hbase.replication_admin(formatter)
+      @hbase_replication_admin ||= hbase.replication_admin()
     end
 
     def hbase_security_admin
-      @hbase_security_admin ||= hbase.security_admin(formatter)
+      @hbase_security_admin ||= hbase.security_admin()
     end
 
     def hbase_visibility_labels_admin
-      @hbase_visibility_labels_admin ||= hbase.visibility_labels_admin(formatter)
+      @hbase_visibility_labels_admin ||= hbase.visibility_labels_admin()
     end
 
     def hbase_quotas_admin
-      @hbase_quotas_admin ||= hbase.quotas_admin(formatter)
+      @hbase_quotas_admin ||= hbase.quotas_admin()
     end
 
     def hbase_rsgroup_admin
-      @rsgroup_admin ||= hbase.rsgroup_admin(formatter)
+      @rsgroup_admin ||= hbase.rsgroup_admin()
     end
 
     def export_commands(where)
@@ -140,7 +138,7 @@ module Shell
       internal_command(command, :command, *args)
     end
 
-    #call a specific internal method in the command instance
+    # call a specific internal method in the command instance
     # command  - name of the command to call
     # method_name - name of the method on the command to call. Defaults to just 'command'
     # args - to be passed to the named method
@@ -149,8 +147,9 @@ module Shell
     end
 
     def print_banner
-      puts "HBase Shell; enter 'help<RETURN>' for list of supported commands."
-      puts 'Type "exit<RETURN>" to leave the HBase Shell'
+      puts 'HBase Shell'
+      puts 'Use "help" to get list of supported commands.'
+      puts 'Use "exit" to quit this interactive shell.'
       print 'Version '
       command('version')
       puts

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/main/ruby/shell/commands.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index b7b1705..b9446dd 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -26,7 +26,7 @@ module Shell
       end
 
       #wrap an execution of cmd to catch hbase exceptions
-      # cmd - command name to execture
+      # cmd - command name to execute
       # args - arguments to pass to the command
       def command_safe(debug, cmd = :command, *args)
         # send is internal ruby method to call 'cmd' with *args
@@ -50,6 +50,9 @@ module Shell
         end
       end
 
+      # Convenience functions to get different admins
+
+      # Returns HBase::Admin ruby class.
       def admin
         @shell.hbase_admin
       end
@@ -83,9 +86,9 @@ module Shell
       end
 
       #----------------------------------------------------------------------
-
+      # Creates formatter instance first time and then reuses it.
       def formatter
-        @shell.formatter
+        @formatter ||= ::Shell::Formatter::Console.new
       end
 
       def format_simple_command
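
For reference, the `@formatter ||= ...` line above uses Ruby's conditional-assignment idiom to create the formatter lazily on first use and reuse it afterwards. A standalone sketch of the same pattern (the class names here are illustrative, not the shell's real ones):

    class Console
      def initialize
        puts 'formatter created'
      end
    end

    class Command
      # Lazily create one Console per command instance, then reuse it.
      def formatter
        @formatter ||= Console.new
      end
    end

    cmd = Command.new
    cmd.formatter   # prints 'formatter created'
    cmd.formatter   # same instance, nothing printed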

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/hbase/admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/admin_test.rb b/hbase-shell/src/test/ruby/hbase/admin_test.rb
index 50a65d0..e2c3bc0 100644
--- a/hbase-shell/src/test/ruby/hbase/admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/admin_test.rb
@@ -18,9 +18,8 @@
 #
 
 require 'shell'
-require 'shell/formatter'
 require 'stringio'
-require 'hbase'
+require 'hbase_constants'
 require 'hbase/hbase'
 require 'hbase/table'
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/hbase/hbase_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/hbase_test.rb b/hbase-shell/src/test/ruby/hbase/hbase_test.rb
index 185ec3e..0f19234 100644
--- a/hbase-shell/src/test/ruby/hbase/hbase_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/hbase_test.rb
@@ -17,12 +17,11 @@
 # limitations under the License.
 #
 
-require 'hbase'
+require 'hbase_constants'
 
 module Hbase
   class HbaseTest < Test::Unit::TestCase
     def setup
-      @formatter = Shell::Formatter::Console.new()
       @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
     end
 
@@ -31,19 +30,19 @@ module Hbase
     end
 
     define_test "Hbase::Hbase#admin should create a new admin object when called the first time" do
-      assert_kind_of(::Hbase::Admin, @hbase.admin(@formatter))
+      assert_kind_of(::Hbase::Admin, @hbase.admin())
     end
 
     define_test "Hbase::Hbase#admin should create a new admin object every call" do
-      assert_not_same(@hbase.admin(@formatter), @hbase.admin(@formatter))
+      assert_not_same(@hbase.admin(), @hbase.admin())
     end
 
     define_test "Hbase::Hbase#table should create a new table object when called the first time" do
-      assert_kind_of(::Hbase::Table, @hbase.table('hbase:meta', @formatter))
+      assert_kind_of(::Hbase::Table, @hbase.table('hbase:meta', @shell))
     end
 
     define_test "Hbase::Hbase#table should create a new table object every call" do
-      assert_not_same(@hbase.table('hbase:meta', @formatter), @hbase.table('hbase:meta', @formatter))
+      assert_not_same(@hbase.table('hbase:meta', @shell), @hbase.table('hbase:meta', @shell))
     end
   end
 end

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index 0c026d6..d00dbc5 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -18,8 +18,7 @@
 #
 
 require 'shell'
-require 'shell/formatter'
-require 'hbase'
+require 'hbase_constants'
 require 'hbase/hbase'
 require 'hbase/table'
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/hbase/security_admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/security_admin_test.rb b/hbase-shell/src/test/ruby/hbase/security_admin_test.rb
index 6ecfb98..be5bbae 100644
--- a/hbase-shell/src/test/ruby/hbase/security_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/security_admin_test.rb
@@ -18,8 +18,7 @@
 #
 
 require 'shell'
-require 'shell/formatter'
-require 'hbase'
+require 'hbase_constants'
 require 'hbase/hbase'
 require 'hbase/table'
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/hbase/table_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/table_test.rb b/hbase-shell/src/test/ruby/hbase/table_test.rb
index a617bc5..faf9827 100644
--- a/hbase-shell/src/test/ruby/hbase/table_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/table_test.rb
@@ -17,7 +17,7 @@
 # limitations under the License.
 #
 
-require 'hbase'
+require 'hbase_constants'
 
 include HBaseConstants
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/hbase/taskmonitor_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/taskmonitor_test.rb b/hbase-shell/src/test/ruby/hbase/taskmonitor_test.rb
index 78776d8..cdb91c7 100644
--- a/hbase-shell/src/test/ruby/hbase/taskmonitor_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/taskmonitor_test.rb
@@ -17,7 +17,7 @@
 # limitations under the License.
 #
 
-require 'hbase'
+require 'hbase_constants'
 
 module Hbase
   class TaskMonitorTest < Test::Unit::TestCase

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb b/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb
index 47ac292..0046909 100644
--- a/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/visibility_labels_admin_test.rb
@@ -18,8 +18,7 @@
 #
 
 require 'shell'
-require 'shell/formatter'
-require 'hbase'
+require 'hbase_constants'
 require 'hbase/hbase'
 require 'hbase/table'
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/shell/commands_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/shell/commands_test.rb b/hbase-shell/src/test/ruby/shell/commands_test.rb
index 3f6a802..9fa291a 100644
--- a/hbase-shell/src/test/ruby/shell/commands_test.rb
+++ b/hbase-shell/src/test/ruby/shell/commands_test.rb
@@ -17,7 +17,7 @@
 # limitations under the License.
 #
 
-require 'hbase'
+require 'hbase_constants'
 require 'hbase/table'
 require 'shell'
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/shell/noninteractive_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/shell/noninteractive_test.rb b/hbase-shell/src/test/ruby/shell/noninteractive_test.rb
index 14bdbc7..0fae4cb 100644
--- a/hbase-shell/src/test/ruby/shell/noninteractive_test.rb
+++ b/hbase-shell/src/test/ruby/shell/noninteractive_test.rb
@@ -14,15 +14,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-require 'hbase'
+require 'hbase_constants'
 require 'shell'
-require 'shell/formatter'
 
 class NonInteractiveTest < Test::Unit::TestCase
   def setup
-    @formatter = ::Shell::Formatter::Console.new()
     @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
-    @shell = Shell::Shell.new(@hbase, @formatter, false)
+    @shell = Shell::Shell.new(@hbase, false)
   end
 
   define_test "Shell::Shell noninteractive mode should throw" do

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
index 1040ed8..cb76c1f 100644
--- a/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
+++ b/hbase-shell/src/test/ruby/shell/rsgroup_shell_test.rb
@@ -17,16 +17,14 @@
 # limitations under the License.
 #
 
-require 'hbase'
+require 'hbase_constants'
 require 'shell'
-require 'shell/formatter'
 
 module Hbase
   class RSGroupShellTest < Test::Unit::TestCase
     def setup
-      @formatter = ::Shell::Formatter::Console.new
       @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
-      @shell = Shell::Shell.new(@hbase, @formatter)
+      @shell = Shell::Shell.new(@hbase)
       connection = $TEST_CLUSTER.getConnection
       @rsgroup_admin =
           org.apache.hadoop.hbase.rsgroup.RSGroupAdmin.newClient(connection)
@@ -65,7 +63,7 @@ module Hbase
       assert_equal(1, @rsgroup_admin.getRSGroupInfo(group_name).getTables.count)
 
       count = 0
-      @hbase.rsgroup_admin(@formatter).get_rsgroup(group_name) do |line|
+      @hbase.rsgroup_admin().get_rsgroup(group_name) do |line|
         case count
         when 1
           assert_equal(hostPortStr, line)
@@ -77,22 +75,22 @@ module Hbase
       assert_equal(4, count)
 
       assert_equal(2,
-                   @hbase.rsgroup_admin(@formatter).list_rs_groups.count)
+                   @hbase.rsgroup_admin().list_rs_groups.count)
 
       # just run it to verify jruby->java api binding
-      @hbase.rsgroup_admin(@formatter).balance_rs_group(group_name)
+      @hbase.rsgroup_admin().balance_rs_group(group_name)
     end
 
     # we test exceptions that could be thrown by the ruby wrappers
     define_test 'Test bogus arguments' do
       assert_raise(ArgumentError) do
-        @hbase.rsgroup_admin(@formatter).get_rsgroup('foobar')
+        @hbase.rsgroup_admin().get_rsgroup('foobar')
       end
       assert_raise(ArgumentError) do
-        @hbase.rsgroup_admin(@formatter).get_rsgroup_of_server('foobar:123')
+        @hbase.rsgroup_admin().get_rsgroup_of_server('foobar:123')
       end
       assert_raise(ArgumentError) do
-        @hbase.rsgroup_admin(@formatter).get_rsgroup_of_table('foobar')
+        @hbase.rsgroup_admin().get_rsgroup_of_table('foobar')
       end
     end
   end

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/shell/shell_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/shell/shell_test.rb b/hbase-shell/src/test/ruby/shell/shell_test.rb
index 56b7dc8..f37f60c 100644
--- a/hbase-shell/src/test/ruby/shell/shell_test.rb
+++ b/hbase-shell/src/test/ruby/shell/shell_test.rb
@@ -17,15 +17,13 @@
 # limitations under the License.
 #
 
-require 'hbase'
+require 'hbase_constants'
 require 'shell'
-require 'shell/formatter'
 
 class ShellTest < Test::Unit::TestCase
   def setup
-    @formatter = ::Shell::Formatter::Console.new()
     @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
-    @shell = Shell::Shell.new(@hbase, @formatter)
+    @shell = Shell::Shell.new(@hbase)
   end
 
   define_test "Shell::Shell#hbase_admin should return an admin instance" do

http://git-wip-us.apache.org/repos/asf/hbase/blob/bdb46f01/hbase-shell/src/test/ruby/test_helper.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/test_helper.rb b/hbase-shell/src/test/ruby/test_helper.rb
index 3a5193a..11645d5 100644
--- a/hbase-shell/src/test/ruby/test_helper.rb
+++ b/hbase-shell/src/test/ruby/test_helper.rb
@@ -37,15 +37,13 @@ end
 
 module Hbase
   module TestHelpers
-    require 'hbase'
+    require 'hbase_constants'
     require 'hbase/hbase'
     require 'shell'
-    require 'shell/formatter'
 
     def setup_hbase
-      formatter = ::Shell::Formatter::Console.new
       hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
-      @shell = ::Shell::Shell.new(hbase, formatter)
+      @shell = ::Shell::Shell.new(hbase)
     end
     
     def shutdown
@@ -72,6 +70,10 @@ module Hbase
       @shell.hbase_visibility_labels_admin
     end
 
+    def quotas_admin
+      @shell.hbase_quotas_admin
+    end
+
     def replication_admin
       @shell.hbase_replication_admin
     end


[36/50] hbase git commit: HBASE-15948 Port "HADOOP-9956 RPC listener inefficiently assigns connections to readers" Adds HADOOP-9955 RPC idle connection closing is extremely inefficient

Posted by sy...@apache.org.
HBASE-15948 Port "HADOOP-9956 RPC listener inefficiently assigns connections to readers" Adds HADOOP-9955 RPC idle connection closing is extremely inefficient

Changes how we do accounting of Connections to match how it is done in Hadoop.
Adds a ConnectionManager class. Adds new configurations for this new class.

"hbase.ipc.client.idlethreshold" 4000
"hbase.ipc.client.connection.idle-scan-interval.ms" 10000
"hbase.ipc.client.connection.maxidletime" 10000
"hbase.ipc.client.kill.max", 10
"hbase.ipc.server.handler.queue.size", 100

The new scheme does away with synchronization that purportedly would freeze out
reads while we were cleaning up stale connections (according to HADOOP-9955).

Also adds a new mechanism for accepting Connections: we pull in as many as
we can at a time and add them to a Queue, instead of registering them one
at a time. This can help with bursty traffic, per HADOOP-9956. It also
removes blocking while a Reader is busy parsing a request. Adds the
configuration "hbase.ipc.server.read.connection-queue.size", with a
default queue size of 100.
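
A minimal sketch of that hand-off, with names simplified from the
RpcServer.Listener code in the diff below (the acceptor is assumed to have
already put the channel in non-blocking mode):

    import java.io.IOException;
    import java.nio.channels.SelectionKey;
    import java.nio.channels.Selector;
    import java.nio.channels.SocketChannel;
    import java.util.concurrent.LinkedBlockingQueue;

    // The acceptor queues new channels and wakes the reader; the reader drains
    // the queue and registers channels with its own selector before the next
    // select(), so the selector is never mutated while a select is in progress.
    class ReaderSketch implements Runnable {
      private final LinkedBlockingQueue<SocketChannel> pending =
          new LinkedBlockingQueue<SocketChannel>(100); // connection-queue.size
      private final Selector readSelector;

      ReaderSketch() throws IOException {
        this.readSelector = Selector.open();
      }

      // Called by the acceptor thread for each accepted connection.
      void addConnection(SocketChannel channel) {
        pending.add(channel);
        readSelector.wakeup(); // interrupt select() so the queue drains promptly
      }

      @Override
      public void run() {
        try {
          while (!Thread.currentThread().isInterrupted()) {
            // Drain only what is queued right now so reads are not starved
            // by an unbridled stream of accepts.
            for (int i = pending.size(); i > 0; i--) {
              SocketChannel conn = pending.take();
              conn.register(readSelector, SelectionKey.OP_READ, conn);
            }
            readSelector.select();
            // ... iterate selected keys and read/parse requests ...
          }
        } catch (IOException | InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }
    }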

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3a95552c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3a95552c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3a95552c

Branch: refs/heads/hbase-12439
Commit: 3a95552cfe6205ae845e1a7e1b5907da55b1a044
Parents: e66ecd7
Author: stack <st...@apache.org>
Authored: Thu Jun 2 16:59:52 2016 -0700
Committer: stack <st...@apache.org>
Committed: Tue Jun 7 16:42:21 2016 -0700

----------------------------------------------------------------------
 .../hbase/ipc/MetricsHBaseServerSource.java     |  10 +-
 .../ipc/MetricsHBaseServerWrapperImpl.java      |   6 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 421 +++++++++++--------
 .../regionserver/SimpleRpcSchedulerFactory.java |   2 +-
 .../hadoop/hbase/ipc/AbstractTestIPC.java       |   2 +-
 5 files changed, 255 insertions(+), 186 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3a95552c/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index bb89789..ce57e0f 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -52,14 +52,16 @@ public interface MetricsHBaseServerSource extends BaseSource {
   String TOTAL_CALL_TIME_NAME = "totalCallTime";
   String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time.";
   String QUEUE_SIZE_NAME = "queueSize";
-  String QUEUE_SIZE_DESC = "Number of bytes in the call queues.";
+  String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " +
+    "parsed and is waiting to run or is currently being executed.";
   String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue";
-  String GENERAL_QUEUE_DESC = "Number of calls in the general call queue.";
+  String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " +
+    "parsed requests waiting in scheduler to be executed";
   String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue";
   String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue";
   String REPLICATION_QUEUE_DESC =
-      "Number of calls in the replication call queue.";
-  String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue.";
+      "Number of calls in the replication call queue waiting to be run";
+  String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run";
   String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections";
   String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections.";
   String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler";

http://git-wip-us.apache.org/repos/asf/hbase/blob/3a95552c/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
index 9979c75..4f53709 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapperImpl.java
@@ -36,7 +36,7 @@ public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper
     if (!isServerStarted()) {
       return 0;
     }
-    return server.callQueueSize.get();
+    return server.callQueueSizeInBytes.get();
   }
 
   @Override
@@ -65,10 +65,10 @@ public class MetricsHBaseServerWrapperImpl implements MetricsHBaseServerWrapper
 
   @Override
   public int getNumOpenConnections() {
-    if (!isServerStarted() || this.server.connectionList == null) {
+    if (!isServerStarted()) {
       return 0;
     }
-    return server.connectionList.size();
+    return server.getNumOpenConnections();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/3a95552c/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 483ce86..1087c42 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -48,15 +48,17 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedDeque;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
@@ -113,6 +115,7 @@ import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
@@ -183,11 +186,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    */
   static final int DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER = 10;
 
-  /**
-   * The maximum size that we can hold in the RPC queue
-   */
-  private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024;
-
   private final IPCUtil ipcUtil;
 
   private static final String AUTH_FAILED_FOR = "Auth failed for ";
@@ -210,22 +208,30 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   protected int port;                             // port we listen on
   protected InetSocketAddress address;            // inet address we listen on
   private int readThreads;                        // number of read threads
-  protected int maxIdleTime;                      // the maximum idle time after
-                                                  // which a client may be
-                                                  // disconnected
-  protected int thresholdIdleConnections;         // the number of idle
-                                                  // connections after which we
-                                                  // will start cleaning up idle
-                                                  // connections
-  int maxConnectionsToNuke;                       // the max number of
-                                                  // connections to nuke
-                                                  // during a cleanup
-
   protected MetricsHBaseServer metrics;
 
   protected final Configuration conf;
 
-  private int maxQueueSize;
+  /**
+   * Maximum size in bytes of the currently queued and running Calls. If a new Call puts us over
+   * this size, then we will reject the call (after parsing it though). It will go back to the
+   * client and client will retry. Set this size with "hbase.ipc.server.max.callqueue.size". The
+   * call queue size gets incremented after we parse a call and before we add it to the queue of
+   * calls for the scheduler to use. It get decremented after we have 'run' the Call. The current
+   * size is kept in {@link #callQueueSizeInBytes}.
+   * @see {@link #callQueueSizeInBytes}
+   * @see {@link #DEFAULT_MAX_CALLQUEUE_SIZE}
+   */
+  private final long maxQueueSizeInBytes;
+  private static final int DEFAULT_MAX_CALLQUEUE_SIZE = 1024 * 1024 * 1024;
+
+  /**
+   * This is a running count of the size in bytes of all outstanding calls whether currently
+   * executing or queued waiting to be run.
+   */
+  protected final Counter callQueueSizeInBytes = new Counter();
+
   protected int socketSendBufferSize;
   protected final boolean tcpNoDelay;   // if T then disable Nagle's Algorithm
   protected final boolean tcpKeepAlive; // if T then use keepalives
@@ -244,19 +250,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
    */
   volatile boolean started = false;
 
-  /**
-   * This is a running count of the size of all outstanding calls by size.
-   */
-  protected final Counter callQueueSize = new Counter();
-
-  protected final List<Connection> connectionList =
-    Collections.synchronizedList(new LinkedList<Connection>());
-  //maintain a list
-  //of client connections
+  // maintains the set of client connections and handles idle timeouts
+  private ConnectionManager connectionManager;
   private Listener listener = null;
   protected Responder responder = null;
   protected AuthenticationTokenSecretManager authTokenSecretMgr = null;
-  protected int numConnections = 0;
 
   protected HBaseRPCErrorHandler errorHandler = null;
 
@@ -623,18 +621,16 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     private Selector selector = null; //the selector that we use for the server
     private Reader[] readers = null;
     private int currentReader = 0;
-    private Random rand = new Random();
-    private long lastCleanupRunTime = 0; //the last time when a cleanup connec-
-                                         //-tion (for idle connections) ran
-    private long cleanupInterval = 10000; //the minimum interval between
-                                          //two cleanup runs
-    private int backlogLength;
+    private final int readerPendingConnectionQueueLength;
 
     private ExecutorService readPool;
 
     public Listener(final String name) throws IOException {
       super(name);
-      backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
+      // The backlog of requests that we will have the serversocket carry.
+      int backlogLength = conf.getInt("hbase.ipc.server.listen.queue.size", 128);
+      readerPendingConnectionQueueLength =
+          conf.getInt("hbase.ipc.server.read.connection-queue.size", 100);
       // Create a new server socket and set to non blocking mode
       acceptChannel = ServerSocketChannel.open();
       acceptChannel.configureBlocking(false);
@@ -644,9 +640,11 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       port = acceptChannel.socket().getLocalPort(); //Could be an ephemeral port
       address = (InetSocketAddress)acceptChannel.socket().getLocalSocketAddress();
       // create a selector;
-      selector= Selector.open();
+      selector = Selector.open();
 
       readers = new Reader[readThreads];
+      // Why this executor thing? Why not like hadoop just start up all the threads? I suppose it
+      // has an advantage in that it is easy to shutdown the pool.
       readPool = Executors.newFixedThreadPool(readThreads,
         new ThreadFactoryBuilder().setNameFormat(
           "RpcServer.reader=%d,bindAddress=" + bindAddress.getHostName() +
@@ -667,12 +665,15 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
 
     private class Reader implements Runnable {
-      private volatile boolean adding = false;
+      final private LinkedBlockingQueue<Connection> pendingConnections;
       private final Selector readSelector;
 
       Reader() throws IOException {
+        this.pendingConnections =
+          new LinkedBlockingQueue<Connection>(readerPendingConnectionQueueLength);
         this.readSelector = Selector.open();
       }
+
       @Override
       public void run() {
         try {
@@ -689,11 +690,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       private synchronized void doRunLoop() {
         while (running) {
           try {
-            readSelector.select();
-            while (adding) {
-              this.wait(1000);
+            // Consume as many connections as currently queued to avoid
+            // unbridled acceptance of connections that starves the select
+            int size = pendingConnections.size();
+            for (int i=size; i>0; i--) {
+              Connection conn = pendingConnections.take();
+              conn.channel.register(readSelector, SelectionKey.OP_READ, conn);
             }
-
+            readSelector.select();
             Iterator<SelectionKey> iter = readSelector.selectedKeys().iterator();
             while (iter.hasNext()) {
               SelectionKey key = iter.next();
@@ -703,9 +707,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
                   doRead(key);
                 }
               }
+              key = null;
             }
           } catch (InterruptedException e) {
-            LOG.debug("Interrupted while sleeping");
+            if (running) {                      // unexpected -- log it
+              LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
+            }
             return;
           } catch (IOException ex) {
             LOG.info(getName() + ": IOException in Reader", ex);
@@ -714,76 +721,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       }
 
       /**
-       * This gets reader into the state that waits for the new channel
-       * to be registered with readSelector. If it was waiting in select()
-       * the thread will be woken up, otherwise whenever select() is called
-       * it will return even if there is nothing to read and wait
-       * in while(adding) for finishAdd call
+       * Updating the readSelector while it's being used is not thread-safe,
+       * so the connection must be queued.  The reader will drain the queue
+       * and update its readSelector before performing the next select
        */
-      public void startAdd() {
-        adding = true;
+      public void addConnection(Connection conn) throws IOException {
+        pendingConnections.add(conn);
         readSelector.wakeup();
       }
-
-      public synchronized SelectionKey registerChannel(SocketChannel channel)
-        throws IOException {
-        return channel.register(readSelector, SelectionKey.OP_READ);
-      }
-
-      public synchronized void finishAdd() {
-        adding = false;
-        this.notify();
-      }
-    }
-
-    /** cleanup connections from connectionList. Choose a random range
-     * to scan and also have a limit on the number of the connections
-     * that will be cleanedup per run. The criteria for cleanup is the time
-     * for which the connection was idle. If 'force' is true then all
-     * connections will be looked at for the cleanup.
-     * @param force all connections will be looked at for cleanup
-     */
-    private void cleanupConnections(boolean force) {
-      if (force || numConnections > thresholdIdleConnections) {
-        long currentTime = System.currentTimeMillis();
-        if (!force && (currentTime - lastCleanupRunTime) < cleanupInterval) {
-          return;
-        }
-        int start = 0;
-        int end = numConnections - 1;
-        if (!force) {
-          start = rand.nextInt() % numConnections;
-          end = rand.nextInt() % numConnections;
-          int temp;
-          if (end < start) {
-            temp = start;
-            start = end;
-            end = temp;
-          }
-        }
-        int i = start;
-        int numNuked = 0;
-        while (i <= end) {
-          Connection c;
-          synchronized (connectionList) {
-            try {
-              c = connectionList.get(i);
-            } catch (Exception e) {return;}
-          }
-          if (c.timedOut(currentTime)) {
-            if (LOG.isDebugEnabled())
-              LOG.debug(getName() + ": disconnecting client " + c.getHostAddress());
-            closeConnection(c);
-            numNuked++;
-            end--;
-            //noinspection UnusedAssignment
-            c = null;
-            if (!force && numNuked == maxConnectionsToNuke) break;
-          }
-          else i++;
-        }
-        lastCleanupRunTime = System.currentTimeMillis();
-      }
     }
 
     @Override
@@ -792,6 +737,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         "it will have per impact")
     public void run() {
       LOG.info(getName() + ": starting");
+      connectionManager.startIdleScan();
       while (running) {
         SelectionKey key = null;
         try {
@@ -815,7 +761,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
             if (errorHandler.checkOOME(e)) {
               LOG.info(getName() + ": exiting on OutOfMemoryError");
               closeCurrentConnection(key, e);
-              cleanupConnections(true);
+              connectionManager.closeIdle(true);
               return;
             }
           } else {
@@ -824,22 +770,18 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
             // some thread(s) a chance to finish
             LOG.warn(getName() + ": OutOfMemoryError in server select", e);
             closeCurrentConnection(key, e);
-            cleanupConnections(true);
+            connectionManager.closeIdle(true);
             try {
               Thread.sleep(60000);
             } catch (InterruptedException ex) {
               LOG.debug("Interrupted while sleeping");
-              return;
             }
           }
         } catch (Exception e) {
           closeCurrentConnection(key, e);
         }
-        cleanupConnections(false);
       }
-
       LOG.info(getName() + ": stopping");
-
       synchronized (this) {
         try {
           acceptChannel.close();
@@ -851,10 +793,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         selector= null;
         acceptChannel= null;
 
-        // clean up all connections
-        while (!connectionList.isEmpty()) {
-          closeConnection(connectionList.remove(0));
-        }
+        // close all connections
+        connectionManager.stopIdleScan();
+        connectionManager.closeAll();
       }
     }
 
@@ -862,10 +803,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       if (key != null) {
         Connection c = (Connection)key.attachment();
         if (c != null) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(getName() + ": disconnecting client " + c.getHostAddress() +
-                (e != null ? " on error " + e.getMessage() : ""));
-          }
           closeConnection(c);
           key.attach(null);
         }
@@ -876,37 +813,24 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return address;
     }
 
-    void doAccept(SelectionKey key) throws IOException, OutOfMemoryError {
-      Connection c;
+    void doAccept(SelectionKey key) throws InterruptedException, IOException, OutOfMemoryError {
       ServerSocketChannel server = (ServerSocketChannel) key.channel();
-
       SocketChannel channel;
       while ((channel = server.accept()) != null) {
-        try {
-          channel.configureBlocking(false);
-          channel.socket().setTcpNoDelay(tcpNoDelay);
-          channel.socket().setKeepAlive(tcpKeepAlive);
-        } catch (IOException ioe) {
-          channel.close();
-          throw ioe;
-        }
-
+        channel.configureBlocking(false);
+        channel.socket().setTcpNoDelay(tcpNoDelay);
+        channel.socket().setKeepAlive(tcpKeepAlive);
         Reader reader = getReader();
-        try {
-          reader.startAdd();
-          SelectionKey readKey = reader.registerChannel(channel);
-          c = getConnection(channel, System.currentTimeMillis());
-          readKey.attach(c);
-          synchronized (connectionList) {
-            connectionList.add(numConnections, c);
-            numConnections++;
+        Connection c = connectionManager.register(channel);
+        // If the connectionManager can't take it, close the connection.
+        if (c == null) {
+          if (channel.isOpen()) {
+            IOUtils.cleanup(null, channel);
           }
-          if (LOG.isDebugEnabled())
-            LOG.debug(getName() + ": connection from " + c.toString() +
-                "; # active connections: " + numConnections);
-        } finally {
-          reader.finishAdd();
+          continue;
         }
+        key.attach(c);  // so closeCurrentConnection can get the object
+        reader.addConnection(c);
       }
     }
 
@@ -919,12 +843,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       c.setLastContact(System.currentTimeMillis());
       try {
         count = c.readAndProcess();
-
-        if (count > 0) {
-          c.setLastContact(System.currentTimeMillis());
-        }
-
       } catch (InterruptedException ieo) {
+        LOG.info(Thread.currentThread().getName() + ": readAndProcess caught InterruptedException", ieo);
         throw ieo;
       } catch (Exception e) {
         if (LOG.isDebugEnabled()) {
@@ -933,12 +853,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
         count = -1; //so that the (count < 0) block is executed
       }
       if (count < 0) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug(getName() + ": DISCONNECTING client " + c.toString() +
-              " because read count=" + count +
-              ". Number of active connections: " + numConnections);
-        }
         closeConnection(c);
+        c = null;
+      } else {
+        c.setLastContact(System.currentTimeMillis());
       }
     }
 
@@ -957,6 +875,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       readPool.shutdownNow();
     }
 
+    synchronized Selector getSelector() { return selector; }
+
     // The method that will return the next reader to work with
     // Simplistic implementation of round robin for now
     Reader getReader() {
@@ -1355,6 +1275,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       return null;
     }
 
+    public long getLastContact() {
+      return lastContact;
+    }
+
     /* Return true if the connection has no outstanding rpc */
     private boolean isIdle() {
       return rpcCount.get() == 0;
@@ -1370,10 +1294,6 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       rpcCount.increment();
     }
 
-    protected boolean timedOut(long currentTime) {
-      return isIdle() && currentTime - lastContact > maxIdleTime;
-    }
-
     private UserGroupInformation getAuthorizedUgi(String authorizedId)
         throws IOException {
       UserGroupInformation authorizedUgi;
@@ -1883,7 +1803,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
       }
       // Enforcing the call queue size, this triggers a retry in the client
       // This is a bit late to be doing this check - we have already read in the total request.
-      if ((totalRequestSize + callQueueSize.get()) > maxQueueSize) {
+      if ((totalRequestSize + callQueueSizeInBytes.get()) > maxQueueSizeInBytes) {
         final Call callTooBig =
           new Call(id, this.service, null, null, null, null, this,
             responder, totalRequestSize, null, null, 0);
@@ -1954,7 +1874,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
               totalRequestSize, traceInfo, this.addr, timeout);
 
       if (!scheduler.dispatch(new CallRunner(RpcServer.this, call))) {
-        callQueueSize.add(-1 * call.getSize());
+        callQueueSizeInBytes.add(-1 * call.getSize());
 
         ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
         metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
@@ -2093,12 +2013,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     this.bindAddress = bindAddress;
     this.conf = conf;
     this.socketSendBufferSize = 0;
-    this.maxQueueSize =
-      this.conf.getInt("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
+    // See declaration above for documentation on what this size is.
+    this.maxQueueSizeInBytes =
+      this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
     this.readThreads = conf.getInt("hbase.ipc.server.read.threadpool.size", 10);
-    this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 1000);
-    this.maxConnectionsToNuke = conf.getInt("hbase.ipc.client.kill.max", 10);
-    this.thresholdIdleConnections = conf.getInt("hbase.ipc.client.idlethreshold", 4000);
     this.purgeTimeout = conf.getLong("hbase.ipc.client.call.purge.timeout",
       2 * HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
     this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME);
@@ -2120,6 +2038,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
     // Create the responder here
     responder = new Responder();
+    connectionManager = new ConnectionManager();
     this.authorize = conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
     this.userProvider = UserProvider.instantiate(conf);
     this.isSecurityEnabled = userProvider.isHBaseSecurityEnabled();
@@ -2177,12 +2096,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   }
 
   protected void closeConnection(Connection connection) {
-    synchronized (connectionList) {
-      if (connectionList.remove(connection)) {
-        numConnections--;
-      }
-    }
-    connection.close();
+    connectionManager.close(connection);
   }
 
   Configuration getConf() {
@@ -2440,7 +2354,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
 
   @Override
   public void addCallSize(final long diff) {
-    this.callQueueSize.add(diff);
+    this.callQueueSizeInBytes.add(diff);
   }
 
   /**
@@ -2578,6 +2492,14 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   }
 
   /**
+   * The number of open RPC connections
+   * @return the number of open rpc connections
+   */
+  public int getNumOpenConnections() {
+    return connectionManager.size();
+  }
+
+  /**
    * Returns the username for any user associated with the current RPC
    * request or <code>null</code> if no user is set.
    */
@@ -2695,4 +2617,149 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
   public RpcScheduler getScheduler() {
     return scheduler;
   }
+
+  private class ConnectionManager {
+    final private AtomicInteger count = new AtomicInteger();
+    final private Set<Connection> connections;
+
+    final private Timer idleScanTimer;
+    final private int idleScanThreshold;
+    final private int idleScanInterval;
+    final private int maxIdleTime;
+    final private int maxIdleToClose;
+
+    ConnectionManager() {
+      this.idleScanTimer = new Timer("RpcServer idle connection scanner for port " + port, true);
+      this.idleScanThreshold = conf.getInt("hbase.ipc.client.idlethreshold", 4000);
+      this.idleScanInterval =
+          conf.getInt("hbase.ipc.client.connection.idle-scan-interval.ms", 10000);
+      this.maxIdleTime = 2 * conf.getInt("hbase.ipc.client.connection.maxidletime", 10000);
+      this.maxIdleToClose = conf.getInt("hbase.ipc.client.kill.max", 10);
+      int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
+          HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
+      int maxConnectionQueueSize =
+          handlerCount * conf.getInt("hbase.ipc.server.handler.queue.size", 100);
+      // create a set with concurrency -and- a thread-safe iterator, add 2
+      // for listener and idle closer threads
+      this.connections = Collections.newSetFromMap(
+          new ConcurrentHashMap<Connection,Boolean>(
+              maxConnectionQueueSize, 0.75f, readThreads+2));
+    }
+
+    private boolean add(Connection connection) {
+      boolean added = connections.add(connection);
+      if (added) {
+        count.getAndIncrement();
+      }
+      return added;
+    }
+
+    private boolean remove(Connection connection) {
+      boolean removed = connections.remove(connection);
+      if (removed) {
+        count.getAndDecrement();
+      }
+      return removed;
+    }
+
+    int size() {
+      return count.get();
+    }
+
+    Connection[] toArray() {
+      return connections.toArray(new Connection[0]);
+    }
+
+    Connection register(SocketChannel channel) {
+      Connection connection = new Connection(channel, System.currentTimeMillis());
+      add(connection);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Server connection from " + connection +
+            "; connections=" + size() +
+            ", queued calls size (bytes)=" + callQueueSizeInBytes.get() +
+            ", general queued calls=" + scheduler.getGeneralQueueLength() +
+            ", priority queued calls=" + scheduler.getPriorityQueueLength());
+      }
+      return connection;
+    }
+
+    boolean close(Connection connection) {
+      boolean exists = remove(connection);
+      if (exists) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(Thread.currentThread().getName() +
+              ": disconnecting client " + connection +
+              ". Number of active connections: "+ size());
+        }
+        // only close if actually removed to avoid double-closing due
+        // to possible races
+        connection.close();
+      }
+      return exists;
+    }
+
+    // synch'ed to avoid explicit invocation upon OOM from colliding with
+    // timer task firing
+    synchronized void closeIdle(boolean scanAll) {
+      long minLastContact = System.currentTimeMillis() - maxIdleTime;
+      // concurrent iterator might miss new connections added
+      // during the iteration, but that's ok because they won't
+      // be idle yet anyway and will be caught on next scan
+      int closed = 0;
+      for (Connection connection : connections) {
+        // stop if connections dropped below threshold unless scanning all
+        if (!scanAll && size() < idleScanThreshold) {
+          break;
+        }
+        // stop if not scanning all and max connections are closed
+        if (connection.isIdle() &&
+            connection.getLastContact() < minLastContact &&
+            close(connection) &&
+            !scanAll && (++closed == maxIdleToClose)) {
+          break;
+        }
+      }
+    }
+
+    void closeAll() {
+      // use a copy of the connections to be absolutely sure the concurrent
+      // iterator doesn't miss a connection
+      for (Connection connection : toArray()) {
+        close(connection);
+      }
+    }
+
+    void startIdleScan() {
+      scheduleIdleScanTask();
+    }
+
+    void stopIdleScan() {
+      idleScanTimer.cancel();
+    }
+
+    private void scheduleIdleScanTask() {
+      if (!running) {
+        return;
+      }
+      TimerTask idleScanTask = new TimerTask(){
+        @Override
+        public void run() {
+          if (!running) {
+            return;
+          }
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(Thread.currentThread().getName()+": task running");
+          }
+          try {
+            closeIdle(false);
+          } finally {
+            // explicitly reschedule so next execution occurs relative
+            // to the end of this scan, not the beginning
+            scheduleIdleScanTask();
+          }
+        }
+      };
+      idleScanTimer.schedule(idleScanTask, idleScanInterval);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/3a95552c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
index 1f496b4..743c5bb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
@@ -41,7 +41,7 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
   @Override
   public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
     int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
-		HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
+        HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
 
     return new SimpleRpcScheduler(
       conf,

http://git-wip-us.apache.org/repos/asf/hbase/blob/3a95552c/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
index ceb945b..45cec78 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/AbstractTestIPC.java
@@ -263,7 +263,7 @@ public abstract class AbstractTestIPC {
       fail("Expected an exception to have been thrown!");
     } catch (Exception e) {
       LOG.info("Caught expected exception: " + e.toString());
-      assertTrue(StringUtils.stringifyException(e).contains("Injected fault"));
+      assertTrue(e.toString(), StringUtils.stringifyException(e).contains("Injected fault"));
     } finally {
       rpcServer.stop();
     }


[02/50] hbase git commit: HBASE-15919 Modify docs to change from @Rule to @ClassRule. Also clarify that timeout limits are on test case level. (Apekshit)

Posted by sy...@apache.org.
HBASE-15919 Modify docs to change from @Rule to @ClassRule. Also clarify that timeout limits are on test case level. (Apekshit)
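
A condensed sketch of the recommended pattern (imports assumed from the
HBase test classpath; the full skeleton is in the developer.adoc diff below):

    import org.apache.hadoop.hbase.CategoryBasedTimeout;
    import org.apache.hadoop.hbase.testclassification.SmallTests;
    import org.junit.ClassRule;
    import org.junit.experimental.categories.Category;
    import org.junit.rules.TestRule;

    @Category(SmallTests.class)
    public class TestExample {
      // Class-level rule: all test methods together must finish within the
      // timeout for this test's category, rather than each method individually.
      @ClassRule
      public static TestRule timeout = CategoryBasedTimeout.forClass(TestExample.class);
    }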

Change-Id: Ifcd0264ea147bcb1100db74d92da95b643f4793f

Signed-off-by: stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5ea2f092
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5ea2f092
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5ea2f092

Branch: refs/heads/hbase-12439
Commit: 5ea2f092332515eea48136d7d92f7b8ea72df15b
Parents: 75c2360
Author: Apekshit <ap...@gmail.com>
Authored: Tue May 31 03:30:50 2016 -0700
Committer: stack <st...@apache.org>
Committed: Tue May 31 10:12:00 2016 -0700

----------------------------------------------------------------------
 src/main/asciidoc/_chapters/developer.adoc | 93 +++++++++----------------
 1 file changed, 33 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/5ea2f092/src/main/asciidoc/_chapters/developer.adoc
----------------------------------------------------------------------
diff --git a/src/main/asciidoc/_chapters/developer.adoc b/src/main/asciidoc/_chapters/developer.adoc
index a11a04e..74ce3df 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -864,7 +864,8 @@ Also, keep in mind that if you are running tests in the `hbase-server` module yo
 [[hbase.unittests]]
 === Unit Tests
 
-Apache HBase unit tests are subdivided into four categories: small, medium, large, and integration with corresponding JUnit link:http://www.junit.org/node/581[categories]: `SmallTests`, `MediumTests`, `LargeTests`, `IntegrationTests`.
+Apache HBase test cases are subdivided into four categories: small, medium, large, and
+integration with corresponding JUnit link:http://www.junit.org/node/581[categories]: `SmallTests`, `MediumTests`, `LargeTests`, `IntegrationTests`.
 JUnit categories are denoted using java annotations and look like this in your unit test code.
 
 [source,java]
@@ -879,10 +880,11 @@ public class TestHRegionInfo {
 }
 ----
 
-The above example shows how to mark a unit test as belonging to the `small` category.
-All unit tests in HBase have a categorization.
+The above example shows how to mark a test case as belonging to the `small` category.
+All test cases in HBase should have a categorization.
 
-The first three categories, `small`, `medium`, and `large`, are for tests run when you type `$ mvn test`.
+The first three categories, `small`, `medium`, and `large`, are for test cases which run when you
+type `$ mvn test`.
 In other words, these three categorizations are for HBase unit tests.
 The `integration` category is not for unit tests, but for integration tests.
 These are run when you invoke `$ mvn verify`.
@@ -890,22 +892,23 @@ Integration tests are described in <<integration.tests,integration.tests>>.
 
 HBase uses a patched maven surefire plugin and maven profiles to implement its unit test characterizations.
 
-Keep reading to figure which annotation of the set small, medium, and large to put on your new HBase unit test.
+Keep reading to figure out which annotation of the set small, medium, and large to put on your new
+HBase test case.
 
 .Categorizing Tests
 Small Tests (((SmallTests)))::
-  _Small_ tests are executed in a shared JVM.
-  We put in this category all the tests that can be executed quickly in a shared JVM.
-  The maximum execution time for a small test is 15 seconds, and small tests should not use a (mini)cluster.
+  _Small_ test cases are executed in a shared JVM and individual test cases should run in 15 seconds
+   or less; i.e. a link:https://en.wikipedia.org/wiki/JUnit[junit test fixture], a java object made
+   up of test methods, should finish in under 15 seconds. These test cases cannot use a mini cluster.
+   These are run as part of patch pre-commit.
 
 Medium Tests (((MediumTests)))::
-  _Medium_ tests represent tests that must be executed before proposing a patch.
-  They are designed to run in less than 30 minutes altogether, and are quite stable in their results.
-  They are designed to last less than 50 seconds individually.
-  They can use a cluster, and each of them is executed in a separate JVM.
+  _Medium_ test cases are executed in a separate JVM and individual test cases should run in 50 seconds
+   or less. Together, they should take less than 30 minutes, and are quite stable in their results.
+   These test cases can use a mini cluster. These are run as part of patch pre-commit.
 
 Large Tests (((LargeTests)))::
-  _Large_ tests are everything else.
+  _Large_ test cases are everything else.
   They are typically large-scale tests, regression tests for specific bugs, timeout tests, performance tests.
   They are executed before a commit on the pre-integration machines.
   They can be run on the developer machine as well.
@@ -1049,9 +1052,7 @@ ConnectionCount=1 (was 1)
 
 * All tests must be categorized, if not they could be skipped.
 * All tests should be written to be as fast as possible.
-* Small category tests should last less than 15 seconds, and must not have any side effect.
-* Medium category tests should last less than 50 seconds.
-* Large category tests should last less than 3 minutes.
+* See <<hbase.unittests,hbase.unittests>> for test case categories and corresponding timeouts.
   This should ensure a good parallelization for people using it, and ease the analysis when the test fails.
 
 [[hbase.tests.sleeps]]
@@ -1080,56 +1081,28 @@ This will allow to share the cluster later.
 [[hbase.tests.example.code]]
 ==== Tests Skeleton Code
 
-Here is a test skeleton code with Categorization and a Category-based timeout Rule to copy and paste and use as basis for test contribution.
+Here is test skeleton code with Categorization and a Category-based timeout rule to copy and paste and use as a basis for test contribution.
 [source,java]
 ----
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import static org.junit.Assert.*;
-
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.junit.rules.TestName;
-import org.junit.rules.TestRule;
-
-/**
- * Skeleton HBase test
+ * Describe what this testcase tests. Talk about resources initialized in @BeforeClass (before
+ * any test is run) and before each test is run, etc.
  */
-// NOTICE: See how we've 'categorized' this test. All hbase unit tests need to be categorized as
-// either 'small', 'medium', or 'large'. See http://hbase.apache.org/book.html#hbase.tests
-// for more on these categories.
+// Specify the category as explained in <<hbase.unittests,hbase.unittests>>.
 @Category(SmallTests.class)
 public class TestExample {
-  // Handy test rule that allows you subsequently get at the name of the current method. See
-  // down in 'test()' where we use it in the 'fail' message.
+  // Replace the TestExample.class in the below with the name of your test fixture class.
+  private static final Log LOG = LogFactory.getLog(TestExample.class);
+
+  // Handy test rule that allows you to subsequently get the name of the current method. See
+  // down in 'testExampleFoo()' where we use it to log current test's name.
   @Rule public TestName testName = new TestName();
 
-  // Rather than put a @Test (timeout=.... on each test so for sure the test times out, instead
-  // just the CategoryBasedTimeout... It will apply to each test in this test set, the timeout
-  // that goes w/ the particular test categorization.
-  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-        withLookingForStuckThread(true).build();
+  // CategoryBasedTimeout.forClass(<testcase>) decides the timeout based on the category
+  // (small/medium/large) of the testcase. @ClassRule requires that the full testcase runs within
+  // this timeout irrespective of individual test methods' times.
+  @ClassRule
+  public static TestRule timeout = CategoryBasedTimeout.forClass(TestExample.class);
 
   @Before
   public void setUp() throws Exception {
@@ -1140,8 +1113,8 @@ public class TestExample {
   }
 
   @Test
-  public void test() {
-    fail(testName.getMethodName() + " is not yet implemented");
+  public void testExampleFoo() {
+    LOG.info("Running test " + testName.getMethodName());
   }
 }
 ----


[15/50] hbase git commit: HBASE-15881 Allow BZIP2 compression.

Posted by sy...@apache.org.
HBASE-15881 Allow BZIP2 compression.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fc890a2e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fc890a2e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fc890a2e

Branch: refs/heads/hbase-12439
Commit: fc890a2ecb12c3c664ad00f8b2f8788dba04c71d
Parents: 7e5d530
Author: Lars Hofhansl <la...@apache.org>
Authored: Thu Jun 2 12:19:02 2016 -0700
Committer: Lars Hofhansl <la...@apache.org>
Committed: Thu Jun 2 12:19:02 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/io/compress/Compression.java   | 29 +++++++++++++++++++-
 .../hadoop/hbase/util/TestCompressionTest.java  |  2 ++
 2 files changed, 30 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/fc890a2e/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java
index 821b21f..6dc4190 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/compress/Compression.java
@@ -235,7 +235,34 @@ public final class Compression {
           throw new RuntimeException(e);
         }
       }
-  };
+    },
+    BZIP2("bzip2") {
+      // Use base type to avoid compile-time dependencies.
+      private volatile transient CompressionCodec bzipCodec;
+      private transient Object lock = new Object();
+
+      @Override
+      CompressionCodec getCodec(Configuration conf) {
+        if (bzipCodec == null) {
+          synchronized (lock) {
+            if (bzipCodec == null) {
+              bzipCodec = buildCodec(conf);
+            }
+          }
+        }
+        return bzipCodec;
+      }
+
+      private CompressionCodec buildCodec(Configuration conf) {
+        try {
+          Class<?> externalCodec =
+              getClassLoaderForCodec().loadClass("org.apache.hadoop.io.compress.BZip2Codec");
+          return (CompressionCodec) ReflectionUtils.newInstance(externalCodec, conf);
+        } catch (ClassNotFoundException e) {
+          throw new RuntimeException(e);
+        }
+      }
+    };
 
     private final Configuration conf;
     private final String compressName;

http://git-wip-us.apache.org/repos/asf/hbase/blob/fc890a2e/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java
index 43c7cfc..398f3f0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestCompressionTest.java
@@ -75,12 +75,14 @@ public class TestCompressionTest {
       nativeCodecTest("LZO", "lzo2", "com.hadoop.compression.lzo.LzoCodec");
       nativeCodecTest("LZ4", null, "org.apache.hadoop.io.compress.Lz4Codec");
       nativeCodecTest("SNAPPY", "snappy", "org.apache.hadoop.io.compress.SnappyCodec");
+      nativeCodecTest("BZIP2", "bzip2", "org.apache.hadoop.io.compress.BZip2Codec");
     } else {
       // Hadoop nativelib is not available
       LOG.debug("Native code not loaded");
       assertFalse(CompressionTest.testCompression("LZO"));
       assertFalse(CompressionTest.testCompression("LZ4"));
       assertFalse(CompressionTest.testCompression("SNAPPY"));
+      assertFalse(CompressionTest.testCompression("BZIP2"));
     }
   }
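
As a usage sketch (table and family names here are illustrative), a column
family can opt into the new algorithm via the standard descriptor API:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.io.compress.Compression;

    public class Bzip2TableSketch {
      public static HTableDescriptor exampleTable() {
        // Requires the Hadoop BZip2Codec (org.apache.hadoop.io.compress.BZip2Codec)
        // to be loadable at runtime, per the reflection in Compression.java above.
        HTableDescriptor table = new HTableDescriptor(TableName.valueOf("example_table"));
        HColumnDescriptor family = new HColumnDescriptor("cf");
        family.setCompressionType(Compression.Algorithm.BZIP2);
        table.addFamily(family);
        return table;
      }
    }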
 


[43/50] hbase git commit: HBASE-15994 Allow selection of RpcSchedulers Adds logging by the RpcExecutors of their run configs Adds a FifoRpcSchedulerFactory so you can try Fifo scheduler.

Posted by sy...@apache.org.
HBASE-15994 Allow selection of RpcSchedulers
Adds logging by the RpcExecutors of their run configs
Adds a FifoRpcSchedulerFactory so you can try the FIFO scheduler.
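
As a sketch of how the factory might be selected (the property key below is
an assumption based on the RpcSchedulerFactory plumbing; verify it against
RSRpcServices in your HBase version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SchedulerSelectionSketch {
      public static Configuration fifoScheduled() {
        Configuration conf = HBaseConfiguration.create();
        // ASSUMPTION: the property name for the scheduler factory lookup.
        conf.set("hbase.region.server.rpc.scheduler.factory.class",
            "org.apache.hadoop.hbase.regionserver.FifoRpcSchedulerFactory");
        return conf;
      }
    }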


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/031b7450
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/031b7450
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/031b7450

Branch: refs/heads/hbase-12439
Commit: 031b745001c7d54ef13f3cd6d725d0eb78095785
Parents: 407aa4d
Author: stack <st...@apache.org>
Authored: Wed Jun 8 20:23:11 2016 -0700
Committer: stack <st...@apache.org>
Committed: Wed Jun 8 20:23:11 2016 -0700

----------------------------------------------------------------------
 .../hbase/ipc/BalancedQueueRpcExecutor.java     |  4 ++
 .../hadoop/hbase/ipc/FifoRpcScheduler.java      |  5 ++
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java    | 54 ++++++++-------
 .../regionserver/FifoRpcSchedulerFactory.java   | 47 +++++++++++++
 .../hbase/regionserver/RpcSchedulerFactory.java |  4 +-
 .../regionserver/SimpleRpcSchedulerFactory.java |  6 +-
 .../regionserver/TestRpcSchedulerFactory.java   | 71 ++++++++++++++++++++
 7 files changed, 161 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/031b7450/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
index e4205eb..3505221 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/BalancedQueueRpcExecutor.java
@@ -22,6 +22,8 @@ import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -36,6 +38,7 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
 @InterfaceAudience.LimitedPrivate({ HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX })
 @InterfaceStability.Evolving
 public class BalancedQueueRpcExecutor extends RpcExecutor {
+  private static final Log LOG = LogFactory.getLog(BalancedQueueRpcExecutor.class);
 
   protected final List<BlockingQueue<CallRunner>> queues;
   private final QueueBalancer balancer;
@@ -62,6 +65,7 @@ public class BalancedQueueRpcExecutor extends RpcExecutor {
     queues = new ArrayList<BlockingQueue<CallRunner>>(numQueues);
     this.balancer = getBalancer(numQueues);
     initializeQueues(numQueues, queueClass, initargs);
+    LOG.debug(name + " queues=" + numQueues + " handlerCount=" + handlerCount);
   }
 
   protected void initializeQueues(final int numQueues,

http://git-wip-us.apache.org/repos/asf/hbase/blob/031b7450/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
index ee36f3f..70d903a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.ipc;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.DaemonThreadFactory;
 
@@ -32,6 +34,7 @@ import java.util.concurrent.atomic.AtomicInteger;
  * This can be used for HMaster, where no prioritization is needed.
  */
 public class FifoRpcScheduler extends RpcScheduler {
+  private static final Log LOG = LogFactory.getLog(FifoRpcScheduler.class);
   private final int handlerCount;
   private final int maxQueueLength;
   private final AtomicInteger queueSize = new AtomicInteger(0);
@@ -41,6 +44,8 @@ public class FifoRpcScheduler extends RpcScheduler {
     this.handlerCount = handlerCount;
     this.maxQueueLength = conf.getInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH,
         handlerCount * RpcServer.DEFAULT_MAX_CALLQUEUE_LENGTH_PER_HANDLER);
+    LOG.info("Using " + this.getClass().getSimpleName() + " as user call queue; handlerCount=" +
+        handlerCount + "; maxQueueLength=" + maxQueueLength);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/031b7450/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
index 431aeeb..d9d61c1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcScheduler.java
@@ -34,8 +34,11 @@ import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.util.BoundedPriorityBlockingQueue;
 
 /**
- * A scheduler that maintains isolated handler pools for general,
- * high-priority, and replication requests.
+ * The default scheduler. Configurable. Maintains isolated handler pools for general ('default'),
+ * high-priority ('priority'), and replication ('replication') requests. Default behavior is to
+ * balance the requests across handlers. Add configs to enable balancing by read vs writes, etc.
+ * See below article for explanation of options.
+ * @see <a href="http://blog.cloudera.com/blog/2014/12/new-in-cdh-5-2-improvements-for-running-multiple-workloads-on-a-single-hbase-cluster/">Overview on Request Queuing</a>
  */
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
@@ -49,7 +52,8 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs
   public static final String CALL_QUEUE_HANDLER_FACTOR_CONF_KEY =
       "hbase.ipc.server.callqueue.handler.factor";
 
-  /** If set to 'deadline', uses a priority queue and deprioritize long-running scans */
+  /** If set to 'deadline', the default, uses a priority queue and deprioritizes long-running scans
+   */
   public static final String CALL_QUEUE_TYPE_CONF_KEY = "hbase.ipc.server.callqueue.type";
   public static final String CALL_QUEUE_TYPE_CODEL_CONF_VALUE = "codel";
   public static final String CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE = "deadline";
@@ -190,54 +194,58 @@ public class SimpleRpcScheduler extends RpcScheduler implements ConfigurationObs
 
     float callQueuesHandlersFactor = conf.getFloat(CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0);
     int numCallQueues = Math.max(1, (int)Math.round(handlerCount * callQueuesHandlersFactor));
-
-    LOG.info("Using " + callQueueType + " as user call queue, count=" + numCallQueues);
-
+    LOG.info("Using " + callQueueType + " as user call queue; numCallQueues=" + numCallQueues +
+        "; callQReadShare=" + callqReadShare + ", callQScanShare=" + callqScanShare);
     if (numCallQueues > 1 && callqReadShare > 0) {
       // multiple read/write queues
-      if (callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE)) {
+      if (isDeadlineQueueType(callQueueType)) {
         CallPriorityComparator callPriority = new CallPriorityComparator(conf, this.priority);
-        callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount, numCallQueues,
+        callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount, numCallQueues,
             callqReadShare, callqScanShare, maxQueueLength, conf, abortable,
             BoundedPriorityBlockingQueue.class, callPriority);
       } else if (callQueueType.equals(CALL_QUEUE_TYPE_CODEL_CONF_VALUE)) {
         Object[] callQueueInitArgs = {maxQueueLength, codelTargetDelay, codelInterval,
           codelLifoThreshold, numGeneralCallsDropped, numLifoModeSwitches};
-        callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount,
+        callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount,
           numCallQueues, callqReadShare, callqScanShare,
           AdaptiveLifoCoDelCallQueue.class, callQueueInitArgs,
           AdaptiveLifoCoDelCallQueue.class, callQueueInitArgs);
       } else {
-        callExecutor = new RWQueueRpcExecutor("RW.default", handlerCount, numCallQueues,
+        callExecutor = new RWQueueRpcExecutor("RWQ.default", handlerCount, numCallQueues,
           callqReadShare, callqScanShare, maxQueueLength, conf, abortable);
       }
     } else {
       // multiple queues
-      if (callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE)) {
+      if (isDeadlineQueueType(callQueueType)) {
         CallPriorityComparator callPriority = new CallPriorityComparator(conf, this.priority);
-        callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount, numCallQueues,
-          conf, abortable, BoundedPriorityBlockingQueue.class, maxQueueLength, callPriority);
+        callExecutor =
+          new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount, numCallQueues,
+            conf, abortable, BoundedPriorityBlockingQueue.class, maxQueueLength, callPriority);
       } else if (callQueueType.equals(CALL_QUEUE_TYPE_CODEL_CONF_VALUE)) {
-        callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount, numCallQueues,
-          conf, abortable, AdaptiveLifoCoDelCallQueue.class, maxQueueLength,
-          codelTargetDelay, codelInterval, codelLifoThreshold,
-          numGeneralCallsDropped, numLifoModeSwitches);
+        callExecutor =
+          new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount, numCallQueues,
+            conf, abortable, AdaptiveLifoCoDelCallQueue.class, maxQueueLength,
+            codelTargetDelay, codelInterval, codelLifoThreshold,
+            numGeneralCallsDropped, numLifoModeSwitches);
       } else {
-        callExecutor = new BalancedQueueRpcExecutor("B.default", handlerCount,
+        callExecutor = new BalancedQueueRpcExecutor("BalancedQ.default", handlerCount,
             numCallQueues, maxQueueLength, conf, abortable);
       }
     }
-
     // Create 2 queues to help priorityExecutor be more scalable.
     this.priorityExecutor = priorityHandlerCount > 0 ?
-        new BalancedQueueRpcExecutor("Priority", priorityHandlerCount, 2, maxPriorityQueueLength) :
-        null;
-
+      new BalancedQueueRpcExecutor("BalancedQ.priority", priorityHandlerCount, 2,
+          maxPriorityQueueLength):
+      null;
    this.replicationExecutor =
-     replicationHandlerCount > 0 ? new BalancedQueueRpcExecutor("Replication",
+     replicationHandlerCount > 0 ? new BalancedQueueRpcExecutor("BalancedQ.replication",
        replicationHandlerCount, 1, maxQueueLength, conf, abortable) : null;
   }
 
+  private static boolean isDeadlineQueueType(final String callQueueType) {
+    return callQueueType.equals(CALL_QUEUE_TYPE_DEADLINE_CONF_VALUE);
+  }
+
   public SimpleRpcScheduler(
 	      Configuration conf,
 	      int handlerCount,

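For illustration, a sketch wiring the queue options documented above through the public constants from this diff (the values chosen are arbitrary examples, not recommendations):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler;

    public class CallQueueTuningSketch {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Pick the CoDel queue instead of the 'deadline' default.
        conf.set(SimpleRpcScheduler.CALL_QUEUE_TYPE_CONF_KEY,
            SimpleRpcScheduler.CALL_QUEUE_TYPE_CODEL_CONF_VALUE);
        // Roughly one call queue per two handlers.
        conf.setFloat(SimpleRpcScheduler.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.5f);
        return conf;
      }
    }
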
http://git-wip-us.apache.org/repos/asf/hbase/blob/031b7450/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
new file mode 100644
index 0000000..f4b51ba
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/FifoRpcSchedulerFactory.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
+import org.apache.hadoop.hbase.ipc.PriorityFunction;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+
+/**
+ * Factory to use when you want to use the {@link FifoRpcScheduler}
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class FifoRpcSchedulerFactory implements RpcSchedulerFactory {
+  @Override
+  public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
+    int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
+      HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
+    return new FifoRpcScheduler(conf, handlerCount);
+  }
+
+  @Deprecated
+  @Override
+  public RpcScheduler create(Configuration conf, PriorityFunction priority) {
+    return create(conf, priority, null);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/031b7450/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
index f554781..7bc59da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RpcSchedulerFactory.java
@@ -31,7 +31,6 @@ import org.apache.hadoop.hbase.ipc.RpcScheduler;
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
 public interface RpcSchedulerFactory {
-
   /**
    * Constructs a {@link org.apache.hadoop.hbase.ipc.RpcScheduler}.
    */
@@ -39,5 +38,4 @@ public interface RpcSchedulerFactory {
 
   @Deprecated
   RpcScheduler create(Configuration conf, PriorityFunction priority);
-
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/031b7450/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
index 743c5bb..92462c8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SimpleRpcSchedulerFactory.java
@@ -27,11 +27,11 @@ import org.apache.hadoop.hbase.ipc.PriorityFunction;
 import org.apache.hadoop.hbase.ipc.RpcScheduler;
 import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler;
 
-/** Constructs a {@link SimpleRpcScheduler}. */
+/** Constructs a {@link SimpleRpcScheduler}.
+ */
 @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX})
 @InterfaceStability.Evolving
 public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
-
   @Override
   @Deprecated
   public RpcScheduler create(Configuration conf, PriorityFunction priority) {
@@ -42,7 +42,6 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
   public RpcScheduler create(Configuration conf, PriorityFunction priority, Abortable server) {
     int handlerCount = conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
         HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT);
-
     return new SimpleRpcScheduler(
       conf,
       handlerCount,
@@ -54,5 +53,4 @@ public class SimpleRpcSchedulerFactory implements RpcSchedulerFactory {
       server,
       HConstants.QOS_THRESHOLD);
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/031b7450/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java
new file mode 100644
index 0000000..9366c54
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRpcSchedulerFactory.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.ipc.FifoRpcScheduler;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+import org.apache.hadoop.hbase.ipc.SimpleRpcScheduler;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Before;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
+
+/**
+ * A silly test that does nothing but make sure an RpcSchedulerFactory makes what it says
+ * it is going to make.
+ */
+@Category(SmallTests.class)
+public class TestRpcSchedulerFactory {
+  @Rule public TestName testName = new TestName();
+  @ClassRule public static TestRule timeout =
+      CategoryBasedTimeout.forClass(TestRpcSchedulerFactory.class);
+  private Configuration conf;
+
+  @Before
+  public void setUp() throws Exception {
+    this.conf = HBaseConfiguration.create();
+  }
+
+  @Test
+  public void testRWQ() {
+    // Set some configs just to see how it changes the scheduler. Can't assert the settings had
+    // an effect. Just eyeball the log.
+    this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_READ_SHARE_CONF_KEY, 0.5);
+    this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_HANDLER_FACTOR_CONF_KEY, 0.5);
+    this.conf.setDouble(SimpleRpcScheduler.CALL_QUEUE_SCAN_SHARE_CONF_KEY, 0.5);
+    RpcSchedulerFactory factory = new SimpleRpcSchedulerFactory();
+    RpcScheduler rpcScheduler = factory.create(this.conf, null, null);
+    assertTrue(rpcScheduler.getClass().equals(SimpleRpcScheduler.class));
+  }
+
+  @Test
+  public void testFifo() {
+    RpcSchedulerFactory factory = new FifoRpcSchedulerFactory();
+    RpcScheduler rpcScheduler = factory.create(this.conf, null, null);
+    assertTrue(rpcScheduler.getClass().equals(FifoRpcScheduler.class));
+  }
+}
\ No newline at end of file


[20/50] hbase git commit: HBASE-15949 Cleanup TestRegionServerMetrics. @Before and @After to setup/teardown tables using @Rule to set table name based on testname. Refactor out copy-pasted code fragments to single function. (Apekshit)

Posted by sy...@apache.org.
HBASE-15949 Clean up TestRegionServerMetrics.
Use @Before and @After to set up and tear down tables, with a @Rule deriving each table name from the test name.
Refactor copy-pasted code fragments into single functions.
(Apekshit)

Change-Id: Ic22e5027cc3952bab5ec30070ed20e98017db65a
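
A minimal sketch of the @Rule/TestName pattern the refactor relies on (the class below is illustrative, not part of the patch):

    import static org.junit.Assert.assertEquals;

    import org.junit.Before;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TestName;

    public class TestNameRuleSketch {
      @Rule
      public TestName testName = new TestName();

      private String tableName;

      @Before
      public void setUp() {
        // The rule is populated before @Before runs, so each test can derive
        // a per-test table name from its own method name.
        tableName = testName.getMethodName();
      }

      @Test
      public void testSomething() {
        assertEquals("testSomething", tableName);
      }
    }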


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b557f0be
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b557f0be
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b557f0be

Branch: refs/heads/hbase-12439
Commit: b557f0bec62a48753e5d01d7a47f3c9e5a6b3ee8
Parents: bdb46f0
Author: Apekshit <ap...@gmail.com>
Authored: Wed Jun 1 21:52:29 2016 -0700
Committer: Apekshit Sharma <ap...@apache.org>
Committed: Fri Jun 3 14:27:24 2016 -0700

----------------------------------------------------------------------
 .../regionserver/TestRegionServerMetrics.java   | 682 +++++++------------
 1 file changed, 256 insertions(+), 426 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b557f0be/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 18796bd..06db468 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.*;
 import org.apache.hadoop.hbase.client.*;
@@ -27,11 +29,17 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
+import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.ClassRule;
 import org.junit.Ignore;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
 
 import static org.junit.Assert.*;
 
@@ -39,23 +47,35 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-
 @Category({RegionServerTests.class, MediumTests.class})
 public class TestRegionServerMetrics {
-  private static MetricsAssertHelper metricsHelper;
+  private static final Log LOG = LogFactory.getLog(TestRegionServerMetrics.class);
+
+  @Rule
+  public TestName testName = new TestName();
+
+  @ClassRule
+  public static TestRule timeout = CategoryBasedTimeout.forClass(TestRegionServerMetrics.class);
 
   static {
     Logger.getLogger("org.apache.hadoop.hbase").setLevel(Level.DEBUG);
   }
 
+  private static MetricsAssertHelper metricsHelper;
   private static MiniHBaseCluster cluster;
   private static HRegionServer rs;
   private static Configuration conf;
   private static HBaseTestingUtility TEST_UTIL;
+  private static Connection connection;
   private static MetricsRegionServer metricsRegionServer;
   private static MetricsRegionServerSource serverSource;
   private static final int NUM_SCAN_NEXT = 30;
   private static int numScanNext = 0;
+  private static byte[] cf = Bytes.toBytes("cf");
+  private static byte[] row = Bytes.toBytes("row");
+  private static byte[] qualifier = Bytes.toBytes("qual");
+  private static byte[] val = Bytes.toBytes("val");
+  private static Admin admin;
 
   @BeforeClass
   public static void startCluster() throws Exception {
@@ -65,12 +85,16 @@ public class TestRegionServerMetrics {
     conf.getLong("hbase.splitlog.max.resubmit", 0);
     // Make the failure test faster
     conf.setInt("zookeeper.recovery.retry", 0);
+    // testMobMetrics creates few hfiles and manages compaction manually.
+    conf.setInt("hbase.hstore.compactionThreshold", 100);
+    conf.setInt("hbase.hstore.compaction.max", 100);
     conf.setInt(HConstants.REGIONSERVER_INFO_PORT, -1);
 
     TEST_UTIL.startMiniCluster(1, 1);
     cluster = TEST_UTIL.getHBaseCluster();
-
     cluster.waitForActiveAndReadyMaster();
+    admin = TEST_UTIL.getHBaseAdmin();
+    connection = TEST_UTIL.getConnection();
 
     while (cluster.getLiveRegionServerThreads().size() < 1) {
       Threads.sleep(100);
@@ -88,551 +112,370 @@ public class TestRegionServerMetrics {
     }
   }
 
-  @Test(timeout = 300000)
+  TableName tableName;
+  Table table;
+
+  @Before
+  public void beforeTestMethod() throws Exception {
+    metricsRegionServer.getRegionServerWrapper().forceRecompute();
+    tableName = TableName.valueOf(testName.getMethodName());
+    table = TEST_UTIL.createTable(tableName, cf);
+  }
+
+  @After
+  public void afterTestMethod() throws Exception {
+    admin.disableTable(tableName);
+    admin.deleteTable(tableName);
+  }
+
+  public void waitTableDeleted(TableName name, long timeoutInMillis) throws Exception {
+    long start = System.currentTimeMillis();
+    while (true) {
+      boolean found = false;
+      for (HTableDescriptor htd : admin.listTables()) {
+        // Compare names with equals(); == on Strings checks identity, not content.
+        if (htd.getNameAsString().equals(name.getNameAsString())) {
+          found = true;
+          break;
+        }
+      }
+      // Done once the table is gone, or give up after the timeout.
+      if (!found || System.currentTimeMillis() - start > timeoutInMillis)
+        return;
+      Thread.sleep(1000);
+    }
+  }
+
+  public void assertCounter(String metric, long expectedValue) {
+    metricsHelper.assertCounter(metric, expectedValue, serverSource);
+  }
+
+  public void assertGauge(String metric, long expectedValue) {
+    metricsHelper.assertGauge(metric, expectedValue, serverSource);
+  }
+
+  // Aggregates metrics from regions and assert given list of metrics and expected values.
+  public void assertRegionMetrics(String metric, long expectedValue) throws Exception {
+    try (RegionLocator locator = connection.getRegionLocator(tableName)) {
+      for ( HRegionLocation location: locator.getAllRegionLocations()) {
+        HRegionInfo hri = location.getRegionInfo();
+        MetricsRegionAggregateSource agg =
+            rs.getRegion(hri.getRegionName()).getMetrics().getSource().getAggregateSource();
+        String prefix = "namespace_" + NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR +
+            "_table_" + tableName.getNameAsString() +
+            "_region_" + hri.getEncodedName()+
+            "_metric_";
+        metricsHelper.assertCounter(prefix + metric, expectedValue, agg);
+      }
+    }
+  }
+
+  public void doNPuts(int n, boolean batch) throws Exception {
+    if (batch) {
+      List<Put> puts = new ArrayList<>();
+      for (int i = 0; i < n; i++) {
+        Put p = new Put(Bytes.toBytes("" + i + "row")).addColumn(cf, qualifier, val);
+        puts.add(p);
+      }
+      table.put(puts);
+    } else {
+      for (int i = 0; i < n; i++) {
+        Put p = new Put(row).addColumn(cf, qualifier, val);
+        table.put(p);
+      }
+    }
+  }
+
+  public void doNGets(int n, boolean batch) throws Exception {
+    if (batch) {
+      List<Get> gets = new ArrayList<>();
+      for (int i = 0; i < n; i++) {
+        gets.add(new Get(row));
+      }
+      table.get(gets);
+    } else {
+      for (int i = 0; i < n; i++) {
+        table.get(new Get(row));
+      }
+    }
+  }
+
+  @Test
   public void testRegionCount() throws Exception {
-    String regionMetricsKey = "regionCount";
-    long regions = metricsHelper.getGaugeLong(regionMetricsKey, serverSource);
-    // Creating a table should add one region
-    TEST_UTIL.createTable(TableName.valueOf("table"), Bytes.toBytes("cf"));
-    metricsHelper.assertGaugeGt(regionMetricsKey, regions, serverSource);
+    metricsHelper.assertGauge("regionCount", 1, serverSource);
   }
 
   @Test
   public void testLocalFiles() throws Exception {
-    metricsHelper.assertGauge("percentFilesLocal", 0, serverSource);
-    metricsHelper.assertGauge("percentFilesLocalSecondaryRegions", 0, serverSource);
+    assertGauge("percentFilesLocal", 0);
+    assertGauge("percentFilesLocalSecondaryRegions", 0);
   }
 
   @Test
   public void testRequestCount() throws Exception {
-    String tableNameString = "testRequestCount";
-    TableName tName = TableName.valueOf(tableNameString);
-    byte[] cfName = Bytes.toBytes("d");
-    byte[] row = Bytes.toBytes("rk");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] initValue = Bytes.toBytes("Value");
-
-    TEST_UTIL.createTable(tName, cfName);
-
-    Connection connection = TEST_UTIL.getConnection();
-    connection.getTable(tName).close(); //wait for the table to come up.
-
     // Do a first put to be sure that the connection is established, meta is there and so on.
-    Table table = connection.getTable(tName);
-    Put p = new Put(row);
-    p.addColumn(cfName, qualifier, initValue);
-    table.put(p);
+    doNPuts(1, false);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
     long requests = metricsHelper.getCounter("totalRequestCount", serverSource);
     long readRequests = metricsHelper.getCounter("readRequestCount", serverSource);
     long writeRequests = metricsHelper.getCounter("writeRequestCount", serverSource);
 
-    for (int i=0; i< 30; i++) {
-      table.put(p);
-    }
+    doNPuts(30, false);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertCounter("totalRequestCount", requests + 30, serverSource);
-    metricsHelper.assertCounter("readRequestCount", readRequests, serverSource);
-    metricsHelper.assertCounter("writeRequestCount", writeRequests + 30, serverSource);
+    assertCounter("totalRequestCount", requests + 30);
+    assertCounter("readRequestCount", readRequests);
+    assertCounter("writeRequestCount", writeRequests + 30);
 
-    Get g = new Get(row);
-    for (int i=0; i< 10; i++) {
-      table.get(g);
-    }
+    doNGets(10, false);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertCounter("totalRequestCount", requests + 40, serverSource);
-    metricsHelper.assertCounter("readRequestCount", readRequests + 10, serverSource);
-    metricsHelper.assertCounter("writeRequestCount", writeRequests + 30, serverSource);
+    assertCounter("totalRequestCount", requests + 40);
+    assertCounter("readRequestCount", readRequests + 10);
+    assertCounter("writeRequestCount", writeRequests + 30);
 
-    try (RegionLocator locator = connection.getRegionLocator(tName)) {
-      for ( HRegionLocation location: locator.getAllRegionLocations()) {
-        HRegionInfo i = location.getRegionInfo();
-        MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName())
-            .getMetrics()
-            .getSource()
-            .getAggregateSource();
-        String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+
-            "_table_"+tableNameString +
-            "_region_" + i.getEncodedName()+
-            "_metric";
-        metricsHelper.assertCounter(prefix + "_getNumOps", 10, agg);
-        metricsHelper.assertCounter(prefix + "_mutateCount", 31, agg);
-      }
-    }
-    List<Get> gets = new ArrayList<Get>();
-    for (int i=0; i< 10; i++) {
-      gets.add(new Get(row));
-    }
-    table.get(gets);
+    assertRegionMetrics("getNumOps", 10);
+    assertRegionMetrics("mutateCount", 31);
+
+    doNGets(10, true);  // true = batch
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertCounter("totalRequestCount", requests + 50, serverSource);
-    metricsHelper.assertCounter("readRequestCount", readRequests + 20, serverSource);
-    metricsHelper.assertCounter("writeRequestCount", writeRequests + 30, serverSource);
+    assertCounter("totalRequestCount", requests + 50);
+    assertCounter("readRequestCount", readRequests + 20);
+    assertCounter("writeRequestCount", writeRequests + 30);
 
-    List<Put> puts = new ArrayList<>();
-    for (int i=0; i< 30; i++) {
-      puts.add(p);
-    }
-    table.put(puts);
+    doNPuts(30, true);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertCounter("totalRequestCount", requests + 80, serverSource);
-    metricsHelper.assertCounter("readRequestCount", readRequests + 20, serverSource);
-    metricsHelper.assertCounter("writeRequestCount", writeRequests + 60, serverSource);
-
-    table.close();
+    assertCounter("totalRequestCount", requests + 80);
+    assertCounter("readRequestCount", readRequests + 20);
+    assertCounter("writeRequestCount", writeRequests + 60);
   }
 
   @Test
   public void testGet() throws Exception {
-    String tableNameString = "testGet";
-    TableName tName = TableName.valueOf(tableNameString);
-    byte[] cfName = Bytes.toBytes("d");
-    byte[] row = Bytes.toBytes("rk");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] initValue = Bytes.toBytes("Value");
-
-    TEST_UTIL.createTable(tName, cfName);
-
-    Connection connection = TEST_UTIL.getConnection();
-    connection.getTable(tName).close(); //wait for the table to come up.
-
     // Do a first put to be sure that the connection is established, meta is there and so on.
-    Table table = connection.getTable(tName);
-    Put p = new Put(row);
-    p.addColumn(cfName, qualifier, initValue);
-    table.put(p);
-
-    Get g = new Get(row);
-    for (int i=0; i< 10; i++) {
-      table.get(g);
-    }
-
-    metricsRegionServer.getRegionServerWrapper().forceRecompute();
-
-    try (RegionLocator locator = connection.getRegionLocator(tName)) {
-      for ( HRegionLocation location: locator.getAllRegionLocations()) {
-        HRegionInfo i = location.getRegionInfo();
-        MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName())
-          .getMetrics()
-          .getSource()
-          .getAggregateSource();
-        String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+
-          "_table_"+tableNameString +
-          "_region_" + i.getEncodedName()+
-          "_metric";
-        metricsHelper.assertCounter(prefix + "_getSizeNumOps", 10, agg);
-        metricsHelper.assertCounter(prefix + "_getNumOps", 10, agg);
-      }
-      metricsHelper.assertCounterGt("Get_num_ops", 10, serverSource);
-    }
-    table.close();
+    doNPuts(1, false);
+    doNGets(10, false);
+    assertRegionMetrics("getNumOps", 10);
+    assertRegionMetrics("getSizeNumOps", 10);
+    metricsHelper.assertCounterGt("Get_num_ops", 10, serverSource);
   }
 
   @Test
   public void testMutationsWithoutWal() throws Exception {
-    TableName tableName = TableName.valueOf("testMutationsWithoutWal");
-    byte[] cf = Bytes.toBytes("d");
-    byte[] row = Bytes.toBytes("rk");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] val = Bytes.toBytes("Value");
-
-    metricsRegionServer.getRegionServerWrapper().forceRecompute();
-
-    Table t = TEST_UTIL.createTable(tableName, cf);
-
-    Put p = new Put(row);
-    p.addColumn(cf, qualifier, val);
-    p.setDurability(Durability.SKIP_WAL);
-
-    t.put(p);
+    Put p = new Put(row).addColumn(cf, qualifier, val)
+        .setDurability(Durability.SKIP_WAL);
+    table.put(p);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertGauge("mutationsWithoutWALCount", 1, serverSource);
+    assertGauge("mutationsWithoutWALCount", 1);
     long minLength = row.length + cf.length + qualifier.length + val.length;
     metricsHelper.assertGaugeGt("mutationsWithoutWALSize", minLength, serverSource);
-
-    t.close();
   }
 
   @Test
   public void testStoreCount() throws Exception {
-    TableName tableName = TableName.valueOf("testStoreCount");
-    byte[] cf = Bytes.toBytes("d");
-    byte[] row = Bytes.toBytes("rk");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] val = Bytes.toBytes("Value");
-
-    metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    long stores = metricsHelper.getGaugeLong("storeCount", serverSource);
-    long storeFiles = metricsHelper.getGaugeLong("storeFileCount", serverSource);
-
     //Force a hfile.
-    Table t = TEST_UTIL.createTable(tableName, cf);
-    Put p = new Put(row);
-    p.addColumn(cf, qualifier, val);
-    t.put(p);
+    doNPuts(1, false);
     TEST_UTIL.getHBaseAdmin().flush(tableName);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertGauge("storeCount", stores +1, serverSource);
-    metricsHelper.assertGauge("storeFileCount", storeFiles + 1, serverSource);
-
-    t.close();
+    assertGauge("storeCount", 1);
+    assertGauge("storeFileCount", 1);
   }
 
   @Test
   public void testStoreFileAge() throws Exception {
-    TableName tableName = TableName.valueOf("testStoreFileAge");
-    byte[] cf = Bytes.toBytes("d");
-    byte[] row = Bytes.toBytes("rk");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] val = Bytes.toBytes("Value");
-
     //Force a hfile.
-    Table t = TEST_UTIL.createTable(tableName, cf);
-    Put p = new Put(row);
-    p.addColumn(cf, qualifier, val);
-    t.put(p);
+    doNPuts(1, false);
     TEST_UTIL.getHBaseAdmin().flush(tableName);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
     assertTrue(metricsHelper.getGaugeLong("maxStoreFileAge", serverSource) > 0);
     assertTrue(metricsHelper.getGaugeLong("minStoreFileAge", serverSource) > 0);
     assertTrue(metricsHelper.getGaugeLong("avgStoreFileAge", serverSource) > 0);
-
-    t.close();
   }
 
   @Test
   public void testCheckAndPutCount() throws Exception {
-    String tableNameString = "testCheckAndPutCount";
-    TableName tableName = TableName.valueOf(tableNameString);
-    byte[] cf = Bytes.toBytes("d");
-    byte[] row = Bytes.toBytes("rk");
-    byte[] qualifier = Bytes.toBytes("qual");
     byte[] valOne = Bytes.toBytes("Value");
     byte[] valTwo = Bytes.toBytes("ValueTwo");
     byte[] valThree = Bytes.toBytes("ValueThree");
 
-    Table t = TEST_UTIL.createTable(tableName, cf);
     Put p = new Put(row);
     p.addColumn(cf, qualifier, valOne);
-    t.put(p);
+    table.put(p);
 
     Put pTwo = new Put(row);
     pTwo.addColumn(cf, qualifier, valTwo);
-    t.checkAndPut(row, cf, qualifier, valOne, pTwo);
+    table.checkAndPut(row, cf, qualifier, valOne, pTwo);
 
     Put pThree = new Put(row);
     pThree.addColumn(cf, qualifier, valThree);
-    t.checkAndPut(row, cf, qualifier, valOne, pThree);
+    table.checkAndPut(row, cf, qualifier, valOne, pThree);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertCounter("checkMutateFailedCount", 1, serverSource);
-    metricsHelper.assertCounter("checkMutatePassedCount", 1, serverSource);
-
-    t.close();
+    assertCounter("checkMutateFailedCount", 1);
+    assertCounter("checkMutatePassedCount", 1);
   }
 
   @Test
   public void testIncrement() throws Exception {
-    String tableNameString = "testIncrement";
-    TableName tableName = TableName.valueOf(tableNameString);
-    byte[] cf = Bytes.toBytes("d");
-    byte[] row = Bytes.toBytes("rk");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] val = Bytes.toBytes(0l);
-
-
-    Table t = TEST_UTIL.createTable(tableName, cf);
-    Put p = new Put(row);
-    p.addColumn(cf, qualifier, val);
-    t.put(p);
+    Put p = new Put(row).addColumn(cf, qualifier, Bytes.toBytes(0L));
+    table.put(p);
 
-    for(int count = 0; count< 13; count++) {
+    for(int count = 0; count < 13; count++) {
       Increment inc = new Increment(row);
       inc.addColumn(cf, qualifier, 100);
-      t.increment(inc);
+      table.increment(inc);
     }
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertCounter("incrementNumOps", 13, serverSource);
-
-    t.close();
+    assertCounter("incrementNumOps", 13);
   }
 
   @Test
   public void testAppend() throws Exception {
-    String tableNameString = "testAppend";
-    TableName tableName = TableName.valueOf(tableNameString);
-    byte[] cf = Bytes.toBytes("d");
-    byte[] row = Bytes.toBytes("rk");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] val = Bytes.toBytes("One");
-
-
-    Table t = TEST_UTIL.createTable(tableName, cf);
-    Put p = new Put(row);
-    p.addColumn(cf, qualifier, val);
-    t.put(p);
+    doNPuts(1, false);
 
     for(int count = 0; count< 73; count++) {
       Append append = new Append(row);
       append.add(cf, qualifier, Bytes.toBytes(",Test"));
-      t.append(append);
+      table.append(append);
     }
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertCounter("appendNumOps", 73, serverSource);
-
-    t.close();
+    assertCounter("appendNumOps", 73);
   }
 
   @Test
-  public void testScanSize() throws IOException {
-    String tableNameString = "testScanSize";
-    TableName tableName = TableName.valueOf(tableNameString);
-    byte[] cf = Bytes.toBytes("d");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] val = Bytes.toBytes("One");
-
-    List<Put> puts = new ArrayList<>();
-    for (int insertCount =0; insertCount < 100; insertCount++) {
-      Put p = new Put(Bytes.toBytes("" + insertCount + "row"));
-      p.addColumn(cf, qualifier, val);
-      puts.add(p);
-    }
-    try (Table t = TEST_UTIL.createTable(tableName, cf)) {
-      t.put(puts);
-
-      Scan s = new Scan();
-      s.setBatch(1);
-      s.setCaching(1);
-      ResultScanner resultScanners = t.getScanner(s);
-
-      for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
-        Result result = resultScanners.next();
-        assertNotNull(result);
-        assertEquals(1, result.size());
-      }
+  public void testScanSize() throws Exception {
+    doNPuts(100, true);  // batch put
+    Scan s = new Scan();
+    s.setBatch(1);
+    s.setCaching(1);
+    ResultScanner resultScanners = table.getScanner(s);
+
+    for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
+      Result result = resultScanners.next();
+      assertNotNull(result);
+      assertEquals(1, result.size());
     }
     numScanNext += NUM_SCAN_NEXT;
-    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
-      for ( HRegionLocation location: locator.getAllRegionLocations()) {
-        HRegionInfo i = location.getRegionInfo();
-        MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName())
-            .getMetrics()
-            .getSource()
-            .getAggregateSource();
-        String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+
-            "_table_"+tableNameString +
-            "_region_" + i.getEncodedName()+
-            "_metric";
-        metricsHelper.assertCounter(prefix + "_scanSizeNumOps", NUM_SCAN_NEXT, agg);
-      }
-      metricsHelper.assertCounter("ScanSize_num_ops", numScanNext, serverSource);
-    }
-    try (Admin admin = TEST_UTIL.getHBaseAdmin()) {
-      admin.disableTable(tableName);
-      admin.deleteTable(tableName);
-    }
+    assertRegionMetrics("scanSizeNumOps", NUM_SCAN_NEXT);
+    assertCounter("ScanSize_num_ops", numScanNext);
   }
 
   @Test
-  public void testScanTime() throws IOException {
-    String tableNameString = "testScanTime";
-    TableName tableName = TableName.valueOf(tableNameString);
-    byte[] cf = Bytes.toBytes("d");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] val = Bytes.toBytes("One");
-
-    List<Put> puts = new ArrayList<>();
-    for (int insertCount =0; insertCount < 100; insertCount++) {
-      Put p = new Put(Bytes.toBytes("" + insertCount + "row"));
-      p.addColumn(cf, qualifier, val);
-      puts.add(p);
-    }
-    try (Table t = TEST_UTIL.createTable(tableName, cf)) {
-      t.put(puts);
-
-      Scan s = new Scan();
-      s.setBatch(1);
-      s.setCaching(1);
-      ResultScanner resultScanners = t.getScanner(s);
-
-      for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
-        Result result = resultScanners.next();
-        assertNotNull(result);
-        assertEquals(1, result.size());
-      }
+  public void testScanTime() throws Exception {
+    doNPuts(100, true);
+    Scan s = new Scan();
+    s.setBatch(1);
+    s.setCaching(1);
+    ResultScanner resultScanners = table.getScanner(s);
+
+    for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
+      Result result = resultScanners.next();
+      assertNotNull(result);
+      assertEquals(1, result.size());
     }
     numScanNext += NUM_SCAN_NEXT;
-    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
-      for ( HRegionLocation location: locator.getAllRegionLocations()) {
-        HRegionInfo i = location.getRegionInfo();
-        MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName())
-          .getMetrics()
-          .getSource()
-          .getAggregateSource();
-        String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+
-          "_table_"+tableNameString +
-          "_region_" + i.getEncodedName()+
-          "_metric";
-        metricsHelper.assertCounter(prefix + "_scanTimeNumOps", NUM_SCAN_NEXT, agg);
-      }
-      metricsHelper.assertCounter("ScanTime_num_ops", numScanNext, serverSource);
-    }
-    try (Admin admin = TEST_UTIL.getHBaseAdmin()) {
-      admin.disableTable(tableName);
-      admin.deleteTable(tableName);
-    }
+    assertRegionMetrics("scanTimeNumOps", NUM_SCAN_NEXT);
+    assertCounter("ScanTime_num_ops", numScanNext);
   }
 
   @Test
-  public void testScanSizeForSmallScan() throws IOException {
-    String tableNameString = "testScanSizeSmall";
-    TableName tableName = TableName.valueOf(tableNameString);
-    byte[] cf = Bytes.toBytes("d");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] val = Bytes.toBytes("One");
-
-    List<Put> puts = new ArrayList<>();
-    for (int insertCount =0; insertCount < 100; insertCount++) {
-      Put p = new Put(Bytes.toBytes("" + insertCount + "row"));
-      p.addColumn(cf, qualifier, val);
-      puts.add(p);
-    }
-    try (Table t = TEST_UTIL.createTable(tableName, cf)) {
-      t.put(puts);
-
-      Scan s = new Scan();
-      s.setSmall(true);
-      s.setCaching(1);
-      ResultScanner resultScanners = t.getScanner(s);
-
-      for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
-        Result result = resultScanners.next();
-        assertNotNull(result);
-        assertEquals(1, result.size());
-      }
+  public void testScanSizeForSmallScan() throws Exception {
+    doNPuts(100, true);
+    Scan s = new Scan();
+    s.setSmall(true);
+    s.setCaching(1);
+    ResultScanner resultScanners = table.getScanner(s);
+
+    for (int nextCount = 0; nextCount < NUM_SCAN_NEXT; nextCount++) {
+      Result result = resultScanners.next();
+      assertNotNull(result);
+      assertEquals(1, result.size());
     }
     numScanNext += NUM_SCAN_NEXT;
-    try (RegionLocator locator = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
-      for ( HRegionLocation location: locator.getAllRegionLocations()) {
-        HRegionInfo i = location.getRegionInfo();
-        MetricsRegionAggregateSource agg = rs.getRegion(i.getRegionName())
-            .getMetrics()
-            .getSource()
-            .getAggregateSource();
-        String prefix = "namespace_"+NamespaceDescriptor.DEFAULT_NAMESPACE_NAME_STR+
-            "_table_"+tableNameString +
-            "_region_" + i.getEncodedName()+
-            "_metric";
-        metricsHelper.assertCounter(prefix + "_scanSizeNumOps", NUM_SCAN_NEXT, agg);
-      }
-      metricsHelper.assertCounter("ScanSize_num_ops", numScanNext, serverSource);
-    }
-    try (Admin admin = TEST_UTIL.getHBaseAdmin()) {
-      admin.disableTable(tableName);
-      admin.deleteTable(tableName);
-    }
+    assertRegionMetrics("scanSizeNumOps", NUM_SCAN_NEXT);
+    assertCounter("ScanSize_num_ops", numScanNext);
   }
 
   @Test
   public void testMobMetrics() throws IOException, InterruptedException {
-    String tableNameString = "testMobMetrics";
-    TableName tableName = TableName.valueOf(tableNameString);
-    byte[] cf = Bytes.toBytes("d");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] val = Bytes.toBytes("mobdata");
-    int numHfiles = conf.getInt("hbase.hstore.compactionThreshold", 3) - 1;
+    TableName tableName = TableName.valueOf("testMobMetricsLocal");
+    int numHfiles = 5;
     HTableDescriptor htd = new HTableDescriptor(tableName);
     HColumnDescriptor hcd = new HColumnDescriptor(cf);
     hcd.setMobEnabled(true);
     hcd.setMobThreshold(0);
     htd.addFamily(hcd);
-    Connection connection = ConnectionFactory.createConnection(conf);
-    Admin admin = connection.getAdmin();
-    Table t = TEST_UTIL.createTable(htd, new byte[0][0], conf);
-    Region region = rs.getOnlineRegions(tableName).get(0);
-    for (int insertCount = 0; insertCount < numHfiles; insertCount++) {
-      Put p = new Put(Bytes.toBytes(insertCount));
-      p.addColumn(cf, qualifier, val);
-      t.put(p);
-      admin.flush(tableName);
-    }
-    metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertCounter("mobFlushCount", numHfiles, serverSource);
-    Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(2));
-    ResultScanner scanner = t.getScanner(scan);
-    scanner.next(100);
-    numScanNext++;  // this is an ugly construct
-    scanner.close();
-    metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertCounter("mobScanCellsCount", 2, serverSource);
-    region.getTableDesc().getFamily(cf).setMobThreshold(100);
-    ((HRegion)region).initialize();
-    region.compact(true);
-    metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    metricsHelper.assertCounter("cellsCountCompactedFromMob", numHfiles,
-        serverSource);
-    metricsHelper.assertCounter("cellsCountCompactedToMob", 0, serverSource);
-    scanner = t.getScanner(scan);
-    scanner.next(100);
-    numScanNext++;  // this is an ugly construct
-    metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    // metrics are reset by the region initialization
-    metricsHelper.assertCounter("mobScanCellsCount", 0, serverSource);
-    for (int insertCount = numHfiles;
-        insertCount < 2 * numHfiles - 1; insertCount++) {
-      Put p = new Put(Bytes.toBytes(insertCount));
-      p.addColumn(cf, qualifier, val);
-      t.put(p);
-      admin.flush(tableName);
+    byte[] val = Bytes.toBytes("mobdata");
+    try {
+      Table table = TEST_UTIL.createTable(htd, new byte[0][0], conf);
+      Region region = rs.getOnlineRegions(tableName).get(0);
+      for (int insertCount = 0; insertCount < numHfiles; insertCount++) {
+        Put p = new Put(Bytes.toBytes(insertCount));
+        p.addColumn(cf, qualifier, val);
+        table.put(p);
+        admin.flush(tableName);
+      }
+      metricsRegionServer.getRegionServerWrapper().forceRecompute();
+      assertCounter("mobFlushCount", numHfiles);
+
+      Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(numHfiles));
+      ResultScanner scanner = table.getScanner(scan);
+      scanner.next(100);
+      numScanNext++;  // this is an ugly construct
+      scanner.close();
+      metricsRegionServer.getRegionServerWrapper().forceRecompute();
+      assertCounter("mobScanCellsCount", numHfiles);
+
+      region.getTableDesc().getFamily(cf).setMobThreshold(100);
+      // metrics are reset by the region initialization
+      ((HRegion) region).initialize();
+      region.compact(true);
+      metricsRegionServer.getRegionServerWrapper().forceRecompute();
+      assertCounter("cellsCountCompactedFromMob", numHfiles);
+      assertCounter("cellsCountCompactedToMob", 0);
+
+      scanner = table.getScanner(scan);
+      scanner.next(100);
+      numScanNext++;  // this is an ugly construct
+      metricsRegionServer.getRegionServerWrapper().forceRecompute();
+      assertCounter("mobScanCellsCount", 0);
+
+      for (int insertCount = numHfiles; insertCount < 2 * numHfiles; insertCount++) {
+        Put p = new Put(Bytes.toBytes(insertCount));
+        p.addColumn(cf, qualifier, val);
+        table.put(p);
+        admin.flush(tableName);
+      }
+      region.getTableDesc().getFamily(cf).setMobThreshold(0);
+      // metrics are reset by the region initialization
+      ((HRegion) region).initialize();
+      region.compact(true);
+      metricsRegionServer.getRegionServerWrapper().forceRecompute();
+      // metrics are reset by the region initialization
+      assertCounter("cellsCountCompactedFromMob", 0);
+      assertCounter("cellsCountCompactedToMob", 2 * numHfiles);
+    } finally {
+      admin.disableTable(tableName);
+      admin.deleteTable(tableName);
     }
-    region.getTableDesc().getFamily(cf).setMobThreshold(0);
-    ((HRegion)region).initialize();
-    region.compact(true);
-    metricsRegionServer.getRegionServerWrapper().forceRecompute();
-    // metrics are reset by the region initialization
-    metricsHelper.assertCounter("cellsCountCompactedFromMob", 0, serverSource);
-    metricsHelper.assertCounter("cellsCountCompactedToMob", 2 * numHfiles - 1,
-        serverSource);
-    t.close();
-    admin.close();
-    connection.close();
   }
   
   @Test
   @Ignore
   public void testRangeCountMetrics() throws Exception {
-    String tableNameString = "testRangeCountMetrics";
     final long[] timeranges =
         { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 };
     final String timeRangeType = "TimeRangeCount";
     final String timeRangeMetricName = "Mutate";
     boolean timeRangeCountUpdated = false;
 
-    TableName tName = TableName.valueOf(tableNameString);
-    byte[] cfName = Bytes.toBytes("d");
-    byte[] row = Bytes.toBytes("rk");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] initValue = Bytes.toBytes("Value");
-
-    TEST_UTIL.createTable(tName, cfName);
-
-    Connection connection = TEST_UTIL.getConnection();
-    connection.getTable(tName).close(); // wait for the table to come up.
-
     // Do a first put to be sure that the connection is established, meta is there and so on.
-    Table table = connection.getTable(tName);
     Put p = new Put(row);
-    p.addColumn(cfName, qualifier, initValue);
+    p.addColumn(cf, qualifier, val);
     table.put(p);
 
     // do some puts and gets
@@ -672,28 +515,15 @@ public class TestRegionServerMetrics {
       }
     }
     assertEquals(true, timeRangeCountUpdated);
-
-    table.close();
   }
 
   @Test
   public void testAverageRegionSize() throws Exception {
-    TableName tableName = TableName.valueOf("testAverageRegionSize");
-    byte[] cf = Bytes.toBytes("d");
-    byte[] row = Bytes.toBytes("rk");
-    byte[] qualifier = Bytes.toBytes("qual");
-    byte[] val = Bytes.toBytes("Value");
-
     //Force a hfile.
-    Table t = TEST_UTIL.createTable(tableName, cf);
-    Put p = new Put(row);
-    p.addColumn(cf, qualifier, val);
-    t.put(p);
+    doNPuts(1, false);
     TEST_UTIL.getHBaseAdmin().flush(tableName);
 
     metricsRegionServer.getRegionServerWrapper().forceRecompute();
     assertTrue(metricsHelper.getGaugeDouble("averageRegionSize", serverSource) > 0.0);
-
-    t.close();
   }
 }


[45/50] hbase git commit: HBASE-15952 Bulk load data replication is not working when RS user does not have permission on hfile-refs node

Posted by sy...@apache.org.
HBASE-15952 Bulk load data replication is not working when RS user does not have permission on hfile-refs node
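
In outline, the fix moves the hfile-refs znode bookkeeping out of ReplicationPeersZKImpl and behind the ReplicationQueues interface, whose implementations run with the region server's ZooKeeper credentials. A sketch of the resulting call pattern (the wrapper class and method names below are illustrative; the two queue methods are the ones shown in the diff):

    import org.apache.hadoop.hbase.replication.ReplicationException;
    import org.apache.hadoop.hbase.replication.ReplicationQueues;

    public class HFileRefsPeerLifecycleSketch {
      // 'queues' would come from the region server's replication machinery.
      public static void onPeerAdded(ReplicationQueues queues, String peerId)
          throws ReplicationException {
        // Creates the peer's child node under hfile-refs as the RS user.
        queues.addPeerToHFileRefs(peerId);
      }

      public static void onPeerRemoved(ReplicationQueues queues, String peerId) {
        // New in this change; drops the peer's hfile-refs node.
        queues.removePeerFromHFileRefs(peerId);
      }
    }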


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9012a0b1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9012a0b1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9012a0b1

Branch: refs/heads/hbase-12439
Commit: 9012a0b123b3eea8b08c8687cef812e83e9b491d
Parents: 41cc215
Author: Ashish Singhi <as...@apache.org>
Authored: Thu Jun 9 18:44:29 2016 +0530
Committer: Ashish Singhi <as...@apache.org>
Committed: Thu Jun 9 18:44:29 2016 +0530

----------------------------------------------------------------------
 .../replication/ReplicationPeersZKImpl.java     | 21 -------------
 .../hbase/replication/ReplicationQueues.java    |  6 ++++
 .../replication/ReplicationQueuesHBaseImpl.java |  6 ++++
 .../replication/ReplicationQueuesZKImpl.java    | 33 ++++++++++++++++----
 .../regionserver/ReplicationSourceManager.java  | 11 +++++--
 .../cleaner/TestReplicationHFileCleaner.java    |  1 +
 .../replication/TestReplicationStateBasic.java  |  5 +++
 7 files changed, 53 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9012a0b1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 15265d9..5af97c2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -129,17 +129,6 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
 
       ZKUtil.createWithParents(this.zookeeper, this.peersZNode);
 
-      // Irrespective of bulk load hfile replication is enabled or not we add peerId node to
-      // hfile-refs node -- HBASE-15397
-      try {
-        String peerId = ZKUtil.joinZNode(this.hfileRefsZNode, id);
-        LOG.info("Adding peer " + peerId + " to hfile reference queue.");
-        ZKUtil.createWithParents(this.zookeeper, peerId);
-      } catch (KeeperException e) {
-        throw new ReplicationException("Failed to add peer with id=" + id
-            + ", node under hfile references node.", e);
-      }
-
       List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
       ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(getPeerNode(id),
         ReplicationSerDeHelper.toByteArray(peerConfig));
@@ -166,16 +155,6 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
             + " because that id does not exist.");
       }
       ZKUtil.deleteNodeRecursively(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id));
-      // Delete peerId node from hfile-refs node irrespective of whether bulk loaded hfile
-      // replication is enabled or not
-
-      String peerId = ZKUtil.joinZNode(this.hfileRefsZNode, id);
-      try {
-        LOG.info("Removing peer " + peerId + " from hfile reference queue.");
-        ZKUtil.deleteNodeRecursively(this.zookeeper, peerId);
-      } catch (NoNodeException e) {
-        LOG.info("Did not find node " + peerId + " to delete.", e);
-      }
     } catch (KeeperException e) {
       throw new ReplicationException("Could not remove peer with id=" + id, e);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9012a0b1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
index db6da91..809b122 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
@@ -123,6 +123,12 @@ public interface ReplicationQueues {
   void addPeerToHFileRefs(String peerId) throws ReplicationException;
 
   /**
+   * Remove a peer from hfile reference queue.
+   * @param peerId peer cluster id to be removed
+   */
+  void removePeerFromHFileRefs(String peerId);
+
+  /**
    * Add new hfile references to the queue.
    * @param peerId peer cluster id to which the hfiles need to be replicated
    * @param files list of hfile references to be added

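For orientation, a minimal sketch of how the two hfile-refs methods are meant to be used together now that the znode management has moved out of peer add/remove; the Configuration handle, the ReplicationQueues instance and the peer id below are illustrative, not part of this patch:

    // minimal sketch, assuming a ReplicationQueues instance rq and a Configuration conf
    boolean bulkLoadEnabled = conf.getBoolean(
        HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,   // "hbase.replication.bulkload.enabled"
        HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
    if (bulkLoadEnabled) {
      rq.addPeerToHFileRefs("1");     // throws ReplicationException on ZK failure
    }
    // ... later, when the peer is removed:
    rq.removePeerFromHFileRefs("1");  // in the ZK implementation, a no-op if the znode is absent
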
http://git-wip-us.apache.org/repos/asf/hbase/blob/9012a0b1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
index bbc9e32..29f0632 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesHBaseImpl.java
@@ -302,6 +302,12 @@ public class ReplicationQueuesHBaseImpl implements ReplicationQueues {
   }
 
   @Override
+  public void removePeerFromHFileRefs(String peerId) {
+    // TODO
+    throw new NotImplementedException();
+  }
+
+  @Override
   public void addHFileRefs(String peerId, List<String> files) throws ReplicationException {
     // TODO
     throw new NotImplementedException();

http://git-wip-us.apache.org/repos/asf/hbase/blob/9012a0b1/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 32d0883..f03efff 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -89,12 +89,14 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
     } catch (KeeperException e) {
       throw new ReplicationException("Could not initialize replication queues.", e);
     }
-    // Irrespective of bulk load hfile replication is enabled or not we add peerId node to
-    // hfile-refs node -- HBASE-15397
-    try {
-      ZKUtil.createWithParents(this.zookeeper, this.hfileRefsZNode);
-    } catch (KeeperException e) {
-      throw new ReplicationException("Could not initialize hfile references replication queue.", e);
+    if (conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
+      HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) {
+      try {
+        ZKUtil.createWithParents(this.zookeeper, this.hfileRefsZNode);
+      } catch (KeeperException e) {
+        throw new ReplicationException("Could not initialize hfile references replication queue.",
+            e);
+      }
     }
   }
 
@@ -504,4 +506,23 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
           e);
     }
   }
+
+  @Override
+  public void removePeerFromHFileRefs(String peerId) {
+    final String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
+    try {
+      if (ZKUtil.checkExists(this.zookeeper, peerZnode) == -1) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Peer " + peerZnode + " not found in hfile reference queue.");
+        }
+        return;
+      } else {
+        LOG.info("Removing peer " + peerZnode + " from hfile reference queue.");
+        ZKUtil.deleteNodeRecursively(this.zookeeper, peerZnode);
+      }
+    } catch (KeeperException e) {
+      LOG.error("Ignoring the exception to remove peer " + peerId + " from hfile reference queue.",
+        e);
+    }
+  }
 }

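The znode layout the method above operates on looks roughly like the following (default znode names assumed; the peer id and file name are illustrative):

    /hbase/replication/hfile-refs              <- hfileRefsZNode
    /hbase/replication/hfile-refs/1            <- per-peer queue, peerId "1"
    /hbase/replication/hfile-refs/1/f1a2b3...  <- one pending hfile reference

deleteNodeRecursively removes the per-peer subtree in one shot, so any hfile references still queued for the removed peer are dropped with it.
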
http://git-wip-us.apache.org/repos/asf/hbase/blob/9012a0b1/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index ed2eecc..e9330f4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -115,6 +115,7 @@ public class ReplicationSourceManager implements ReplicationListener {
   private final ThreadPoolExecutor executor;
 
   private final Random rand;
+  private final boolean replicationForBulkLoadDataEnabled;
 
 
   /**
@@ -166,6 +167,9 @@ public class ReplicationSourceManager implements ReplicationListener {
     this.executor.setThreadFactory(tfb.build());
     this.rand = new Random();
     this.latestPaths = Collections.synchronizedSet(new HashSet<Path>());
+    replicationForBulkLoadDataEnabled =
+        conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
+          HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
   }
 
   /**
@@ -227,9 +231,6 @@ public class ReplicationSourceManager implements ReplicationListener {
    * old region server wal queues
    */
   protected void init() throws IOException, ReplicationException {
-    boolean replicationForBulkLoadDataEnabled =
-        conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
-          HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
     for (String id : this.replicationPeers.getPeerIds()) {
       addSource(id);
       if (replicationForBulkLoadDataEnabled) {
@@ -579,6 +580,7 @@ public class ReplicationSourceManager implements ReplicationListener {
   @Override
   public void peerRemoved(String peerId) {
     removePeer(peerId);
+    this.replicationQueues.removePeerFromHFileRefs(peerId);
   }
 
   @Override
@@ -588,6 +590,9 @@ public class ReplicationSourceManager implements ReplicationListener {
         boolean added = this.replicationPeers.peerAdded(id);
         if (added) {
           addSource(id);
+          if (replicationForBulkLoadDataEnabled) {
+            this.replicationQueues.addPeerToHFileRefs(id);
+          }
         }
       } catch (Exception e) {
         LOG.error("Error while adding a new peer", e);

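Since the flag is now cached at construction time, where it comes from matters; a sketch of enabling bulk load replication via the standard Configuration API (the string value of the constant is shown for reference):

    Configuration conf = HBaseConfiguration.create();
    // HConstants.REPLICATION_BULKLOAD_ENABLE_KEY == "hbase.replication.bulkload.enabled"
    conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);

Because the value is read once in the ReplicationSourceManager constructor, flipping it on a live cluster requires a region server restart to take effect.
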
http://git-wip-us.apache.org/repos/asf/hbase/blob/9012a0b1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
index 1778e73..e5f1e69 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.java
@@ -111,6 +111,7 @@ public class TestReplicationHFileCleaner {
   public void setup() throws ReplicationException, IOException {
     root = TEST_UTIL.getDataTestDirOnTestFS();
     rp.addPeer(peerId, new ReplicationPeerConfig().setClusterKey(TEST_UTIL.getClusterKey()));
+    rq.addPeerToHFileRefs(peerId);
   }
 
   @After

http://git-wip-us.apache.org/repos/asf/hbase/blob/9012a0b1/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
index 5ab26ab..de5cc31 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java
@@ -204,6 +204,7 @@ public abstract class TestReplicationStateBasic {
     assertNull(rqc.getReplicableHFiles(ID_ONE));
     assertEquals(0, rqc.getAllPeersFromHFileRefsQueue().size());
     rp.addPeer(ID_ONE, new ReplicationPeerConfig().setClusterKey(KEY_ONE));
+    rq1.addPeerToHFileRefs(ID_ONE);
     rq1.addHFileRefs(ID_ONE, files1);
     assertEquals(1, rqc.getAllPeersFromHFileRefsQueue().size());
     assertEquals(3, rqc.getReplicableHFiles(ID_ONE).size());
@@ -225,7 +226,9 @@ public abstract class TestReplicationStateBasic {
 
     rp.init();
     rp.addPeer(ID_ONE, new ReplicationPeerConfig().setClusterKey(KEY_ONE));
+    rq1.addPeerToHFileRefs(ID_ONE);
     rp.addPeer(ID_TWO, new ReplicationPeerConfig().setClusterKey(KEY_TWO));
+    rq1.addPeerToHFileRefs(ID_TWO);
 
     List<String> files1 = new ArrayList<String>(3);
     files1.add("file_1");
@@ -238,11 +241,13 @@ public abstract class TestReplicationStateBasic {
     assertEquals(3, rqc.getReplicableHFiles(ID_TWO).size());
 
     rp.removePeer(ID_ONE);
+    rq1.removePeerFromHFileRefs(ID_ONE);
     assertEquals(1, rqc.getAllPeersFromHFileRefsQueue().size());
     assertNull(rqc.getReplicableHFiles(ID_ONE));
     assertEquals(3, rqc.getReplicableHFiles(ID_TWO).size());
 
     rp.removePeer(ID_TWO);
+    rq1.removePeerFromHFileRefs(ID_TWO);
     assertEquals(0, rqc.getAllPeersFromHFileRefsQueue().size());
     assertNull(rqc.getReplicableHFiles(ID_TWO));
   }


[40/50] hbase git commit: HBASE-15959 Fix flaky test TestRegionServerMetrics.testMobMetrics (Huaxiang Sun and Jingcheng Du)

Posted by sy...@apache.org.
HBASE-15959 Fix flaky test TestRegionServerMetrics.testMobMetrics (Huaxiang Sun and Jingcheng Du)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a7172d56
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a7172d56
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a7172d56

Branch: refs/heads/hbase-12439
Commit: a7172d5611f33dde2566a4e140aeddc675a56f3a
Parents: d9463bc
Author: Jonathan M Hsieh <jm...@apache.org>
Authored: Wed Jun 8 19:07:13 2016 -0700
Committer: Jonathan M Hsieh <jm...@apache.org>
Committed: Wed Jun 8 19:07:13 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/TestRegionServerMetrics.java       | 4 ++++
 1 file changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a7172d56/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index 06db468..89a82a7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -451,6 +451,10 @@ public class TestRegionServerMetrics {
         admin.flush(tableName);
       }
       region.getTableDesc().getFamily(cf).setMobThreshold(0);
+
+      // closing the region forces the compaction.discharger to archive the compacted hfiles
+      ((HRegion) region).close();
+
       // metrics are reset by the region initialization
       ((HRegion) region).initialize();
       region.compact(true);


[38/50] hbase git commit: HBASE-15107 Procedure v2 - Procedure Queue with Region locks

Posted by sy...@apache.org.
HBASE-15107 Procedure v2 - Procedure Queue with Region locks


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d5d9b7d5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d5d9b7d5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d5d9b7d5

Branch: refs/heads/hbase-12439
Commit: d5d9b7d500c4e2bdf67abed462eeee966f7bf7df
Parents: d05a372
Author: Matteo Bertozzi <ma...@cloudera.com>
Authored: Wed Jun 8 12:52:58 2016 -0700
Committer: Matteo Bertozzi <ma...@cloudera.com>
Committed: Wed Jun 8 12:52:58 2016 -0700

----------------------------------------------------------------------
 .../hadoop/hbase/procedure2/Procedure.java      |  31 +-
 .../hbase/procedure2/ProcedureExecutor.java     |  19 +-
 .../procedure2/ProcedureSuspendedException.java |  39 +++
 .../hbase/procedure2/SequentialProcedure.java   |   2 +-
 .../procedure/MasterProcedureScheduler.java     | 286 +++++++++++++++++--
 .../procedure/TableProcedureInterface.java      |   1 +
 .../procedure/TestMasterProcedureScheduler.java | 228 ++++++++++++++-
 7 files changed, 580 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d9b7d5/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index 7e58420..ee61841 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -79,6 +79,9 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
   private int childrenLatch = 0;
   private long lastUpdate;
 
+  // TODO: it would be nice to have pointers to let the scheduler do suspend/resume tricks
+  private boolean suspended = false;
+
   private RemoteProcedureException exception = null;
   private byte[] result = null;
 
@@ -94,7 +97,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
    * @throws InterruptedException the procedure will be added back to the queue and retried later
    */
   protected abstract Procedure[] execute(TEnvironment env)
-    throws ProcedureYieldException, InterruptedException;
+    throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException;
 
   /**
    * The code to undo what done by the execute() code.
@@ -276,6 +279,9 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
    */
   protected void toStringState(StringBuilder builder) {
     builder.append(getState());
+    if (isSuspended()) {
+      builder.append("|SUSPENDED");
+    }
   }
 
   /**
@@ -319,7 +325,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
   }
 
   public long getParentProcId() {
-    return parentProcId;
+    return parentProcId.longValue();
   }
 
   public NonceKey getNonceKey() {
@@ -371,6 +377,23 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
     return false;
   }
 
+  /**
+   * @return true if the procedure is in a suspended state,
+   *         waiting for the resources required to execute the procedure to become available.
+   */
+  public synchronized boolean isSuspended() {
+    return suspended;
+  }
+
+  public synchronized void suspend() {
+    suspended = true;
+  }
+
+  public synchronized void resume() {
+    assert isSuspended() : this + " expected suspended state, got " + state;
+    suspended = false;
+  }
+
   public synchronized RemoteProcedureException getException() {
     return exception;
   }
@@ -398,7 +421,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
    * @return the timeout in msec
    */
   public int getTimeout() {
-    return timeout;
+    return timeout.intValue();
   }
 
   /**
@@ -494,7 +517,7 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
    */
   @InterfaceAudience.Private
   protected Procedure[] doExecute(final TEnvironment env)
-      throws ProcedureYieldException, InterruptedException {
+      throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
     try {
       updateTimestamp();
       return execute(env);

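To make the new checked exception concrete, here is a hypothetical procedure shape using it; the class name, the hri field and the env.getProcedureQueue() accessor are assumptions for illustration, not part of this patch:

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    // sketch: park on the scheduler instead of spin-yielding on a busy region
    public class SketchRegionProcedure extends Procedure<MasterProcedureEnv> {
      private HRegionInfo hri;  // assumed to be set before submission

      @Override
      protected Procedure[] execute(final MasterProcedureEnv env)
          throws ProcedureSuspendedException {
        // waitRegion() either grants the region xlock (returns true) or registers
        // this procedure on the RegionEvent and returns false
        if (!env.getProcedureQueue().waitRegion(this, hri)) {
          throw new ProcedureSuspendedException();  // executor leaves state as-is
        }
        try {
          // ... region work ...
          return null;
        } finally {
          env.getProcedureQueue().wakeRegion(this, hri);  // wakes the next waiter
        }
      }

      @Override protected void rollback(MasterProcedureEnv env) { /* sketch: no-op */ }
      @Override protected boolean abort(MasterProcedureEnv env) { return false; }
      @Override protected void serializeStateData(OutputStream out) throws IOException { }
      @Override protected void deserializeStateData(InputStream in) throws IOException { }
    }
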
http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d9b7d5/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index f43b65f..9d71f65 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -505,15 +505,25 @@ public class ProcedureExecutor<TEnvironment> {
       }
     };
 
+    long st, et;
+
     // Acquire the store lease.
+    st = EnvironmentEdgeManager.currentTime();
     store.recoverLease();
+    et = EnvironmentEdgeManager.currentTime();
+    LOG.info(String.format("recover procedure store (%s) lease: %s",
+      store.getClass().getSimpleName(), StringUtils.humanTimeDiff(et - st)));
 
     // TODO: Split in two steps.
     // TODO: Handle corrupted procedures (currently just a warn)
     // The first one will make sure that we have the latest id,
     // so we can start the threads and accept new procedures.
     // The second step will do the actual load of old procedures.
+    st = EnvironmentEdgeManager.currentTime();
     load(abortOnCorruption);
+    et = EnvironmentEdgeManager.currentTime();
+    LOG.info(String.format("load procedure store (%s): %s",
+      store.getClass().getSimpleName(), StringUtils.humanTimeDiff(et - st)));
 
     // Start the executors. Here we must have the lastProcId set.
     for (int i = 0; i < threads.length; ++i) {
@@ -840,7 +850,7 @@ public class ProcedureExecutor<TEnvironment> {
       }
 
       // Execute the procedure
-      assert proc.getState() == ProcedureState.RUNNABLE;
+      assert proc.getState() == ProcedureState.RUNNABLE : proc;
       if (proc.acquireLock(getEnvironment())) {
         execProcedure(procStack, proc);
         proc.releaseLock(getEnvironment());
@@ -1042,6 +1052,7 @@ public class ProcedureExecutor<TEnvironment> {
     Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE);
 
     // Execute the procedure
+    boolean isSuspended = false;
     boolean reExecute = false;
     Procedure[] subprocs = null;
     do {
@@ -1051,6 +1062,8 @@ public class ProcedureExecutor<TEnvironment> {
         if (subprocs != null && subprocs.length == 0) {
           subprocs = null;
         }
+      } catch (ProcedureSuspendedException e) {
+        isSuspended = true;
       } catch (ProcedureYieldException e) {
         if (LOG.isTraceEnabled()) {
           LOG.trace("Yield procedure: " + procedure + ": " + e.getMessage());
@@ -1086,7 +1099,7 @@ public class ProcedureExecutor<TEnvironment> {
                 break;
               }
 
-              assert subproc.getState() == ProcedureState.INITIALIZING;
+              assert subproc.getState() == ProcedureState.INITIALIZING : subproc;
               subproc.setParentProcId(procedure.getProcId());
               subproc.setProcId(nextProcId());
             }
@@ -1107,7 +1120,7 @@ public class ProcedureExecutor<TEnvironment> {
           }
         } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) {
           waitingTimeout.add(procedure);
-        } else {
+        } else if (!isSuspended) {
           // No subtask, so we are done
           procedure.setState(ProcedureState.FINISHED);
         }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d9b7d5/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java
new file mode 100644
index 0000000..f28d57a
--- /dev/null
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class ProcedureSuspendedException extends ProcedureException {
+  /** default constructor */
+  public ProcedureSuspendedException() {
+    super();
+  }
+
+  /**
+   * Constructor
+   * @param s message
+   */
+  public ProcedureSuspendedException(String s) {
+    super(s);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d9b7d5/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
index 636a037..f0bcdea 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java
@@ -42,7 +42,7 @@ public abstract class SequentialProcedure<TEnvironment> extends Procedure<TEnvir
 
   @Override
   protected Procedure[] doExecute(final TEnvironment env)
-      throws ProcedureYieldException, InterruptedException {
+      throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
     updateTimestamp();
     try {
       Procedure[] children = !executed ? execute(env) : null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d9b7d5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 5f37720..d4791fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -20,12 +20,15 @@ package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
 import java.util.ArrayDeque;
+import java.util.Arrays;
+import java.util.HashMap;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
@@ -103,6 +106,10 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
   }
 
   private void doAdd(final Procedure proc, final boolean addFront) {
+    doAdd(proc, addFront, true);
+  }
+
+  private void doAdd(final Procedure proc, final boolean addFront, final boolean notify) {
     schedLock.lock();
     try {
       if (isTableProcedure(proc)) {
@@ -117,7 +124,9 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
         throw new UnsupportedOperationException(
           "RQs for non-table/non-server procedures are not implemented yet");
       }
-      schedWaitCond.signal();
+      if (notify) {
+        schedWaitCond.signal();
+      }
     } finally {
       schedLock.unlock();
     }
@@ -125,12 +134,28 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
 
   private <T extends Comparable<T>> void doAdd(final FairQueue<T> fairq,
       final Queue<T> queue, final Procedure proc, final boolean addFront) {
+    if (proc.isSuspended()) return;
+
     queue.add(proc, addFront);
+
     if (!(queue.isSuspended() || queue.hasExclusiveLock())) {
+      // the queue is neither suspended nor removed from the fairq (run-queue)
+      // by someone holding an xlock on it.
+      // so, if the queue is not yet linked we should add it
       if (queue.size() == 1 && !IterableList.isLinked(queue)) {
         fairq.add(queue);
       }
       queueSize++;
+    } else if (proc.hasParent() && queue.isLockOwner(proc.getParentProcId())) {
+      assert addFront : "expected to add a child in the front";
+      assert !queue.isSuspended() : "unexpected suspended state for the queue";
+      // our (proc) parent has the xlock,
+      // so the queue is not in the fairq (run-queue)
+      // add it back to let the child run (inherit the lock)
+      if (!IterableList.isLinked(queue)) {
+        fairq.add(queue);
+      }
+      queueSize++;
     }
   }
 
@@ -140,7 +165,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
   }
 
   @edu.umd.cs.findbugs.annotations.SuppressWarnings("WA_AWAIT_NOT_IN_LOOP")
-  Procedure poll(long waitNsec) {
+  protected Procedure poll(long waitNsec) {
     Procedure pollResult = null;
     schedLock.lock();
     try {
@@ -185,7 +210,16 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
     this.queueSize--;
     if (rq.isEmpty() || rq.requireExclusiveLock(pollResult)) {
       removeFromRunQueue(fairq, rq);
+    } else if (pollResult.hasParent() && rq.isLockOwner(pollResult.getParentProcId())) {
+      // if the rq is in the fairq because of runnable child
+      // check if the next procedure is still a child.
+      // if not, remove the rq from the fairq and go back to the xlock state
+      Procedure nextProc = rq.peek();
+      if (nextProc != null && nextProc.getParentProcId() != pollResult.getParentProcId()) {
+        removeFromRunQueue(fairq, rq);
+      }
     }
+
     return pollResult;
   }
 
@@ -300,18 +334,25 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
   }
 
   public boolean waitEvent(ProcedureEvent event, Procedure procedure, boolean suspendQueue) {
+    return waitEvent(event, /* lockEvent= */false, procedure, suspendQueue);
+  }
+
+  private boolean waitEvent(ProcedureEvent event, boolean lockEvent,
+      Procedure procedure, boolean suspendQueue) {
     synchronized (event) {
       if (event.isReady()) {
+        if (lockEvent) {
+          event.setReady(false);
+        }
         return false;
       }
 
-      // TODO: Suspend single procedure not implemented yet, fallback to suspending the queue
-      if (!suspendQueue) suspendQueue = true;
-
-      if (isTableProcedure(procedure)) {
-        waitTableEvent(event, procedure, suspendQueue);
+      if (!suspendQueue) {
+        suspendProcedure(event, procedure);
+      } else if (isTableProcedure(procedure)) {
+        waitTableEvent(event, procedure);
       } else if (isServerProcedure(procedure)) {
-        waitServerEvent(event, procedure, suspendQueue);
+        waitServerEvent(event, procedure);
       } else {
         // TODO: at the moment we only have Table and Server procedures
         // if you are implementing a non-table/non-server procedure, you have two options: create
@@ -324,17 +365,16 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
     return true;
   }
 
-  private void waitTableEvent(ProcedureEvent event, Procedure procedure, boolean suspendQueue) {
+  private void waitTableEvent(ProcedureEvent event, Procedure procedure) {
     final TableName tableName = getTableName(procedure);
     final boolean isDebugEnabled = LOG.isDebugEnabled();
 
     schedLock.lock();
     try {
       TableQueue queue = getTableQueue(tableName);
+      queue.addFront(procedure);
       if (queue.isSuspended()) return;
 
-      // TODO: if !suspendQueue
-
       if (isDebugEnabled) {
         LOG.debug("Suspend table queue " + tableName);
       }
@@ -346,7 +386,7 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
     }
   }
 
-  private void waitServerEvent(ProcedureEvent event, Procedure procedure, boolean suspendQueue) {
+  private void waitServerEvent(ProcedureEvent event, Procedure procedure) {
     final ServerName serverName = getServerName(procedure);
     final boolean isDebugEnabled = LOG.isDebugEnabled();
 
@@ -354,10 +394,9 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
     try {
       // TODO: This will change once we have the new AM
       ServerQueue queue = getServerQueue(serverName);
+      queue.addFront(procedure);
       if (queue.isSuspended()) return;
 
-      // TODO: if !suspendQueue
-
       if (isDebugEnabled) {
         LOG.debug("Suspend server queue " + serverName);
       }
@@ -399,6 +438,10 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
           addToRunQueue(serverRunQueue, queue);
         }
 
+        while (event.hasWaitingProcedures()) {
+          wakeProcedure(event.popWaitingProcedure(false));
+        }
+
         if (queueSize > 1) {
           schedWaitCond.signalAll();
         } else if (queueSize > 0) {
@@ -410,7 +453,41 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
     }
   }
 
-  public static class ProcedureEvent {
+  private void suspendProcedure(BaseProcedureEvent event, Procedure procedure) {
+    procedure.suspend();
+    event.suspendProcedure(procedure);
+  }
+
+  private void wakeProcedure(Procedure procedure) {
+    procedure.resume();
+    doAdd(procedure, /* addFront= */ true, /* notify= */false);
+  }
+
+  private static abstract class BaseProcedureEvent {
+    private ArrayDeque<Procedure> waitingProcedures = null;
+
+    protected void suspendProcedure(Procedure proc) {
+      if (waitingProcedures == null) {
+        waitingProcedures = new ArrayDeque<Procedure>();
+      }
+      waitingProcedures.addLast(proc);
+    }
+
+    protected boolean hasWaitingProcedures() {
+      return waitingProcedures != null;
+    }
+
+    protected Procedure popWaitingProcedure(boolean popFront) {
+      // it would be nice to use IterableList on a procedure and avoid allocations...
+      Procedure proc = popFront ? waitingProcedures.removeFirst() : waitingProcedures.removeLast();
+      if (waitingProcedures.isEmpty()) {
+        waitingProcedures = null;
+      }
+      return proc;
+    }
+  }
+
+  public static class ProcedureEvent extends BaseProcedureEvent {
     private final String description;
 
     private Queue<ServerName> waitingServers = null;
@@ -585,9 +662,47 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
     }
   }
 
+  private static class RegionEvent extends BaseProcedureEvent {
+    private final HRegionInfo regionInfo;
+    private long exclusiveLockProcIdOwner = Long.MIN_VALUE;
+
+    public RegionEvent(HRegionInfo regionInfo) {
+      this.regionInfo = regionInfo;
+    }
+
+    public boolean hasExclusiveLock() {
+      return exclusiveLockProcIdOwner != Long.MIN_VALUE;
+    }
+
+    public boolean isLockOwner(long procId) {
+      return exclusiveLockProcIdOwner == procId;
+    }
+
+    public boolean tryExclusiveLock(long procIdOwner) {
+      assert procIdOwner != Long.MIN_VALUE;
+      if (hasExclusiveLock()) return false;
+      exclusiveLockProcIdOwner = procIdOwner;
+      return true;
+    }
+
+    private void releaseExclusiveLock() {
+      exclusiveLockProcIdOwner = Long.MIN_VALUE;
+    }
+
+    public HRegionInfo getRegionInfo() {
+      return regionInfo;
+    }
+
+    @Override
+    public String toString() {
+      return String.format("region %s event", regionInfo.getRegionNameAsString());
+    }
+  }
+
   public static class TableQueue extends QueueImpl<TableName> {
     private final NamespaceQueue namespaceQueue;
 
+    private HashMap<HRegionInfo, RegionEvent> regionEventMap;
     private TableLock tableLock = null;
 
     public TableQueue(TableName tableName, NamespaceQueue namespaceQueue, int priority) {
@@ -601,7 +716,41 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
 
     @Override
     public synchronized boolean isAvailable() {
-      return super.isAvailable() && !namespaceQueue.hasExclusiveLock();
+      // if there are no items in the queue, or the namespace is locked,
+      // we can't execute operations on this table
+      if (isEmpty() || namespaceQueue.hasExclusiveLock()) {
+        return false;
+      }
+
+      if (hasExclusiveLock()) {
+        // if we have an exclusive lock already taken
+        // only child of the lock owner can be executed
+        Procedure availProc = peek();
+        return availProc != null && availProc.hasParent() &&
+               isLockOwner(availProc.getParentProcId());
+      }
+
+      // no xlock
+      return true;
+    }
+
+    public synchronized RegionEvent getRegionEvent(final HRegionInfo regionInfo) {
+      if (regionEventMap == null) {
+        regionEventMap = new HashMap<HRegionInfo, RegionEvent>();
+      }
+      RegionEvent event = regionEventMap.get(regionInfo);
+      if (event == null) {
+        event = new RegionEvent(regionInfo);
+        regionEventMap.put(regionInfo, event);
+      }
+      return event;
+    }
+
+    public synchronized void removeRegionEvent(final RegionEvent event) {
+      regionEventMap.remove(event.getRegionInfo());
+      if (regionEventMap.isEmpty()) {
+        regionEventMap = null;
+      }
     }
 
     // TODO: We can abort pending/in-progress operation if the new call is
@@ -630,6 +779,13 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
           return !tpi.getTableName().equals(TableName.NAMESPACE_TABLE_NAME);
         case READ:
           return false;
+        // region operations are using the shared-lock on the table
+        // and then they will grab an xlock on the region.
+        case SPLIT:
+        case MERGE:
+        case ASSIGN:
+        case UNASSIGN:
+          return false;
         default:
           break;
       }
@@ -883,6 +1039,100 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
   }
 
   // ============================================================================
+  //  Region Locking Helpers
+  // ============================================================================
+  public boolean waitRegion(final Procedure procedure, final HRegionInfo regionInfo) {
+    return waitRegions(procedure, regionInfo.getTable(), regionInfo);
+  }
+
+  public boolean waitRegions(final Procedure procedure, final TableName table,
+      final HRegionInfo... regionInfo) {
+    Arrays.sort(regionInfo);
+
+    final TableQueue queue;
+    if (procedure.hasParent()) {
+      // the assumption is that the parent procedure have already the table xlock
+      queue = getTableQueueWithLock(table);
+    } else {
+      // acquire the table shared-lock
+      queue = tryAcquireTableQueueSharedLock(procedure, table);
+      if (queue == null) return false;
+    }
+
+    // acquire region xlocks or wait
+    boolean hasLock = true;
+    final RegionEvent[] event = new RegionEvent[regionInfo.length];
+    synchronized (queue) {
+      for (int i = 0; i < regionInfo.length; ++i) {
+        assert regionInfo[i].getTable().equals(table);
+        event[i] = queue.getRegionEvent(regionInfo[i]);
+        if (!event[i].tryExclusiveLock(procedure.getProcId())) {
+          suspendProcedure(event[i], procedure);
+          hasLock = false;
+          while (i-- > 0) {
+            event[i].releaseExclusiveLock();
+          }
+          break;
+        }
+      }
+    }
+
+    if (!hasLock && !procedure.hasParent()) {
+      releaseTableSharedLock(procedure, table);
+    }
+    return hasLock;
+  }
+
+  public void wakeRegion(final Procedure procedure, final HRegionInfo regionInfo) {
+    wakeRegions(procedure, regionInfo.getTable(), regionInfo);
+  }
+
+  public void wakeRegions(final Procedure procedure, final TableName table,
+      final HRegionInfo... regionInfo) {
+    Arrays.sort(regionInfo);
+
+    final TableQueue queue = getTableQueueWithLock(table);
+
+    int numProcs = 0;
+    final Procedure[] nextProcs = new Procedure[regionInfo.length];
+    synchronized (queue) {
+      for (int i = 0; i < regionInfo.length; ++i) {
+        assert regionInfo[i].getTable().equals(table);
+        RegionEvent event = queue.getRegionEvent(regionInfo[i]);
+        event.releaseExclusiveLock();
+        if (event.hasWaitingProcedures()) {
+          // release one procedure at the time since regions has an xlock
+          nextProcs[numProcs++] = event.popWaitingProcedure(true);
+        } else {
+          queue.removeRegionEvent(event);
+        }
+      }
+    }
+
+    // awake procedures if any
+    schedLock.lock();
+    try {
+      for (int i = numProcs - 1; i >= 0; --i) {
+        wakeProcedure(nextProcs[i]);
+      }
+
+      if (numProcs > 1) {
+        schedWaitCond.signalAll();
+      } else if (numProcs > 0) {
+        schedWaitCond.signal();
+      }
+
+      if (!procedure.hasParent()) {
+        // release the table shared-lock.
+        // (if we have a parent, it is holding an xlock so we didn't take the shared-lock)
+        releaseTableSharedLock(procedure, table);
+      }
+    } finally {
+      schedLock.unlock();
+    }
+  }
+
+  // ============================================================================
   //  Namespace Locking Helpers
   // ============================================================================
   /**
@@ -1080,6 +1330,10 @@ public class MasterProcedureScheduler implements ProcedureRunnableSet {
       return sharedLock == 1;
     }
 
+    public synchronized boolean isLockOwner(long procId) {
+      return exclusiveLockProcIdOwner == procId;
+    }
+
     public synchronized boolean tryExclusiveLock(long procIdOwner) {
       assert procIdOwner != Long.MIN_VALUE;
       if (isLocked()) return false;

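Putting the new helpers together, the intended locking protocol for a region-level operation looks like this (a sketch; the scheduler handle, procedure and regions are placeholders):

    // mergeProc has no parent, so waitRegions() first takes the table
    // shared-lock, then tries one exclusive lock per region, in sorted order
    if (scheduler.waitRegions(mergeProc, tableName, regionA, regionB)) {
      try {
        // both region xlocks held; safe to mutate region state
      } finally {
        // releases the region xlocks, wakes at most one waiter per region,
        // and drops the table shared-lock
        scheduler.wakeRegions(mergeProc, tableName, regionA, regionB);
      }
    } else {
      // suspended on the first contended RegionEvent; any xlocks taken so far
      // and the table shared-lock were rolled back, and the procedure will be
      // re-queued when the current owner calls wakeRegions()
    }
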
http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d9b7d5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
index cc088f3..deaf406 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/TableProcedureInterface.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 public interface TableProcedureInterface {
   public enum TableOperationType {
     CREATE, DELETE, DISABLE, EDIT, ENABLE, READ,
+    SPLIT, MERGE, ASSIGN, UNASSIGN, /* region operations */
   };
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d9b7d5/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 12042d8..9c37404 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
-
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashSet;
@@ -29,12 +28,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.util.Bytes;
 
 import org.junit.After;
 import org.junit.Before;
@@ -60,7 +62,8 @@ public class TestMasterProcedureScheduler {
 
   @After
   public void tearDown() throws IOException {
-    assertEquals(0, queue.size());
+    assertEquals("proc-queue expected to be empty", 0, queue.size());
+    queue.clear();
   }
 
   @Test
@@ -346,6 +349,201 @@ public class TestMasterProcedureScheduler {
     assertEquals(4, procId);
   }
 
+  @Test
+  public void testVerifyRegionLocks() throws Exception {
+    final TableName tableName = TableName.valueOf("testtb");
+    final HRegionInfo regionA = new HRegionInfo(tableName, Bytes.toBytes("a"), Bytes.toBytes("b"));
+    final HRegionInfo regionB = new HRegionInfo(tableName, Bytes.toBytes("b"), Bytes.toBytes("c"));
+    final HRegionInfo regionC = new HRegionInfo(tableName, Bytes.toBytes("c"), Bytes.toBytes("d"));
+
+    queue.addBack(new TestTableProcedure(1, tableName,
+          TableProcedureInterface.TableOperationType.EDIT));
+    queue.addBack(new TestRegionProcedure(2, tableName,
+        TableProcedureInterface.TableOperationType.MERGE, regionA, regionB));
+    queue.addBack(new TestRegionProcedure(3, tableName,
+        TableProcedureInterface.TableOperationType.SPLIT, regionA));
+    queue.addBack(new TestRegionProcedure(4, tableName,
+        TableProcedureInterface.TableOperationType.SPLIT, regionB));
+    queue.addBack(new TestRegionProcedure(5, tableName,
+        TableProcedureInterface.TableOperationType.UNASSIGN, regionC));
+
+    // Fetch the 1st item and take the write lock
+    Procedure proc = queue.poll();
+    assertEquals(1, proc.getProcId());
+    assertEquals(true, queue.tryAcquireTableExclusiveLock(proc, tableName));
+
+    // everything is locked by the table operation
+    assertEquals(null, queue.poll(0));
+
+    // release the table lock
+    queue.releaseTableExclusiveLock(proc, tableName);
+
+    // Fetch the 2nd item and take the lock on regionA and regionB
+    Procedure mergeProc = queue.poll();
+    assertEquals(2, mergeProc.getProcId());
+    assertEquals(true, queue.waitRegions(mergeProc, tableName, regionA, regionB));
+
+    // Fetch the 3rd item and then try to lock region A, which will fail
+    // because it is already locked. This procedure will go into a waiting state.
+    // (this stuff will be explicit until we get rid of the zk-lock)
+    Procedure procA = queue.poll();
+    assertEquals(3, procA.getProcId());
+    assertEquals(false, queue.waitRegions(procA, tableName, regionA));
+
+    // Fetch the 4th item, same story as the 3rd
+    Procedure procB = queue.poll();
+    assertEquals(4, procB.getProcId());
+    assertEquals(false, queue.waitRegions(procB, tableName, regionB));
+
+    // Fetch the 5th item, since it is a non-locked region we are able to execute it
+    Procedure procC = queue.poll();
+    assertEquals(5, procC.getProcId());
+    assertEquals(true, queue.waitRegions(procC, tableName, regionC));
+
+    // 3rd and 4th are in the region suspended queue
+    assertEquals(null, queue.poll(0));
+
+    // Release region A-B from merge operation (procId=2)
+    queue.wakeRegions(mergeProc, tableName, regionA, regionB);
+
+    // Fetch the 3rd item, now the lock on the region is available
+    procA = queue.poll();
+    assertEquals(3, procA.getProcId());
+    assertEquals(true, queue.waitRegions(procA, tableName, regionA));
+
+    // Fetch the 4th item, now the lock on the region is available
+    procB = queue.poll();
+    assertEquals(4, procB.getProcId());
+    assertEquals(true, queue.waitRegions(procB, tableName, regionB));
+
+    // release the locks on the regions
+    queue.wakeRegions(procA, tableName, regionA);
+    queue.wakeRegions(procB, tableName, regionB);
+    queue.wakeRegions(procC, tableName, regionC);
+  }
+
+  @Test
+  public void testVerifySubProcRegionLocks() throws Exception {
+    final TableName tableName = TableName.valueOf("testVerifySubProcRegionLocks");
+    final HRegionInfo regionA = new HRegionInfo(tableName, Bytes.toBytes("a"), Bytes.toBytes("b"));
+    final HRegionInfo regionB = new HRegionInfo(tableName, Bytes.toBytes("b"), Bytes.toBytes("c"));
+    final HRegionInfo regionC = new HRegionInfo(tableName, Bytes.toBytes("c"), Bytes.toBytes("d"));
+
+    queue.addBack(new TestTableProcedure(1, tableName,
+        TableProcedureInterface.TableOperationType.ENABLE));
+
+    // Fetch the 1st item from the queue ("the root procedure") and take the table lock
+    Procedure rootProc = queue.poll();
+    assertEquals(1, rootProc.getProcId());
+    assertEquals(true, queue.tryAcquireTableExclusiveLock(rootProc, tableName));
+    assertEquals(null, queue.poll(0));
+
+    // Execute the 1st step of the root-proc.
+    // we should get 3 sub-proc back, one for each region.
+    // (this step is done by the executor/rootProc, we are simulating it)
+    Procedure[] subProcs = new Procedure[] {
+      new TestRegionProcedure(1, 2, tableName,
+        TableProcedureInterface.TableOperationType.ASSIGN, regionA),
+      new TestRegionProcedure(1, 3, tableName,
+        TableProcedureInterface.TableOperationType.ASSIGN, regionB),
+      new TestRegionProcedure(1, 4, tableName,
+        TableProcedureInterface.TableOperationType.ASSIGN, regionC),
+    };
+
+    // at this point the rootProc goes into a waiting state
+    // and the sub-procedures will be added to the queue.
+    // (this step is done by the executor, we are simulating it)
+    for (int i = subProcs.length - 1; i >= 0; --i) {
+      queue.addFront(subProcs[i]);
+    }
+    assertEquals(subProcs.length, queue.size());
+
+    // we should be able to fetch and execute all the sub-procs,
+    // since they are operating on different regions
+    for (int i = 0; i < subProcs.length; ++i) {
+      TestRegionProcedure regionProc = (TestRegionProcedure)queue.poll(0);
+      assertEquals(subProcs[i].getProcId(), regionProc.getProcId());
+      assertEquals(true, queue.waitRegions(regionProc, tableName, regionProc.getRegionInfo()));
+    }
+
+    // nothing else in the queue
+    assertEquals(null, queue.poll(0));
+
+    // release all the region locks
+    for (int i = 0; i < subProcs.length; ++i) {
+      TestRegionProcedure regionProc = (TestRegionProcedure)subProcs[i];
+      queue.wakeRegions(regionProc, tableName, regionProc.getRegionInfo());
+    }
+
+    // nothing else in the queue
+    assertEquals(null, queue.poll(0));
+
+    // release the table lock (for the root procedure)
+    queue.releaseTableExclusiveLock(rootProc, tableName);
+  }
+
+  @Test
+  public void testSuspendedTableQueue() throws Exception {
+    final TableName tableName = TableName.valueOf("testSuspendedQueue");
+
+    queue.addBack(new TestTableProcedure(1, tableName,
+        TableProcedureInterface.TableOperationType.EDIT));
+    queue.addBack(new TestTableProcedure(2, tableName,
+        TableProcedureInterface.TableOperationType.EDIT));
+
+    Procedure proc = queue.poll();
+    assertEquals(1, proc.getProcId());
+    assertTrue(queue.tryAcquireTableExclusiveLock(proc, tableName));
+
+    // Suspend
+    // TODO: If we want to keep the zk-lock we need to retain the lock on suspend
+    ProcedureEvent event = new ProcedureEvent("testSuspendedTableQueueEvent");
+    queue.waitEvent(event, proc, true);
+    queue.releaseTableExclusiveLock(proc, tableName);
+    assertEquals(null, queue.poll(0));
+
+    // Resume
+    queue.wake(event);
+
+    proc = queue.poll();
+    assertTrue(queue.tryAcquireTableExclusiveLock(proc, tableName));
+    assertEquals(1, proc.getProcId());
+    queue.releaseTableExclusiveLock(proc, tableName);
+
+    proc = queue.poll();
+    assertTrue(queue.tryAcquireTableExclusiveLock(proc, tableName));
+    assertEquals(2, proc.getProcId());
+    queue.releaseTableExclusiveLock(proc, tableName);
+  }
+
+  @Test
+  public void testSuspendedProcedure() throws Exception {
+    final TableName tableName = TableName.valueOf("testSuspendedProcedure");
+
+    queue.addBack(new TestTableProcedure(1, tableName,
+        TableProcedureInterface.TableOperationType.READ));
+    queue.addBack(new TestTableProcedure(2, tableName,
+        TableProcedureInterface.TableOperationType.READ));
+
+    Procedure proc = queue.poll();
+    assertEquals(1, proc.getProcId());
+
+    // suspend
+    ProcedureEvent event = new ProcedureEvent("testSuspendedProcedureEvent");
+    queue.waitEvent(event, proc);
+
+    proc = queue.poll();
+    assertEquals(2, proc.getProcId());
+    assertEquals(null, queue.poll(0));
+
+    // resume
+    queue.wake(event);
+
+    proc = queue.poll();
+    assertEquals(1, proc.getProcId());
+    assertEquals(null, queue.poll(0));
+  }
+
   /**
    * Verify that "write" operations for a single table are serialized,
    * but different tables can be executed in parallel.
@@ -522,6 +720,32 @@ public class TestMasterProcedureScheduler {
     }
   }
 
+  public static class TestRegionProcedure extends TestTableProcedure {
+    private final HRegionInfo[] regionInfo;
+
+    public TestRegionProcedure() {
+      throw new UnsupportedOperationException("recovery should not be triggered here");
+    }
+
+    public TestRegionProcedure(long procId, TableName tableName, TableOperationType opType,
+        HRegionInfo... regionInfo) {
+      this(-1, procId, tableName, opType, regionInfo);
+    }
+
+    public TestRegionProcedure(long parentProcId, long procId, TableName tableName,
+        TableOperationType opType, HRegionInfo... regionInfo) {
+      super(procId, tableName, opType);
+      this.regionInfo = regionInfo;
+      if (parentProcId > 0) {
+        setParentProcId(parentProcId);
+      }
+    }
+
+    public HRegionInfo[] getRegionInfo() {
+      return regionInfo;
+    }
+  }
+
   public static class TestNamespaceProcedure extends TestProcedure
       implements TableProcedureInterface {
     private final TableOperationType opType;


[17/50] hbase git commit: HBASE-15888 Extend HBASE-12769 for bulk load data replication

Posted by sy...@apache.org.
HBASE-15888 Extend HBASE-12769 for bulk load data replication


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0cbce076
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0cbce076
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0cbce076

Branch: refs/heads/hbase-12439
Commit: 0cbce07626b77d9aa75a16f5e52c19428865dce7
Parents: 72d3f2a
Author: Ashish Singhi <as...@apache.org>
Authored: Fri Jun 3 18:42:00 2016 +0530
Committer: Ashish Singhi <as...@apache.org>
Committed: Fri Jun 3 18:42:00 2016 +0530

----------------------------------------------------------------------
 .../replication/ReplicationPeersZKImpl.java     |  6 ++
 .../hbase/util/hbck/ReplicationChecker.java     | 59 ++++++++++++++++++--
 2 files changed, 61 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbce076/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 09d2100..15265d9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -550,6 +550,12 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
           }
         }
       }
+      // Check for hfile-refs queue
+      if (-1 != ZKUtil.checkExists(zookeeper, hfileRefsZNode)
+          && queuesClient.getAllPeersFromHFileRefsQueue().contains(peerId)) {
+        throw new ReplicationException("Undeleted queue for peerId: " + peerId
+            + ", found in hfile-refs node path " + hfileRefsZNode);
+      }
     } catch (KeeperException e) {
       throw new ReplicationException("Could not check queues deleted with id=" + peerId, e);
     }

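Operationally this surfaces through hbck. Assuming the existing -fixReplication option is what drives ReplicationChecker, a run against a cluster with a leftover queue would now also flag the hfile-refs side, along the lines of:

    $ hbase hbck -fixReplication
    ERROR: Undeleted replication hfile-refs queue for removed peer found: [2] under hfile-refs node /hbase/replication/hfile-refs

(the peer id and znode path are illustrative; the message text is the one added in ReplicationChecker below).
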
http://git-wip-us.apache.org/repos/asf/hbase/blob/0cbce076/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java
index 89f2557..e472558 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/ReplicationChecker.java
@@ -51,16 +51,21 @@ import org.apache.zookeeper.KeeperException;
 @InterfaceAudience.Private
 public class ReplicationChecker {
   private static final Log LOG = LogFactory.getLog(ReplicationChecker.class);
+  private final ZooKeeperWatcher zkw;
   private final ErrorReporter errorReporter;
   private final ReplicationQueuesClient queuesClient;
   private final ReplicationPeers replicationPeers;
   private final ReplicationQueueDeletor queueDeletor;
   // replicator with its queueIds for removed peers
   private final Map<String, List<String>> undeletedQueueIds = new HashMap<>();
-  
+  // undeleted hfile-refs queue ids (peer ids) for removed peers
+  private final Set<String> undeletedHFileRefsQueueIds = new HashSet<>();
+  private final String hfileRefsZNode;
+
   public ReplicationChecker(Configuration conf, ZooKeeperWatcher zkw, ClusterConnection connection,
       ErrorReporter errorReporter) throws IOException {
     try {
+      this.zkw = zkw;
       this.errorReporter = errorReporter;
       this.queuesClient = ReplicationFactory.getReplicationQueuesClient(zkw, conf, connection);
       this.queuesClient.init();
@@ -71,6 +76,13 @@ public class ReplicationChecker {
     } catch (ReplicationException e) {
       throw new IOException("failed to construct ReplicationChecker", e);
     }
+
+    String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication");
+    String replicationZNode = ZKUtil.joinZNode(this.zkw.baseZNode, replicationZNodeName);
+    String hfileRefsZNodeName =
+        conf.get(ReplicationStateZKBase.ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY,
+          ReplicationStateZKBase.ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT);
+    hfileRefsZNode = ZKUtil.joinZNode(replicationZNode, hfileRefsZNodeName);
   }
 
   public boolean hasUnDeletedQueues() {
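With stock settings the joins in the constructor hunk above resolve to a single
well-known path. The "/hbase" base znode is the usual default and an assumption
here; the other two names are the defaults used by this code:

  // zookeeper.znode.parent       -> "/hbase"       (assumed default)
  // zookeeper.znode.replication  -> "replication"
  // hfile-refs znode name        -> "hfile-refs"
  // => hfileRefsZNode == "/hbase/replication/hfile-refs"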
@@ -103,13 +115,37 @@ public class ReplicationChecker {
     } catch (KeeperException ke) {
       throw new IOException(ke);
     }
+
+    checkUnDeletedHFileRefsQueues(peerIds);
+  }
+
+  private void checkUnDeletedHFileRefsQueues(Set<String> peerIds) throws IOException {
+    try {
+      if (-1 == ZKUtil.checkExists(zkw, hfileRefsZNode)) {
+        return;
+      }
+      List<String> listOfPeers = this.queuesClient.getAllPeersFromHFileRefsQueue();
+      Set<String> peers = new HashSet<>(listOfPeers);
+      peers.removeAll(peerIds);
+      if (!peers.isEmpty()) {
+        undeletedHFileRefsQueueIds.addAll(peers);
+        String msg =
+            "Undeleted replication hfile-refs queue for removed peer found: "
+                + undeletedHFileRefsQueueIds + " under hfile-refs node " + hfileRefsZNode;
+        errorReporter.reportError(HBaseFsck.ErrorReporter.ERROR_CODE.UNDELETED_REPLICATION_QUEUE,
+          msg);
+      }
+    } catch (KeeperException e) {
+      throw new IOException("Failed to get list of all peers from hfile-refs znode "
+          + hfileRefsZNode, e);
+    }
   }
-  
+
   private static class ReplicationQueueDeletor extends ReplicationStateZKBase {
     public ReplicationQueueDeletor(ZooKeeperWatcher zk, Configuration conf, Abortable abortable) {
       super(zk, conf, abortable);
     }
-    
+
     public void removeQueue(String replicator, String queueId) throws IOException {
       String queueZnodePath = ZKUtil.joinZNode(ZKUtil.joinZNode(this.queuesZNode, replicator),
         queueId);
@@ -122,7 +158,7 @@ public class ReplicationChecker {
       }
     }
   }
-  
+
   public void fixUnDeletedQueues() throws IOException {
     for (Entry<String, List<String>> replicatorAndQueueIds : undeletedQueueIds.entrySet()) {
       String replicator = replicatorAndQueueIds.getKey();
@@ -130,5 +166,20 @@ public class ReplicationChecker {
         queueDeletor.removeQueue(replicator, queueId);
       }
     }
+    fixUnDeletedHFileRefsQueue();
+  }
+
+  private void fixUnDeletedHFileRefsQueue() throws IOException {
+    for (String hfileRefsQueueId : undeletedHFileRefsQueueIds) {
+      String node = ZKUtil.joinZNode(hfileRefsZNode, hfileRefsQueueId);
+      try {
+        ZKUtil.deleteNodeRecursively(this.zkw, node);
+        LOG.info("Successfully deleted hfile-refs queue " + hfileRefsQueueId + " from path "
+            + hfileRefsZNode);
+      } catch (KeeperException e) {
+        throw new IOException("Failed to delete hfile-refs queue " + hfileRefsQueueId
+            + " from path " + hfileRefsZNode);
+      }
+    }
   }
 }
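Taken together, hbck can now both report and repair leftover hfile-refs state.
Below is a sketch of the expected driver flow; the checkUnDeletedQueues() entry
point and the surrounding handles (conf, zkw, connection, errorReporter,
fixRequested) are inferred for illustration and not shown in this diff:

  ReplicationChecker checker = new ReplicationChecker(conf, zkw, connection, errorReporter);
  checker.checkUnDeletedQueues();   // now also inspects the hfile-refs znode
  if (checker.hasUnDeletedQueues() && fixRequested) {
    checker.fixUnDeletedQueues();   // now also purges orphaned hfile-refs queues
  }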