Posted to commits@hbase.apache.org by oc...@apache.org on 2015/06/02 12:00:14 UTC

[7/8] hbase git commit: HBASE-13661 Correct binary compatibility issues discovered in 1.1.0RC0

HBASE-13661 Correct binary compatibility issues discovered in 1.1.0RC0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e860c66d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e860c66d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e860c66d

Branch: refs/heads/branch-1.1.0
Commit: e860c66d41ddc8231004b646098a58abca7fb523
Parents: 5ebd912
Author: Enis Soztutar <en...@apache.org>
Authored: Mon May 11 18:33:44 2015 -0700
Committer: Nick Dimiduk <nd...@apache.org>
Committed: Tue May 12 12:08:52 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/ServerLoad.java     |   4 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |  12 ++-
 .../org/apache/hadoop/hbase/client/Result.java  |   2 +
 .../org/apache/hadoop/hbase/client/Scan.java    |   8 ++
 .../java/org/apache/hadoop/hbase/AuthUtil.java  |   2 +-
 .../hbase/master/MetricsMasterSource.java       |   4 +-
 .../hbase/master/MetricsMasterSourceImpl.java   |   3 +-
 .../procedure2/store/ProcedureStoreTracker.java |   2 +-
 .../CorruptedWALProcedureStoreException.java    |   2 +-
 .../protobuf/generated/ClusterStatusProtos.java | 100 +++++++++----------
 .../src/main/protobuf/ClusterStatus.proto       |   4 +-
 .../rest/model/StorageClusterStatusModel.java   |  30 +++---
 .../generated/StorageClusterStatusMessage.java  |  96 +++++++++---------
 .../protobuf/StorageClusterStatusMessage.proto  |   4 +-
 .../hadoop/hbase/master/MetricsMaster.java      |   2 +-
 .../hbase/namespace/NamespaceAuditor.java       |   2 +-
 .../hadoop/hbase/regionserver/wal/WALEdit.java  |   3 +
 17 files changed, 149 insertions(+), 131 deletions(-)
----------------------------------------------------------------------
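
Most of the hunks below widen request counters from int to long, presumably restoring the signatures that already-released client code was compiled against. Return-type changes like this are a binary-compatibility concern because a Java method's return type is part of its binary descriptor: a caller compiled against long getTotalNumberOfRequests() links against descriptor ()J, and running it on a jar that only exposes the int variant (()I) fails with NoSuchMethodError even though the same source would recompile cleanly. A hypothetical caller sketching the failure mode (ClientApp and fetchServerLoad are illustrative names, not HBase API):

import org.apache.hadoop.hbase.ServerLoad;

public class ClientApp {
  public static void main(String[] args) {
    // Placeholder; a real client typically obtains ServerLoad via Admin#getClusterStatus().
    ServerLoad load = fetchServerLoad();
    // Compiled against this patch, the call site references getTotalNumberOfRequests()J.
    // Run against a jar where the method still returns int, the JVM finds only ()I and
    // throws java.lang.NoSuchMethodError on the first invocation.
    long total = load.getTotalNumberOfRequests();
    System.out.println("total requests: " + total);
  }

  private static ServerLoad fetchServerLoad() {
    throw new UnsupportedOperationException("illustrative only");
  }
}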


http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 4f4d301..3d502c0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -86,7 +86,7 @@ public class ServerLoad {
   protected ClusterStatusProtos.ServerLoad serverLoad;
 
   /* @return number of requests  since last report. */
-  public int getNumberOfRequests() {
+  public long getNumberOfRequests() {
     return serverLoad.getNumberOfRequests();
   }
   public boolean hasNumberOfRequests() {
@@ -94,7 +94,7 @@ public class ServerLoad {
   }
 
   /* @return total Number of requests from the start of the region server. */
-  public int getTotalNumberOfRequests() {
+  public long getTotalNumberOfRequests() {
     return serverLoad.getTotalNumberOfRequests();
   }
   public boolean hasTotalNumberOfRequests() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 8c98d69..f82e554 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -49,8 +49,6 @@ import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
 import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
@@ -107,7 +105,7 @@ import com.google.protobuf.ServiceException;
  */
 @InterfaceAudience.Private
 @InterfaceStability.Stable
-public class HTable implements HTableInterface {
+public class HTable implements HTableInterface, RegionLocator {
   private static final Log LOG = LogFactory.getLog(HTable.class);
   protected ClusterConnection connection;
   private final TableName tableName;
@@ -364,7 +362,7 @@ public class HTable implements HTableInterface {
     multiAp = this.connection.getAsyncProcess();
 
     this.closed = false;
-    
+
     this.locator = new HRegionLocator(tableName, connection);
   }
 
@@ -481,6 +479,7 @@ public class HTable implements HTableInterface {
   /**
    * @deprecated Use {@link RegionLocator#getRegionLocation(byte[])} instead.
    */
+  @Override
   @Deprecated
   public HRegionLocation getRegionLocation(final byte [] row)
   throws IOException {
@@ -490,6 +489,7 @@ public class HTable implements HTableInterface {
   /**
    * @deprecated Use {@link RegionLocator#getRegionLocation(byte[], boolean)} instead.
    */
+  @Override
   @Deprecated
   public HRegionLocation getRegionLocation(final byte [] row, boolean reload)
   throws IOException {
@@ -601,6 +601,7 @@ public class HTable implements HTableInterface {
   /**
    * @deprecated Use {@link RegionLocator#getStartEndKeys()} instead;
    */
+  @Override
   @Deprecated
   public byte [][] getStartKeys() throws IOException {
     return locator.getStartKeys();
@@ -609,6 +610,7 @@ public class HTable implements HTableInterface {
   /**
    * @deprecated Use {@link RegionLocator#getEndKeys()} instead;
    */
+  @Override
   @Deprecated
   public byte[][] getEndKeys() throws IOException {
     return locator.getEndKeys();
@@ -617,6 +619,7 @@ public class HTable implements HTableInterface {
   /**
    * @deprecated Use {@link RegionLocator#getStartEndKeys()} instead;
    */
+  @Override
   @Deprecated
   public Pair<byte[][],byte[][]> getStartEndKeys() throws IOException {
     return locator.getStartEndKeys();
@@ -645,6 +648,7 @@ public class HTable implements HTableInterface {
    *
    * @deprecated Use {@link RegionLocator#getAllRegionLocations()} instead;
    */
+  @Override
   @Deprecated
   public List<HRegionLocation> getAllRegionLocations() throws IOException {
     return locator.getAllRegionLocations();
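
Re-declaring HTable as a RegionLocator (and putting @Override back on the deprecated delegating methods) preserves assignability for callers that still treat an HTable as a locator. A minimal sketch of the usage this keeps working, assuming a table named "my_table" exists (the table name and configuration are illustrative):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocatorCompat {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Deprecated constructor, but typical of pre-1.1 client code.
    try (HTable table = new HTable(conf, TableName.valueOf("my_table"))) {
      // Legal only because HTable again declares "implements ..., RegionLocator".
      RegionLocator locator = table;
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      System.out.println("regions for my_table: " + locations.size());
    }
  }
}

New code should follow the deprecation notices above and obtain a RegionLocator from Connection#getRegionLocator(TableName) instead of an HTable.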

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
index bfec680..68eae5a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Result.java
@@ -986,6 +986,7 @@ public class Result implements CellScannable, CellScanner {
    * @throws UnsupportedOperationException if invoked on instance of EMPTY_RESULT
    * (which is supposed to be immutable).
    */
+  @InterfaceAudience.Private
   @Deprecated
   public void addResults(ClientProtos.RegionLoadStats loadStats) {
     checkReadonly();
@@ -996,6 +997,7 @@ public class Result implements CellScannable, CellScanner {
    * Set load information about the region to the information about the result
    * @param loadStats statistics about the current region from which this was returned
    */
+  @InterfaceAudience.Private
   public void setStatistics(ClientProtos.RegionLoadStats loadStats) {
     this.stats = loadStats;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 4604c32..69c9591 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -134,6 +134,14 @@ public class Scan extends Query {
   // scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(tableName))
   static public final String SCAN_ATTRIBUTES_TABLE_NAME = "scan.attributes.table.name";
 
+  /**
+   * @deprecated without replacement
+   *             This is now a no-op, SEEKs and SKIPs are optimizated automatically.
+   *             Will be removed in 2.0+
+   */
+  @Deprecated
+  public static final String HINT_LOOKAHEAD = "_look_ahead_";
+
   /*
    * -1 means no caching
    */
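
HINT_LOOKAHEAD comes back only so that existing code referencing the constant still compiles and links; per the new javadoc the attribute is now ignored, since SEEK vs. SKIP decisions are made automatically. A small sketch of the call pattern being preserved (the column family and hint value are illustrative):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class LookaheadHint {
  public static void main(String[] args) {
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));
    // Older client code set this attribute to hint the server-side SEEK/SKIP choice;
    // after this change it is accepted but has no effect.
    scan.setAttribute(Scan.HINT_LOOKAHEAD, Bytes.toBytes(2));
    System.out.println("hint present: " + (scan.getAttribute(Scan.HINT_LOOKAHEAD) != null));
  }
}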

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
index 4754ea4..b12b1e0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/AuthUtil.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 /**
  * Utility methods for helping with security tasks.
  */
-@InterfaceAudience.Public
+@InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class AuthUtil {
   private static final Log LOG = LogFactory.getLog(AuthUtil.class);

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
index 8d54190..7dcb500 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java
@@ -79,9 +79,9 @@ public interface MetricsMasterSource extends BaseSource {
    *
    * @param inc Ammount to increment the total by.
    */
-  void incRequests(final int inc);
+  void incRequests(final long inc);
+
 
-  
 
 
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
index 02463f6..c5ce5e4 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java
@@ -61,7 +61,8 @@ public class MetricsMasterSourceImpl
     clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0l);
   }
 
-  public void incRequests(final int inc) {
+  @Override
+  public void incRequests(final long inc) {
     this.clusterRequestsCounter.incr(inc);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
index 3a878cc..07fb026 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreTracker.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
  * It can be used by the ProcedureStore to identify which procedures are already
  * deleted/completed to avoid the deserialization step on restart.
  */
-@InterfaceAudience.Public
+@InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class ProcedureStoreTracker {
   private final TreeMap<Long, BitSetNode> map = new TreeMap<Long, BitSetNode>();

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java
index 29db3bf..3292478 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 /**
  * Thrown when a procedure WAL is corrupted
  */
-@InterfaceAudience.Public
+@InterfaceAudience.Private
 @InterfaceStability.Stable
 public class CorruptedWALProcedureStoreException extends HBaseIOException {
   /** default constructor */

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
index 5fc4f43..8176947 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
@@ -7769,9 +7769,9 @@ public final class ClusterStatusProtos {
   public interface ServerLoadOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
-    // optional uint32 number_of_requests = 1;
+    // optional uint64 number_of_requests = 1;
     /**
-     * <code>optional uint32 number_of_requests = 1;</code>
+     * <code>optional uint64 number_of_requests = 1;</code>
      *
      * <pre>
      ** Number of requests since last report. 
@@ -7779,17 +7779,17 @@ public final class ClusterStatusProtos {
      */
     boolean hasNumberOfRequests();
     /**
-     * <code>optional uint32 number_of_requests = 1;</code>
+     * <code>optional uint64 number_of_requests = 1;</code>
      *
      * <pre>
      ** Number of requests since last report. 
      * </pre>
      */
-    int getNumberOfRequests();
+    long getNumberOfRequests();
 
-    // optional uint32 total_number_of_requests = 2;
+    // optional uint64 total_number_of_requests = 2;
     /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
+     * <code>optional uint64 total_number_of_requests = 2;</code>
      *
      * <pre>
      ** Total Number of requests from the start of the region server. 
@@ -7797,13 +7797,13 @@ public final class ClusterStatusProtos {
      */
     boolean hasTotalNumberOfRequests();
     /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
+     * <code>optional uint64 total_number_of_requests = 2;</code>
      *
      * <pre>
      ** Total Number of requests from the start of the region server. 
      * </pre>
      */
-    int getTotalNumberOfRequests();
+    long getTotalNumberOfRequests();
 
     // optional uint32 used_heap_MB = 3;
     /**
@@ -8146,12 +8146,12 @@ public final class ClusterStatusProtos {
             }
             case 8: {
               bitField0_ |= 0x00000001;
-              numberOfRequests_ = input.readUInt32();
+              numberOfRequests_ = input.readUInt64();
               break;
             }
             case 16: {
               bitField0_ |= 0x00000002;
-              totalNumberOfRequests_ = input.readUInt32();
+              totalNumberOfRequests_ = input.readUInt64();
               break;
             }
             case 24: {
@@ -8265,11 +8265,11 @@ public final class ClusterStatusProtos {
     }
 
     private int bitField0_;
-    // optional uint32 number_of_requests = 1;
+    // optional uint64 number_of_requests = 1;
     public static final int NUMBER_OF_REQUESTS_FIELD_NUMBER = 1;
-    private int numberOfRequests_;
+    private long numberOfRequests_;
     /**
-     * <code>optional uint32 number_of_requests = 1;</code>
+     * <code>optional uint64 number_of_requests = 1;</code>
      *
      * <pre>
      ** Number of requests since last report. 
@@ -8279,21 +8279,21 @@ public final class ClusterStatusProtos {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>optional uint32 number_of_requests = 1;</code>
+     * <code>optional uint64 number_of_requests = 1;</code>
      *
      * <pre>
      ** Number of requests since last report. 
      * </pre>
      */
-    public int getNumberOfRequests() {
+    public long getNumberOfRequests() {
       return numberOfRequests_;
     }
 
-    // optional uint32 total_number_of_requests = 2;
+    // optional uint64 total_number_of_requests = 2;
     public static final int TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER = 2;
-    private int totalNumberOfRequests_;
+    private long totalNumberOfRequests_;
     /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
+     * <code>optional uint64 total_number_of_requests = 2;</code>
      *
      * <pre>
      ** Total Number of requests from the start of the region server. 
@@ -8303,13 +8303,13 @@ public final class ClusterStatusProtos {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
+     * <code>optional uint64 total_number_of_requests = 2;</code>
      *
      * <pre>
      ** Total Number of requests from the start of the region server. 
      * </pre>
      */
-    public int getTotalNumberOfRequests() {
+    public long getTotalNumberOfRequests() {
       return totalNumberOfRequests_;
     }
 
@@ -8673,8 +8673,8 @@ public final class ClusterStatusProtos {
     }
 
     private void initFields() {
-      numberOfRequests_ = 0;
-      totalNumberOfRequests_ = 0;
+      numberOfRequests_ = 0L;
+      totalNumberOfRequests_ = 0L;
       usedHeapMB_ = 0;
       maxHeapMB_ = 0;
       regionLoads_ = java.util.Collections.emptyList();
@@ -8722,10 +8722,10 @@ public final class ClusterStatusProtos {
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeUInt32(1, numberOfRequests_);
+        output.writeUInt64(1, numberOfRequests_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeUInt32(2, totalNumberOfRequests_);
+        output.writeUInt64(2, totalNumberOfRequests_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         output.writeUInt32(3, usedHeapMB_);
@@ -8765,11 +8765,11 @@ public final class ClusterStatusProtos {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeUInt32Size(1, numberOfRequests_);
+          .computeUInt64Size(1, numberOfRequests_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeUInt32Size(2, totalNumberOfRequests_);
+          .computeUInt64Size(2, totalNumberOfRequests_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         size += com.google.protobuf.CodedOutputStream
@@ -8891,11 +8891,11 @@ public final class ClusterStatusProtos {
       hash = (19 * hash) + getDescriptorForType().hashCode();
       if (hasNumberOfRequests()) {
         hash = (37 * hash) + NUMBER_OF_REQUESTS_FIELD_NUMBER;
-        hash = (53 * hash) + getNumberOfRequests();
+        hash = (53 * hash) + hashLong(getNumberOfRequests());
       }
       if (hasTotalNumberOfRequests()) {
         hash = (37 * hash) + TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER;
-        hash = (53 * hash) + getTotalNumberOfRequests();
+        hash = (53 * hash) + hashLong(getTotalNumberOfRequests());
       }
       if (hasUsedHeapMB()) {
         hash = (37 * hash) + USED_HEAP_MB_FIELD_NUMBER;
@@ -9046,9 +9046,9 @@ public final class ClusterStatusProtos {
 
       public Builder clear() {
         super.clear();
-        numberOfRequests_ = 0;
+        numberOfRequests_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000001);
-        totalNumberOfRequests_ = 0;
+        totalNumberOfRequests_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000002);
         usedHeapMB_ = 0;
         bitField0_ = (bitField0_ & ~0x00000004);
@@ -9344,10 +9344,10 @@ public final class ClusterStatusProtos {
       }
       private int bitField0_;
 
-      // optional uint32 number_of_requests = 1;
-      private int numberOfRequests_ ;
+      // optional uint64 number_of_requests = 1;
+      private long numberOfRequests_ ;
       /**
-       * <code>optional uint32 number_of_requests = 1;</code>
+       * <code>optional uint64 number_of_requests = 1;</code>
        *
        * <pre>
        ** Number of requests since last report. 
@@ -9357,30 +9357,30 @@ public final class ClusterStatusProtos {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * <code>optional uint32 number_of_requests = 1;</code>
+       * <code>optional uint64 number_of_requests = 1;</code>
        *
        * <pre>
        ** Number of requests since last report. 
        * </pre>
        */
-      public int getNumberOfRequests() {
+      public long getNumberOfRequests() {
         return numberOfRequests_;
       }
       /**
-       * <code>optional uint32 number_of_requests = 1;</code>
+       * <code>optional uint64 number_of_requests = 1;</code>
        *
        * <pre>
        ** Number of requests since last report. 
        * </pre>
        */
-      public Builder setNumberOfRequests(int value) {
+      public Builder setNumberOfRequests(long value) {
         bitField0_ |= 0x00000001;
         numberOfRequests_ = value;
         onChanged();
         return this;
       }
       /**
-       * <code>optional uint32 number_of_requests = 1;</code>
+       * <code>optional uint64 number_of_requests = 1;</code>
        *
        * <pre>
        ** Number of requests since last report. 
@@ -9388,15 +9388,15 @@ public final class ClusterStatusProtos {
        */
       public Builder clearNumberOfRequests() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        numberOfRequests_ = 0;
+        numberOfRequests_ = 0L;
         onChanged();
         return this;
       }
 
-      // optional uint32 total_number_of_requests = 2;
-      private int totalNumberOfRequests_ ;
+      // optional uint64 total_number_of_requests = 2;
+      private long totalNumberOfRequests_ ;
       /**
-       * <code>optional uint32 total_number_of_requests = 2;</code>
+       * <code>optional uint64 total_number_of_requests = 2;</code>
        *
        * <pre>
        ** Total Number of requests from the start of the region server. 
@@ -9406,30 +9406,30 @@ public final class ClusterStatusProtos {
         return ((bitField0_ & 0x00000002) == 0x00000002);
       }
       /**
-       * <code>optional uint32 total_number_of_requests = 2;</code>
+       * <code>optional uint64 total_number_of_requests = 2;</code>
        *
        * <pre>
        ** Total Number of requests from the start of the region server. 
        * </pre>
        */
-      public int getTotalNumberOfRequests() {
+      public long getTotalNumberOfRequests() {
         return totalNumberOfRequests_;
       }
       /**
-       * <code>optional uint32 total_number_of_requests = 2;</code>
+       * <code>optional uint64 total_number_of_requests = 2;</code>
        *
        * <pre>
        ** Total Number of requests from the start of the region server. 
        * </pre>
        */
-      public Builder setTotalNumberOfRequests(int value) {
+      public Builder setTotalNumberOfRequests(long value) {
         bitField0_ |= 0x00000002;
         totalNumberOfRequests_ = value;
         onChanged();
         return this;
       }
       /**
-       * <code>optional uint32 total_number_of_requests = 2;</code>
+       * <code>optional uint64 total_number_of_requests = 2;</code>
        *
        * <pre>
        ** Total Number of requests from the start of the region server. 
@@ -9437,7 +9437,7 @@ public final class ClusterStatusProtos {
        */
       public Builder clearTotalNumberOfRequests() {
         bitField0_ = (bitField0_ & ~0x00000002);
-        totalNumberOfRequests_ = 0;
+        totalNumberOfRequests_ = 0L;
         onChanged();
         return this;
       }
@@ -14743,8 +14743,8 @@ public final class ClusterStatusProtos {
       "ageOfLastShippedOp\030\002 \002(\004\022\026\n\016sizeOfLogQue" +
       "ue\030\003 \002(\r\022 \n\030timeStampOfLastShippedOp\030\004 \002" +
       "(\004\022\026\n\016replicationLag\030\005 \002(\004\"\346\002\n\nServerLoa" +
-      "d\022\032\n\022number_of_requests\030\001 \001(\r\022 \n\030total_n" +
-      "umber_of_requests\030\002 \001(\r\022\024\n\014used_heap_MB\030" +
+      "d\022\032\n\022number_of_requests\030\001 \001(\004\022 \n\030total_n" +
+      "umber_of_requests\030\002 \001(\004\022\024\n\014used_heap_MB\030" +
       "\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022!\n\014region_loa",
       "ds\030\005 \003(\0132\013.RegionLoad\022\"\n\014coprocessors\030\006 " +
       "\003(\0132\014.Coprocessor\022\031\n\021report_start_time\030\007" +

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-protocol/src/main/protobuf/ClusterStatus.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
index 305e08a..597d553 100644
--- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
@@ -154,10 +154,10 @@ message ReplicationLoadSource {
 
 message ServerLoad {
   /** Number of requests since last report. */
-  optional uint32 number_of_requests = 1;
+  optional uint64 number_of_requests = 1;
 
   /** Total Number of requests from the start of the region server. */
-  optional uint32 total_number_of_requests = 2;
+  optional uint64 total_number_of_requests = 2;
 
   /** the amount of used heap, in MB. */
   optional uint32 used_heap_MB = 3;
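
Note that uint32 and uint64 share the same varint wire encoding in protocol buffers, so this widening changes only the generated Java API, not the serialized bytes; an old peer parsing the field as uint32 simply truncates values that no longer fit. A minimal round-trip sketch against the regenerated ClusterStatusProtos (the counts are illustrative):

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;

public class ServerLoadRoundTrip {
  public static void main(String[] args) throws Exception {
    ClusterStatusProtos.ServerLoad load = ClusterStatusProtos.ServerLoad.newBuilder()
        .setNumberOfRequests(123L)                // now a uint64-backed long
        .setTotalNumberOfRequests(4_000_000_000L) // would not fit in the old int accessor
        .build();
    byte[] wire = load.toByteArray();
    ClusterStatusProtos.ServerLoad parsed = ClusterStatusProtos.ServerLoad.parseFrom(wire);
    System.out.println("round-tripped total: " + parsed.getTotalNumberOfRequests());
  }
}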

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
index 3b044e7..32a1691 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java
@@ -46,7 +46,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  * <li>liveNodes: detailed status of the live region servers</li>
  * <li>deadNodes: the names of region servers declared dead</li>
  * </ul>
- * 
+ *
  * <pre>
  * &lt;complexType name="StorageClusterStatus"&gt;
  *   &lt;sequence&gt;
@@ -64,7 +64,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  *
  * &lt;complexType name="Node"&gt;
  *   &lt;sequence&gt;
- *     &lt;element name="region" type="tns:Region" 
+ *     &lt;element name="region" type="tns:Region"
  *       maxOccurs="unbounded" minOccurs="0"&gt;&lt;/element&gt;
  *   &lt;/sequence&gt;
  *   &lt;attribute name="name" type="string"&gt;&lt;/attribute&gt;
@@ -93,7 +93,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  */
 @XmlRootElement(name="ClusterStatus")
 @InterfaceAudience.Private
-public class StorageClusterStatusModel 
+public class StorageClusterStatusModel
     implements Serializable, ProtobufMessageHandler {
   private static final long serialVersionUID = 1L;
 
@@ -181,7 +181,7 @@ public class StorageClusterStatusModel
       }
 
       /**
-       * @return the number of store files 
+       * @return the number of store files
        */
       @XmlAttribute
       public int getStorefiles() {
@@ -253,7 +253,7 @@ public class StorageClusterStatusModel
       }
 
       /**
-       * @return The total number of compacting key-values 
+       * @return The total number of compacting key-values
        */
       @XmlAttribute
       public long getTotalCompactingKVs() {
@@ -367,7 +367,7 @@ public class StorageClusterStatusModel
 
     private String name;
     private long startCode;
-    private int requests;
+    private long requests;
     private int heapSizeMB;
     private int maxHeapSizeMB;
     private List<Region> regions = new ArrayList<Region>();
@@ -380,7 +380,7 @@ public class StorageClusterStatusModel
         int storefileSizeMB, int memstoreSizeMB, int storefileIndexSizeMB,
         long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB,
         int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
-        long totalCompactingKVs, long currentCompactedKVs) { 
+        long totalCompactingKVs, long currentCompactedKVs) {
       regions.add(new Region(name, stores, storefiles, storefileSizeMB,
         memstoreSizeMB, storefileIndexSizeMB, readRequestsCount,
         writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB,
@@ -454,7 +454,7 @@ public class StorageClusterStatusModel
      * @return the number of requests per second processed by the region server
      */
     @XmlAttribute
-    public int getRequests() {
+    public long getRequests() {
       return requests;
     }
 
@@ -497,7 +497,7 @@ public class StorageClusterStatusModel
      * @param requests the number of requests per second processed by the
      * region server
      */
-    public void setRequests(int requests) {
+    public void setRequests(long requests) {
       this.requests = requests;
     }
   }
@@ -505,7 +505,7 @@ public class StorageClusterStatusModel
   private List<Node> liveNodes = new ArrayList<Node>();
   private List<String> deadNodes = new ArrayList<String>();
   private int regions;
-  private int requests;
+  private long requests;
   private double averageLoad;
 
   /**
@@ -584,7 +584,7 @@ public class StorageClusterStatusModel
    * the last reporting interval
    */
   @XmlAttribute
-  public int getRequests() {
+  public long getRequests() {
     return requests;
   }
 
@@ -639,7 +639,7 @@ public class StorageClusterStatusModel
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
-    sb.append(String.format("%d live servers, %d dead servers, " + 
+    sb.append(String.format("%d live servers, %d dead servers, " +
       "%.4f average load%n%n", liveNodes.size(), deadNodes.size(),
       averageLoad));
     if (!liveNodes.isEmpty()) {
@@ -711,7 +711,7 @@ public class StorageClusterStatusModel
     builder.setRequests(requests);
     builder.setAverageLoad(averageLoad);
     for (Node node: liveNodes) {
-      StorageClusterStatus.Node.Builder nodeBuilder = 
+      StorageClusterStatus.Node.Builder nodeBuilder =
         StorageClusterStatus.Node.newBuilder();
       nodeBuilder.setName(node.name);
       nodeBuilder.setStartCode(node.startCode);
@@ -760,10 +760,10 @@ public class StorageClusterStatusModel
     }
     for (StorageClusterStatus.Node node: builder.getLiveNodesList()) {
       long startCode = node.hasStartCode() ? node.getStartCode() : -1;
-      StorageClusterStatusModel.Node nodeModel = 
+      StorageClusterStatusModel.Node nodeModel =
         addLiveNode(node.getName(), startCode, node.getHeapSizeMB(),
           node.getMaxHeapSizeMB());
-      int requests = node.hasRequests() ? node.getRequests() : 0;
+      long requests = node.hasRequests() ? node.getRequests() : 0;
       nodeModel.setRequests(requests);
       for (StorageClusterStatus.Region region: node.getRegionsList()) {
         nodeModel.addRegion(
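
The REST model mirrors the same widening: the per-node and cluster-level requests counters are now long on both the getter and setter side. A short sketch exercising the changed Node accessors directly (host name, start code and counts are illustrative):

import org.apache.hadoop.hbase.rest.model.StorageClusterStatusModel;

public class RestModelRequests {
  public static void main(String[] args) {
    StorageClusterStatusModel status = new StorageClusterStatusModel();
    // addLiveNode(name, startCode, heapSizeMB, maxHeapSizeMB), as used in the patch above.
    StorageClusterStatusModel.Node node =
        status.addLiveNode("rs1.example.com:16020", 1431400000000L, 512, 1024);
    node.setRequests(5_000_000_000L); // would overflow the previous int-typed field
    System.out.println("node handled " + node.getRequests() + " requests");
  }
}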

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
index 05ff7a3..44dce76 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/protobuf/generated/StorageClusterStatusMessage.java
@@ -94,15 +94,15 @@ public final class StorageClusterStatusMessage {
      */
     int getRegions();
 
-    // optional int32 requests = 4;
+    // optional int64 requests = 4;
     /**
-     * <code>optional int32 requests = 4;</code>
+     * <code>optional int64 requests = 4;</code>
      */
     boolean hasRequests();
     /**
-     * <code>optional int32 requests = 4;</code>
+     * <code>optional int64 requests = 4;</code>
      */
-    int getRequests();
+    long getRequests();
 
     // optional double averageLoad = 5;
     /**
@@ -188,7 +188,7 @@ public final class StorageClusterStatusMessage {
             }
             case 32: {
               bitField0_ |= 0x00000002;
-              requests_ = input.readInt32();
+              requests_ = input.readInt64();
               break;
             }
             case 41: {
@@ -1660,15 +1660,15 @@ public final class StorageClusterStatusMessage {
        */
       long getStartCode();
 
-      // optional int32 requests = 3;
+      // optional int64 requests = 3;
       /**
-       * <code>optional int32 requests = 3;</code>
+       * <code>optional int64 requests = 3;</code>
        */
       boolean hasRequests();
       /**
-       * <code>optional int32 requests = 3;</code>
+       * <code>optional int64 requests = 3;</code>
        */
-      int getRequests();
+      long getRequests();
 
       // optional int32 heapSizeMB = 4;
       /**
@@ -1778,7 +1778,7 @@ public final class StorageClusterStatusMessage {
               }
               case 24: {
                 bitField0_ |= 0x00000004;
-                requests_ = input.readInt32();
+                requests_ = input.readInt64();
                 break;
               }
               case 32: {
@@ -1913,19 +1913,19 @@ public final class StorageClusterStatusMessage {
         return startCode_;
       }
 
-      // optional int32 requests = 3;
+      // optional int64 requests = 3;
       public static final int REQUESTS_FIELD_NUMBER = 3;
-      private int requests_;
+      private long requests_;
       /**
-       * <code>optional int32 requests = 3;</code>
+       * <code>optional int64 requests = 3;</code>
        */
       public boolean hasRequests() {
         return ((bitField0_ & 0x00000004) == 0x00000004);
       }
       /**
-       * <code>optional int32 requests = 3;</code>
+       * <code>optional int64 requests = 3;</code>
        */
-      public int getRequests() {
+      public long getRequests() {
         return requests_;
       }
 
@@ -2000,7 +2000,7 @@ public final class StorageClusterStatusMessage {
       private void initFields() {
         name_ = "";
         startCode_ = 0L;
-        requests_ = 0;
+        requests_ = 0L;
         heapSizeMB_ = 0;
         maxHeapSizeMB_ = 0;
         regions_ = java.util.Collections.emptyList();
@@ -2034,7 +2034,7 @@ public final class StorageClusterStatusMessage {
           output.writeInt64(2, startCode_);
         }
         if (((bitField0_ & 0x00000004) == 0x00000004)) {
-          output.writeInt32(3, requests_);
+          output.writeInt64(3, requests_);
         }
         if (((bitField0_ & 0x00000008) == 0x00000008)) {
           output.writeInt32(4, heapSizeMB_);
@@ -2064,7 +2064,7 @@ public final class StorageClusterStatusMessage {
         }
         if (((bitField0_ & 0x00000004) == 0x00000004)) {
           size += com.google.protobuf.CodedOutputStream
-            .computeInt32Size(3, requests_);
+            .computeInt64Size(3, requests_);
         }
         if (((bitField0_ & 0x00000008) == 0x00000008)) {
           size += com.google.protobuf.CodedOutputStream
@@ -2199,7 +2199,7 @@ public final class StorageClusterStatusMessage {
           bitField0_ = (bitField0_ & ~0x00000001);
           startCode_ = 0L;
           bitField0_ = (bitField0_ & ~0x00000002);
-          requests_ = 0;
+          requests_ = 0L;
           bitField0_ = (bitField0_ & ~0x00000004);
           heapSizeMB_ = 0;
           bitField0_ = (bitField0_ & ~0x00000008);
@@ -2495,35 +2495,35 @@ public final class StorageClusterStatusMessage {
           return this;
         }
 
-        // optional int32 requests = 3;
-        private int requests_ ;
+        // optional int64 requests = 3;
+        private long requests_ ;
         /**
-         * <code>optional int32 requests = 3;</code>
+         * <code>optional int64 requests = 3;</code>
          */
         public boolean hasRequests() {
           return ((bitField0_ & 0x00000004) == 0x00000004);
         }
         /**
-         * <code>optional int32 requests = 3;</code>
+         * <code>optional int64 requests = 3;</code>
          */
-        public int getRequests() {
+        public long getRequests() {
           return requests_;
         }
         /**
-         * <code>optional int32 requests = 3;</code>
+         * <code>optional int64 requests = 3;</code>
          */
-        public Builder setRequests(int value) {
+        public Builder setRequests(long value) {
           bitField0_ |= 0x00000004;
           requests_ = value;
           onChanged();
           return this;
         }
         /**
-         * <code>optional int32 requests = 3;</code>
+         * <code>optional int64 requests = 3;</code>
          */
         public Builder clearRequests() {
           bitField0_ = (bitField0_ & ~0x00000004);
-          requests_ = 0;
+          requests_ = 0L;
           onChanged();
           return this;
         }
@@ -2956,19 +2956,19 @@ public final class StorageClusterStatusMessage {
       return regions_;
     }
 
-    // optional int32 requests = 4;
+    // optional int64 requests = 4;
     public static final int REQUESTS_FIELD_NUMBER = 4;
-    private int requests_;
+    private long requests_;
     /**
-     * <code>optional int32 requests = 4;</code>
+     * <code>optional int64 requests = 4;</code>
      */
     public boolean hasRequests() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
-     * <code>optional int32 requests = 4;</code>
+     * <code>optional int64 requests = 4;</code>
      */
-    public int getRequests() {
+    public long getRequests() {
       return requests_;
     }
 
@@ -2992,7 +2992,7 @@ public final class StorageClusterStatusMessage {
       liveNodes_ = java.util.Collections.emptyList();
       deadNodes_ = com.google.protobuf.LazyStringArrayList.EMPTY;
       regions_ = 0;
-      requests_ = 0;
+      requests_ = 0L;
       averageLoad_ = 0D;
     }
     private byte memoizedIsInitialized = -1;
@@ -3023,7 +3023,7 @@ public final class StorageClusterStatusMessage {
         output.writeInt32(3, regions_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeInt32(4, requests_);
+        output.writeInt64(4, requests_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         output.writeDouble(5, averageLoad_);
@@ -3056,7 +3056,7 @@ public final class StorageClusterStatusMessage {
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeInt32Size(4, requests_);
+          .computeInt64Size(4, requests_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         size += com.google.protobuf.CodedOutputStream
@@ -3189,7 +3189,7 @@ public final class StorageClusterStatusMessage {
         bitField0_ = (bitField0_ & ~0x00000002);
         regions_ = 0;
         bitField0_ = (bitField0_ & ~0x00000004);
-        requests_ = 0;
+        requests_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000008);
         averageLoad_ = 0D;
         bitField0_ = (bitField0_ & ~0x00000010);
@@ -3796,35 +3796,35 @@ public final class StorageClusterStatusMessage {
         return this;
       }
 
-      // optional int32 requests = 4;
-      private int requests_ ;
+      // optional int64 requests = 4;
+      private long requests_ ;
       /**
-       * <code>optional int32 requests = 4;</code>
+       * <code>optional int64 requests = 4;</code>
        */
       public boolean hasRequests() {
         return ((bitField0_ & 0x00000008) == 0x00000008);
       }
       /**
-       * <code>optional int32 requests = 4;</code>
+       * <code>optional int64 requests = 4;</code>
        */
-      public int getRequests() {
+      public long getRequests() {
         return requests_;
       }
       /**
-       * <code>optional int32 requests = 4;</code>
+       * <code>optional int64 requests = 4;</code>
        */
-      public Builder setRequests(int value) {
+      public Builder setRequests(long value) {
         bitField0_ |= 0x00000008;
         requests_ = value;
         onChanged();
         return this;
       }
       /**
-       * <code>optional int32 requests = 4;</code>
+       * <code>optional int64 requests = 4;</code>
        */
       public Builder clearRequests() {
         bitField0_ = (bitField0_ & ~0x00000008);
-        requests_ = 0;
+        requests_ = 0L;
         onChanged();
         return this;
       }
@@ -3903,7 +3903,7 @@ public final class StorageClusterStatusMessage {
       "es\030\001 \003(\0132J.org.apache.hadoop.hbase.rest." +
       "protobuf.generated.StorageClusterStatus." +
       "Node\022\021\n\tdeadNodes\030\002 \003(\t\022\017\n\007regions\030\003 \001(\005" +
-      "\022\020\n\010requests\030\004 \001(\005\022\023\n\013averageLoad\030\005 \001(\001\032" +
+      "\022\020\n\010requests\030\004 \001(\003\022\023\n\013averageLoad\030\005 \001(\001\032" +
       "\322\002\n\006Region\022\014\n\004name\030\001 \002(\014\022\016\n\006stores\030\002 \001(\005" +
       "\022\022\n\nstorefiles\030\003 \001(\005\022\027\n\017storefileSizeMB\030" +
       "\004 \001(\005\022\026\n\016memstoreSizeMB\030\005 \001(\005\022\034\n\024storefi",
@@ -3913,7 +3913,7 @@ public final class StorageClusterStatusMessage {
       "izeKB\030\n \001(\005\022\036\n\026totalStaticBloomSizeKB\030\013 " +
       "\001(\005\022\032\n\022totalCompactingKVs\030\014 \001(\003\022\033\n\023curre" +
       "ntCompactedKVs\030\r \001(\003\032\303\001\n\004Node\022\014\n\004name\030\001 " +
-      "\002(\t\022\021\n\tstartCode\030\002 \001(\003\022\020\n\010requests\030\003 \001(\005" +
+      "\002(\t\022\021\n\tstartCode\030\002 \001(\003\022\020\n\010requests\030\003 \001(\003" +
       "\022\022\n\nheapSizeMB\030\004 \001(\005\022\025\n\rmaxHeapSizeMB\030\005 " +
       "\001(\005\022]\n\007regions\030\006 \003(\0132L.org.apache.hadoop" +
       ".hbase.rest.protobuf.generated.StorageCl",

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto
index a0291b4..34dc1c3 100644
--- a/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto
+++ b/hbase-rest/src/main/resources/org/apache/hadoop/hbase/rest/protobuf/StorageClusterStatusMessage.proto
@@ -36,7 +36,7 @@ message StorageClusterStatus {
   message Node {
     required string name = 1;    // name:port
     optional int64 startCode = 2;
-    optional int32 requests = 3;
+    optional int64 requests = 3;
     optional int32 heapSizeMB = 4;
     optional int32 maxHeapSizeMB = 5;
     repeated Region regions = 6;
@@ -46,6 +46,6 @@ message StorageClusterStatus {
   repeated string deadNodes = 2;
   // summary statistics
   optional int32 regions = 3; 
-  optional int32 requests = 4; 
+  optional int64 requests = 4;
   optional double averageLoad = 5;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
index 9731b4e..af3ab73 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
@@ -52,7 +52,7 @@ public class MetricsMaster {
   /**
    * @param inc How much to add to requests.
    */
-  public void incrementRequests(final int inc) {
+  public void incrementRequests(final long inc) {
     masterSource.incRequests(inc);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java
index 2ba771d..f6d4409 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namespace/NamespaceAuditor.java
@@ -31,7 +31,7 @@ import com.google.common.annotations.VisibleForTesting;
  * splitting preserve namespace quota. The namespace quota can be specified while namespace
  * creation.
  */
-@InterfaceAudience.Public
+@InterfaceAudience.Private
 public class NamespaceAuditor {
   private static Log LOG = LogFactory.getLog(NamespaceAuditor.class);
   static final String NS_AUDITOR_INIT_TIMEOUT = "hbase.namespace.auditor.init.timeout";

http://git-wip-us.apache.org/repos/asf/hbase/blob/e860c66d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
index 5d0573f..44e2551 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
@@ -45,6 +45,8 @@ import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.io.Writable;
 
+import com.google.common.annotations.VisibleForTesting;
+
 
 /**
  * WALEdit: Used in HBase's transaction log (WAL) to represent
@@ -91,6 +93,7 @@ public class WALEdit implements Writable, HeapSize {
   static final byte[] COMPACTION = Bytes.toBytes("HBASE::COMPACTION");
   static final byte [] FLUSH = Bytes.toBytes("HBASE::FLUSH");
   static final byte [] REGION_EVENT = Bytes.toBytes("HBASE::REGION_EVENT");
+  @VisibleForTesting
   public static final byte [] BULK_LOAD = Bytes.toBytes("HBASE::BULK_LOAD");
 
   private final int VERSION_2 = -1;