Posted to commits@hbase.apache.org by el...@apache.org on 2017/05/19 17:13:42 UTC

[42/50] [abbrv] hbase git commit: HBASE-17955 Various reviewboard improvements to space quota work

http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
index c70b736..b886f5c 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/RegionServerStatusProtos.java
@@ -10173,42 +10173,42 @@ public final class RegionServerStatusProtos {
      * A region identifier
      * </pre>
      *
-     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
      */
-    boolean hasRegion();
+    boolean hasRegionInfo();
     /**
      * <pre>
      * A region identifier
      * </pre>
      *
-     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
      */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion();
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
     /**
      * <pre>
      * A region identifier
      * </pre>
      *
-     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
      */
-    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder();
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();
 
     /**
      * <pre>
      * The size in bytes of the region
      * </pre>
      *
-     * <code>optional uint64 size = 2;</code>
+     * <code>optional uint64 region_size = 2;</code>
      */
-    boolean hasSize();
+    boolean hasRegionSize();
     /**
      * <pre>
      * The size in bytes of the region
      * </pre>
      *
-     * <code>optional uint64 size = 2;</code>
+     * <code>optional uint64 region_size = 2;</code>
      */
-    long getSize();
+    long getRegionSize();
   }
   /**
    * Protobuf type {@code hbase.pb.RegionSpaceUse}
@@ -10222,7 +10222,7 @@ public final class RegionServerStatusProtos {
       super(builder);
     }
     private RegionSpaceUse() {
-      size_ = 0L;
+      regionSize_ = 0L;
     }
 
     @java.lang.Override
@@ -10256,19 +10256,19 @@ public final class RegionServerStatusProtos {
             case 10: {
               org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
               if (((bitField0_ & 0x00000001) == 0x00000001)) {
-                subBuilder = region_.toBuilder();
+                subBuilder = regionInfo_.toBuilder();
               }
-              region_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
+              regionInfo_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
               if (subBuilder != null) {
-                subBuilder.mergeFrom(region_);
-                region_ = subBuilder.buildPartial();
+                subBuilder.mergeFrom(regionInfo_);
+                regionInfo_ = subBuilder.buildPartial();
               }
               bitField0_ |= 0x00000001;
               break;
             }
             case 16: {
               bitField0_ |= 0x00000002;
-              size_ = input.readUInt64();
+              regionSize_ = input.readUInt64();
               break;
             }
           }
@@ -10296,16 +10296,16 @@ public final class RegionServerStatusProtos {
     }
 
     private int bitField0_;
-    public static final int REGION_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo region_;
+    public static final int REGION_INFO_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_;
     /**
      * <pre>
      * A region identifier
      * </pre>
      *
-     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
      */
-    public boolean hasRegion() {
+    public boolean hasRegionInfo() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
@@ -10313,32 +10313,32 @@ public final class RegionServerStatusProtos {
      * A region identifier
      * </pre>
      *
-     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
      */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion() {
-      return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+      return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_;
     }
     /**
      * <pre>
      * A region identifier
      * </pre>
      *
-     * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+     * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
      */
-    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() {
-      return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+      return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_;
     }
 
-    public static final int SIZE_FIELD_NUMBER = 2;
-    private long size_;
+    public static final int REGION_SIZE_FIELD_NUMBER = 2;
+    private long regionSize_;
     /**
      * <pre>
      * The size in bytes of the region
      * </pre>
      *
-     * <code>optional uint64 size = 2;</code>
+     * <code>optional uint64 region_size = 2;</code>
      */
-    public boolean hasSize() {
+    public boolean hasRegionSize() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
@@ -10346,10 +10346,10 @@ public final class RegionServerStatusProtos {
      * The size in bytes of the region
      * </pre>
      *
-     * <code>optional uint64 size = 2;</code>
+     * <code>optional uint64 region_size = 2;</code>
      */
-    public long getSize() {
-      return size_;
+    public long getRegionSize() {
+      return regionSize_;
     }
 
     private byte memoizedIsInitialized = -1;
@@ -10358,8 +10358,8 @@ public final class RegionServerStatusProtos {
       if (isInitialized == 1) return true;
       if (isInitialized == 0) return false;
 
-      if (hasRegion()) {
-        if (!getRegion().isInitialized()) {
+      if (hasRegionInfo()) {
+        if (!getRegionInfo().isInitialized()) {
           memoizedIsInitialized = 0;
           return false;
         }
@@ -10371,10 +10371,10 @@ public final class RegionServerStatusProtos {
     public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
                         throws java.io.IOException {
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, getRegion());
+        output.writeMessage(1, getRegionInfo());
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeUInt64(2, size_);
+        output.writeUInt64(2, regionSize_);
       }
       unknownFields.writeTo(output);
     }
@@ -10386,11 +10386,11 @@ public final class RegionServerStatusProtos {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, getRegion());
+          .computeMessageSize(1, getRegionInfo());
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(2, size_);
+          .computeUInt64Size(2, regionSize_);
       }
       size += unknownFields.getSerializedSize();
       memoizedSize = size;
@@ -10409,15 +10409,15 @@ public final class RegionServerStatusProtos {
       org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse) obj;
 
       boolean result = true;
-      result = result && (hasRegion() == other.hasRegion());
-      if (hasRegion()) {
-        result = result && getRegion()
-            .equals(other.getRegion());
+      result = result && (hasRegionInfo() == other.hasRegionInfo());
+      if (hasRegionInfo()) {
+        result = result && getRegionInfo()
+            .equals(other.getRegionInfo());
       }
-      result = result && (hasSize() == other.hasSize());
-      if (hasSize()) {
-        result = result && (getSize()
-            == other.getSize());
+      result = result && (hasRegionSize() == other.hasRegionSize());
+      if (hasRegionSize()) {
+        result = result && (getRegionSize()
+            == other.getRegionSize());
       }
       result = result && unknownFields.equals(other.unknownFields);
       return result;
@@ -10430,14 +10430,14 @@ public final class RegionServerStatusProtos {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptor().hashCode();
-      if (hasRegion()) {
-        hash = (37 * hash) + REGION_FIELD_NUMBER;
-        hash = (53 * hash) + getRegion().hashCode();
+      if (hasRegionInfo()) {
+        hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+        hash = (53 * hash) + getRegionInfo().hashCode();
       }
-      if (hasSize()) {
-        hash = (37 * hash) + SIZE_FIELD_NUMBER;
+      if (hasRegionSize()) {
+        hash = (37 * hash) + REGION_SIZE_FIELD_NUMBER;
         hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
-            getSize());
+            getRegionSize());
       }
       hash = (29 * hash) + unknownFields.hashCode();
       memoizedHashCode = hash;
@@ -10553,18 +10553,18 @@ public final class RegionServerStatusProtos {
       private void maybeForceBuilderInitialization() {
         if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
                 .alwaysUseFieldBuilders) {
-          getRegionFieldBuilder();
+          getRegionInfoFieldBuilder();
         }
       }
       public Builder clear() {
         super.clear();
-        if (regionBuilder_ == null) {
-          region_ = null;
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = null;
         } else {
-          regionBuilder_.clear();
+          regionInfoBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000001);
-        size_ = 0L;
+        regionSize_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000002);
         return this;
       }
@@ -10593,15 +10593,15 @@ public final class RegionServerStatusProtos {
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        if (regionBuilder_ == null) {
-          result.region_ = region_;
+        if (regionInfoBuilder_ == null) {
+          result.regionInfo_ = regionInfo_;
         } else {
-          result.region_ = regionBuilder_.build();
+          result.regionInfo_ = regionInfoBuilder_.build();
         }
         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
           to_bitField0_ |= 0x00000002;
         }
-        result.size_ = size_;
+        result.regionSize_ = regionSize_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -10644,11 +10644,11 @@ public final class RegionServerStatusProtos {
 
       public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse other) {
         if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.getDefaultInstance()) return this;
-        if (other.hasRegion()) {
-          mergeRegion(other.getRegion());
+        if (other.hasRegionInfo()) {
+          mergeRegionInfo(other.getRegionInfo());
         }
-        if (other.hasSize()) {
-          setSize(other.getSize());
+        if (other.hasRegionSize()) {
+          setRegionSize(other.getRegionSize());
         }
         this.mergeUnknownFields(other.unknownFields);
         onChanged();
@@ -10656,8 +10656,8 @@ public final class RegionServerStatusProtos {
       }
 
       public final boolean isInitialized() {
-        if (hasRegion()) {
-          if (!getRegion().isInitialized()) {
+        if (hasRegionInfo()) {
+          if (!getRegionInfo().isInitialized()) {
             return false;
           }
         }
@@ -10683,17 +10683,17 @@ public final class RegionServerStatusProtos {
       }
       private int bitField0_;
 
-      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo region_ = null;
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = null;
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionBuilder_;
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
       /**
        * <pre>
        * A region identifier
        * </pre>
        *
-       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
        */
-      public boolean hasRegion() {
+      public boolean hasRegionInfo() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
@@ -10701,13 +10701,13 @@ public final class RegionServerStatusProtos {
        * A region identifier
        * </pre>
        *
-       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
        */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegion() {
-        if (regionBuilder_ == null) {
-          return region_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
+        if (regionInfoBuilder_ == null) {
+          return regionInfo_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_;
         } else {
-          return regionBuilder_.getMessage();
+          return regionInfoBuilder_.getMessage();
         }
       }
       /**
@@ -10715,17 +10715,17 @@ public final class RegionServerStatusProtos {
        * A region identifier
        * </pre>
        *
-       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
        */
-      public Builder setRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
-        if (regionBuilder_ == null) {
+      public Builder setRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionInfoBuilder_ == null) {
           if (value == null) {
             throw new NullPointerException();
           }
-          region_ = value;
+          regionInfo_ = value;
           onChanged();
         } else {
-          regionBuilder_.setMessage(value);
+          regionInfoBuilder_.setMessage(value);
         }
         bitField0_ |= 0x00000001;
         return this;
@@ -10735,15 +10735,15 @@ public final class RegionServerStatusProtos {
        * A region identifier
        * </pre>
        *
-       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
        */
-      public Builder setRegion(
+      public Builder setRegionInfo(
           org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
-        if (regionBuilder_ == null) {
-          region_ = builderForValue.build();
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = builderForValue.build();
           onChanged();
         } else {
-          regionBuilder_.setMessage(builderForValue.build());
+          regionInfoBuilder_.setMessage(builderForValue.build());
         }
         bitField0_ |= 0x00000001;
         return this;
@@ -10753,21 +10753,21 @@ public final class RegionServerStatusProtos {
        * A region identifier
        * </pre>
        *
-       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
        */
-      public Builder mergeRegion(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
-        if (regionBuilder_ == null) {
+      public Builder mergeRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionInfoBuilder_ == null) {
           if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              region_ != null &&
-              region_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
-            region_ =
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(region_).mergeFrom(value).buildPartial();
+              regionInfo_ != null &&
+              regionInfo_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
+            regionInfo_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial();
           } else {
-            region_ = value;
+            regionInfo_ = value;
           }
           onChanged();
         } else {
-          regionBuilder_.mergeFrom(value);
+          regionInfoBuilder_.mergeFrom(value);
         }
         bitField0_ |= 0x00000001;
         return this;
@@ -10777,14 +10777,14 @@ public final class RegionServerStatusProtos {
        * A region identifier
        * </pre>
        *
-       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
        */
-      public Builder clearRegion() {
-        if (regionBuilder_ == null) {
-          region_ = null;
+      public Builder clearRegionInfo() {
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = null;
           onChanged();
         } else {
-          regionBuilder_.clear();
+          regionInfoBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000001);
         return this;
@@ -10794,26 +10794,26 @@ public final class RegionServerStatusProtos {
        * A region identifier
        * </pre>
        *
-       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
        */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionBuilder() {
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() {
         bitField0_ |= 0x00000001;
         onChanged();
-        return getRegionFieldBuilder().getBuilder();
+        return getRegionInfoFieldBuilder().getBuilder();
       }
       /**
        * <pre>
        * A region identifier
        * </pre>
        *
-       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
        */
-      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionOrBuilder() {
-        if (regionBuilder_ != null) {
-          return regionBuilder_.getMessageOrBuilder();
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
+        if (regionInfoBuilder_ != null) {
+          return regionInfoBuilder_.getMessageOrBuilder();
         } else {
-          return region_ == null ?
-              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : region_;
+          return regionInfo_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance() : regionInfo_;
         }
       }
       /**
@@ -10821,31 +10821,31 @@ public final class RegionServerStatusProtos {
        * A region identifier
        * </pre>
        *
-       * <code>optional .hbase.pb.RegionInfo region = 1;</code>
+       * <code>optional .hbase.pb.RegionInfo region_info = 1;</code>
        */
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
-          getRegionFieldBuilder() {
-        if (regionBuilder_ == null) {
-          regionBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
+          getRegionInfoFieldBuilder() {
+        if (regionInfoBuilder_ == null) {
+          regionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
               org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
-                  getRegion(),
+                  getRegionInfo(),
                   getParentForChildren(),
                   isClean());
-          region_ = null;
+          regionInfo_ = null;
         }
-        return regionBuilder_;
+        return regionInfoBuilder_;
       }
 
-      private long size_ ;
+      private long regionSize_ ;
       /**
        * <pre>
        * The size in bytes of the region
        * </pre>
        *
-       * <code>optional uint64 size = 2;</code>
+       * <code>optional uint64 region_size = 2;</code>
        */
-      public boolean hasSize() {
+      public boolean hasRegionSize() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
       }
       /**
@@ -10853,21 +10853,21 @@ public final class RegionServerStatusProtos {
        * The size in bytes of the region
        * </pre>
        *
-       * <code>optional uint64 size = 2;</code>
+       * <code>optional uint64 region_size = 2;</code>
        */
-      public long getSize() {
-        return size_;
+      public long getRegionSize() {
+        return regionSize_;
       }
       /**
        * <pre>
        * The size in bytes of the region
        * </pre>
        *
-       * <code>optional uint64 size = 2;</code>
+       * <code>optional uint64 region_size = 2;</code>
        */
-      public Builder setSize(long value) {
+      public Builder setRegionSize(long value) {
         bitField0_ |= 0x00000002;
-        size_ = value;
+        regionSize_ = value;
         onChanged();
         return this;
       }
@@ -10876,11 +10876,11 @@ public final class RegionServerStatusProtos {
        * The size in bytes of the region
        * </pre>
        *
-       * <code>optional uint64 size = 2;</code>
+       * <code>optional uint64 region_size = 2;</code>
        */
-      public Builder clearSize() {
+      public Builder clearRegionSize() {
         bitField0_ = (bitField0_ & ~0x00000002);
-        size_ = 0L;
+        regionSize_ = 0L;
         onChanged();
         return this;
       }
@@ -10940,7 +10940,7 @@ public final class RegionServerStatusProtos {
     /**
      * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
      */
-    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse> 
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse>
         getSpaceUseList();
     /**
      * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
@@ -10953,7 +10953,7 @@ public final class RegionServerStatusProtos {
     /**
      * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
      */
-    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder> 
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>
         getSpaceUseOrBuilderList();
     /**
      * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
@@ -11056,7 +11056,7 @@ public final class RegionServerStatusProtos {
     /**
      * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
      */
-    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder> 
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>
         getSpaceUseOrBuilderList() {
       return spaceUse_;
     }
@@ -11368,7 +11368,7 @@ public final class RegionServerStatusProtos {
               spaceUseBuilder_ = null;
               spaceUse_ = other.spaceUse_;
               bitField0_ = (bitField0_ & ~0x00000001);
-              spaceUseBuilder_ = 
+              spaceUseBuilder_ =
                 org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
                    getSpaceUseFieldBuilder() : null;
             } else {
@@ -11604,7 +11604,7 @@ public final class RegionServerStatusProtos {
       /**
        * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
        */
-      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder> 
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>
            getSpaceUseOrBuilderList() {
         if (spaceUseBuilder_ != null) {
           return spaceUseBuilder_.getMessageOrBuilderList();
@@ -11630,12 +11630,12 @@ public final class RegionServerStatusProtos {
       /**
        * <code>repeated .hbase.pb.RegionSpaceUse space_use = 1;</code>
        */
-      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder> 
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder>
            getSpaceUseBuilderList() {
         return getSpaceUseFieldBuilder().getBuilderList();
       }
       private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
-          org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder> 
+          org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseOrBuilder>
           getSpaceUseFieldBuilder() {
         if (spaceUseBuilder_ == null) {
           spaceUseBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
@@ -12950,17 +12950,17 @@ public final class RegionServerStatusProtos {
       internal_static_hbase_pb_SplitTableRegionResponse_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_RegionSpaceUse_descriptor;
-  private static final 
+  private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor;
-  private static final 
+  private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable;
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_RegionSpaceUseReportResponse_descriptor;
-  private static final 
+  private static final
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_RegionSpaceUseReportResponse_fieldAccessorTable;
 
@@ -13008,35 +13008,35 @@ public final class RegionServerStatusProtos {
       "est\022)\n\013region_info\030\001 \002(\0132\024.hbase.pb.Regi" +
       "onInfo\022\021\n\tsplit_row\030\002 \002(\014\022\026\n\013nonce_group" +
       "\030\003 \001(\004:\0010\022\020\n\005nonce\030\004 \001(\004:\0010\"+\n\030SplitTabl" +
-      "eRegionResponse\022\017\n\007proc_id\030\001 \001(\004\"D\n\016Regi" +
-      "onSpaceUse\022$\n\006region\030\001 \001(\0132\024.hbase.pb.Re" +
-      "gionInfo\022\014\n\004size\030\002 \001(\004\"J\n\033RegionSpaceUse" +
-      "ReportRequest\022+\n\tspace_use\030\001 \003(\0132\030.hbase",
-      ".pb.RegionSpaceUse\"\036\n\034RegionSpaceUseRepo" +
-      "rtResponse2\316\006\n\031RegionServerStatusService" +
-      "\022b\n\023RegionServerStartup\022$.hbase.pb.Regio" +
-      "nServerStartupRequest\032%.hbase.pb.RegionS" +
-      "erverStartupResponse\022_\n\022RegionServerRepo" +
-      "rt\022#.hbase.pb.RegionServerReportRequest\032" +
-      "$.hbase.pb.RegionServerReportResponse\022_\n" +
-      "\022ReportRSFatalError\022#.hbase.pb.ReportRSF" +
-      "atalErrorRequest\032$.hbase.pb.ReportRSFata" +
-      "lErrorResponse\022q\n\030GetLastFlushedSequence",
-      "Id\022).hbase.pb.GetLastFlushedSequenceIdRe" +
-      "quest\032*.hbase.pb.GetLastFlushedSequenceI" +
-      "dResponse\022z\n\033ReportRegionStateTransition" +
-      "\022,.hbase.pb.ReportRegionStateTransitionR" +
-      "equest\032-.hbase.pb.ReportRegionStateTrans" +
-      "itionResponse\022T\n\013SplitRegion\022!.hbase.pb." +
-      "SplitTableRegionRequest\032\".hbase.pb.Split" +
-      "TableRegionResponse\022_\n\022getProcedureResul" +
-      "t\022#.hbase.pb.GetProcedureResultRequest\032$" +
-      ".hbase.pb.GetProcedureResultResponse\022e\n\024",
-      "ReportRegionSpaceUse\022%.hbase.pb.RegionSp" +
-      "aceUseReportRequest\032&.hbase.pb.RegionSpa" +
-      "ceUseReportResponseBU\n1org.apache.hadoop" +
-      ".hbase.shaded.protobuf.generatedB\030Region" +
-      "ServerStatusProtosH\001\210\001\001\240\001\001"
+      "eRegionResponse\022\017\n\007proc_id\030\001 \001(\004\"P\n\016Regi" +
+      "onSpaceUse\022)\n\013region_info\030\001 \001(\0132\024.hbase." +
+      "pb.RegionInfo\022\023\n\013region_size\030\002 \001(\004\"J\n\033Re" +
+      "gionSpaceUseReportRequest\022+\n\tspace_use\030\001",
+      " \003(\0132\030.hbase.pb.RegionSpaceUse\"\036\n\034Region" +
+      "SpaceUseReportResponse2\316\006\n\031RegionServerS" +
+      "tatusService\022b\n\023RegionServerStartup\022$.hb" +
+      "ase.pb.RegionServerStartupRequest\032%.hbas" +
+      "e.pb.RegionServerStartupResponse\022_\n\022Regi" +
+      "onServerReport\022#.hbase.pb.RegionServerRe" +
+      "portRequest\032$.hbase.pb.RegionServerRepor" +
+      "tResponse\022_\n\022ReportRSFatalError\022#.hbase." +
+      "pb.ReportRSFatalErrorRequest\032$.hbase.pb." +
+      "ReportRSFatalErrorResponse\022q\n\030GetLastFlu",
+      "shedSequenceId\022).hbase.pb.GetLastFlushed" +
+      "SequenceIdRequest\032*.hbase.pb.GetLastFlus" +
+      "hedSequenceIdResponse\022z\n\033ReportRegionSta" +
+      "teTransition\022,.hbase.pb.ReportRegionStat" +
+      "eTransitionRequest\032-.hbase.pb.ReportRegi" +
+      "onStateTransitionResponse\022T\n\013SplitRegion" +
+      "\022!.hbase.pb.SplitTableRegionRequest\032\".hb" +
+      "ase.pb.SplitTableRegionResponse\022_\n\022getPr" +
+      "ocedureResult\022#.hbase.pb.GetProcedureRes" +
+      "ultRequest\032$.hbase.pb.GetProcedureResult",
+      "Response\022e\n\024ReportRegionSpaceUse\022%.hbase" +
+      ".pb.RegionSpaceUseReportRequest\032&.hbase." +
+      "pb.RegionSpaceUseReportResponseBU\n1org.a" +
+      "pache.hadoop.hbase.shaded.protobuf.gener" +
+      "atedB\030RegionServerStatusProtosH\001\210\001\001\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.    InternalDescriptorAssigner() {
@@ -13136,7 +13136,7 @@ public final class RegionServerStatusProtos {
     internal_static_hbase_pb_RegionSpaceUse_fieldAccessorTable = new
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_RegionSpaceUse_descriptor,
-        new java.lang.String[] { "Region", "Size", });
+        new java.lang.String[] { "RegionInfo", "RegionSize", });
     internal_static_hbase_pb_RegionSpaceUseReportRequest_descriptor =
       getDescriptor().getMessageTypes().get(14);
     internal_static_hbase_pb_RegionSpaceUseReportRequest_fieldAccessorTable = new

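For context on the generated-code diff above: the rename of RegionSpaceUse's fields from region/size to region_info/region_size changes the shaded accessor names from setRegion/getRegion/hasRegion and setSize/getSize/hasSize to setRegionInfo/getRegionInfo/hasRegionInfo and setRegionSize/getRegionSize/hasRegionSize. A minimal sketch of building and reading a RegionSpaceUse with the renamed methods follows; the RegionInfo/TableName values are placeholders invented for illustration (assuming the usual hbase.pb.RegionInfo and hbase.pb.TableName shapes), not anything taken from this patch.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;

public class RegionSpaceUseExample {
  public static void main(String[] args) {
    // Hypothetical region identifier; table name and region id are example values only.
    RegionInfo regionInfo = RegionInfo.newBuilder()
        .setRegionId(1L)
        .setTableName(TableName.newBuilder()
            .setNamespace(ByteString.copyFromUtf8("default"))
            .setQualifier(ByteString.copyFromUtf8("example_table")))
        .build();

    // Renamed setters: setRegionInfo/setRegionSize replace the old setRegion/setSize.
    RegionSpaceUse spaceUse = RegionSpaceUse.newBuilder()
        .setRegionInfo(regionInfo)
        .setRegionSize(128L * 1024 * 1024) // example size in bytes
        .build();

    // Renamed has-checks and getters, as generated in the diff above.
    if (spaceUse.hasRegionInfo() && spaceUse.hasRegionSize()) {
      System.out.println(
          spaceUse.getRegionInfo().getTableName().getQualifier().toStringUtf8()
              + " uses " + spaceUse.getRegionSize() + " bytes");
    }
  }
}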
http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-protocol-shaded/src/main/protobuf/Quota.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Quota.proto b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
index 364c58b..0d171b3 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Quota.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Quota.proto
@@ -100,16 +100,16 @@ message SpaceLimitRequest {
 // Represents the state of a quota on a table. Either the quota is not in violation
 // or it is in violation there is a violation policy which should be in effect.
 message SpaceQuotaStatus {
-  optional SpaceViolationPolicy policy = 1;
+  optional SpaceViolationPolicy violation_policy = 1;
   optional bool in_violation = 2;
 }
 
 // Message stored in the value of hbase:quota table to denote the status of a table WRT
 // the quota applicable to it.
 message SpaceQuotaSnapshot {
-  optional SpaceQuotaStatus status = 1;
-  optional uint64 usage = 2;
-  optional uint64 limit = 3;
+  optional SpaceQuotaStatus quota_status = 1;
+  optional uint64 quota_usage = 2;
+  optional uint64 quota_limit = 3;
 }
 
 message GetSpaceQuotaRegionSizesRequest {

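The quota messages get the same treatment: SpaceQuotaStatus.policy becomes violation_policy, and SpaceQuotaSnapshot's status/usage/limit become quota_status/quota_usage/quota_limit, which is what drives the accessor renames in the QuotaProtos.java diff further down. A minimal sketch of populating and reading the renamed fields through the non-shaded generated classes follows; the snapshot builder's set* names are assumed from standard protobuf builder naming (they are not visible in the hunks shown), and all numbers are placeholder examples.

import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy;

public class SpaceQuotaSnapshotExample {
  public static void main(String[] args) {
    // Renamed setter: setViolationPolicy replaces setPolicy.
    SpaceQuotaStatus status = SpaceQuotaStatus.newBuilder()
        .setViolationPolicy(SpaceViolationPolicy.DISABLE) // example policy
        .setInViolation(true)
        .build();

    // Renamed fields quota_status/quota_usage/quota_limit; these setter names
    // follow the standard protobuf pattern and are assumed, not shown in the diff.
    SpaceQuotaSnapshot snapshot = SpaceQuotaSnapshot.newBuilder()
        .setQuotaStatus(status)
        .setQuotaUsage(512L * 1024 * 1024)   // example: bytes used
        .setQuotaLimit(1024L * 1024 * 1024)  // example: byte limit
        .build();

    // Renamed has-checks and getters, as generated in the QuotaProtos.java diff below.
    if (snapshot.hasQuotaStatus() && snapshot.getQuotaStatus().getInViolation()) {
      System.out.println("Policy " + snapshot.getQuotaStatus().getViolationPolicy()
          + ": " + snapshot.getQuotaUsage() + "/" + snapshot.getQuotaLimit() + " bytes");
    }
  }
}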
http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index 23ddd43..0b765d6 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -142,8 +142,8 @@ message SplitTableRegionResponse {
 }
 
 message RegionSpaceUse {
-  optional RegionInfo region = 1; // A region identifier
-  optional uint64 size = 2; // The size in bytes of the region
+  optional RegionInfo region_info = 1; // A region identifier
+  optional uint64 region_size = 2; // The size in bytes of the region
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
index 96a8ab2..fad9f44 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -4258,7 +4258,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
           getSpaceFieldBuilder() {
         if (spaceBuilder_ == null) {
           spaceBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -5876,7 +5876,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
           getQuotaFieldBuilder() {
         if (quotaBuilder_ == null) {
           quotaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -5903,15 +5903,15 @@ public final class QuotaProtos {
   public interface SpaceQuotaStatusOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
-    // optional .hbase.pb.SpaceViolationPolicy policy = 1;
+    // optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;
     /**
-     * <code>optional .hbase.pb.SpaceViolationPolicy policy = 1;</code>
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;</code>
      */
-    boolean hasPolicy();
+    boolean hasViolationPolicy();
     /**
-     * <code>optional .hbase.pb.SpaceViolationPolicy policy = 1;</code>
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;</code>
      */
-    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getPolicy();
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy();
 
     // optional bool in_violation = 2;
     /**
@@ -5986,7 +5986,7 @@ public final class QuotaProtos {
                 unknownFields.mergeVarintField(1, rawValue);
               } else {
                 bitField0_ |= 0x00000001;
-                policy_ = value;
+                violationPolicy_ = value;
               }
               break;
             }
@@ -6035,20 +6035,20 @@ public final class QuotaProtos {
     }
 
     private int bitField0_;
-    // optional .hbase.pb.SpaceViolationPolicy policy = 1;
-    public static final int POLICY_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy policy_;
+    // optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;
+    public static final int VIOLATION_POLICY_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy violationPolicy_;
     /**
-     * <code>optional .hbase.pb.SpaceViolationPolicy policy = 1;</code>
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;</code>
      */
-    public boolean hasPolicy() {
+    public boolean hasViolationPolicy() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>optional .hbase.pb.SpaceViolationPolicy policy = 1;</code>
+     * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;</code>
      */
-    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getPolicy() {
-      return policy_;
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy() {
+      return violationPolicy_;
     }
 
     // optional bool in_violation = 2;
@@ -6068,7 +6068,7 @@ public final class QuotaProtos {
     }
 
     private void initFields() {
-      policy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
+      violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
       inViolation_ = false;
     }
     private byte memoizedIsInitialized = -1;
@@ -6084,7 +6084,7 @@ public final class QuotaProtos {
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeEnum(1, policy_.getNumber());
+        output.writeEnum(1, violationPolicy_.getNumber());
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         output.writeBool(2, inViolation_);
@@ -6100,7 +6100,7 @@ public final class QuotaProtos {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeEnumSize(1, policy_.getNumber());
+          .computeEnumSize(1, violationPolicy_.getNumber());
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
@@ -6129,10 +6129,10 @@ public final class QuotaProtos {
       org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus) obj;
 
       boolean result = true;
-      result = result && (hasPolicy() == other.hasPolicy());
-      if (hasPolicy()) {
+      result = result && (hasViolationPolicy() == other.hasViolationPolicy());
+      if (hasViolationPolicy()) {
         result = result &&
-            (getPolicy() == other.getPolicy());
+            (getViolationPolicy() == other.getViolationPolicy());
       }
       result = result && (hasInViolation() == other.hasInViolation());
       if (hasInViolation()) {
@@ -6152,9 +6152,9 @@ public final class QuotaProtos {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasPolicy()) {
-        hash = (37 * hash) + POLICY_FIELD_NUMBER;
-        hash = (53 * hash) + hashEnum(getPolicy());
+      if (hasViolationPolicy()) {
+        hash = (37 * hash) + VIOLATION_POLICY_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getViolationPolicy());
       }
       if (hasInViolation()) {
         hash = (37 * hash) + IN_VIOLATION_FIELD_NUMBER;
@@ -6274,7 +6274,7 @@ public final class QuotaProtos {
 
       public Builder clear() {
         super.clear();
-        policy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
+        violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
         bitField0_ = (bitField0_ & ~0x00000001);
         inViolation_ = false;
         bitField0_ = (bitField0_ & ~0x00000002);
@@ -6309,7 +6309,7 @@ public final class QuotaProtos {
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        result.policy_ = policy_;
+        result.violationPolicy_ = violationPolicy_;
         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
           to_bitField0_ |= 0x00000002;
         }
@@ -6330,8 +6330,8 @@ public final class QuotaProtos {
 
       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus other) {
         if (other == org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance()) return this;
-        if (other.hasPolicy()) {
-          setPolicy(other.getPolicy());
+        if (other.hasViolationPolicy()) {
+          setViolationPolicy(other.getViolationPolicy());
         }
         if (other.hasInViolation()) {
           setInViolation(other.getInViolation());
@@ -6363,38 +6363,38 @@ public final class QuotaProtos {
       }
       private int bitField0_;
 
-      // optional .hbase.pb.SpaceViolationPolicy policy = 1;
-      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy policy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
+      // optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
       /**
-       * <code>optional .hbase.pb.SpaceViolationPolicy policy = 1;</code>
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;</code>
        */
-      public boolean hasPolicy() {
+      public boolean hasViolationPolicy() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * <code>optional .hbase.pb.SpaceViolationPolicy policy = 1;</code>
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;</code>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getPolicy() {
-        return policy_;
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy getViolationPolicy() {
+        return violationPolicy_;
       }
       /**
-       * <code>optional .hbase.pb.SpaceViolationPolicy policy = 1;</code>
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;</code>
        */
-      public Builder setPolicy(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy value) {
+      public Builder setViolationPolicy(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy value) {
         if (value == null) {
           throw new NullPointerException();
         }
         bitField0_ |= 0x00000001;
-        policy_ = value;
+        violationPolicy_ = value;
         onChanged();
         return this;
       }
       /**
-       * <code>optional .hbase.pb.SpaceViolationPolicy policy = 1;</code>
+       * <code>optional .hbase.pb.SpaceViolationPolicy violation_policy = 1;</code>
        */
-      public Builder clearPolicy() {
+      public Builder clearViolationPolicy() {
         bitField0_ = (bitField0_ & ~0x00000001);
-        policy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
+        violationPolicy_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy.DISABLE;
         onChanged();
         return this;
       }
@@ -6446,39 +6446,39 @@ public final class QuotaProtos {
   public interface SpaceQuotaSnapshotOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
-    // optional .hbase.pb.SpaceQuotaStatus status = 1;
+    // optional .hbase.pb.SpaceQuotaStatus quota_status = 1;
     /**
-     * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+     * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
      */
-    boolean hasStatus();
+    boolean hasQuotaStatus();
     /**
-     * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+     * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
      */
-    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus getStatus();
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus getQuotaStatus();
     /**
-     * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+     * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
      */
-    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder getStatusOrBuilder();
+    org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder getQuotaStatusOrBuilder();
 
-    // optional uint64 usage = 2;
+    // optional uint64 quota_usage = 2;
     /**
-     * <code>optional uint64 usage = 2;</code>
+     * <code>optional uint64 quota_usage = 2;</code>
      */
-    boolean hasUsage();
+    boolean hasQuotaUsage();
     /**
-     * <code>optional uint64 usage = 2;</code>
+     * <code>optional uint64 quota_usage = 2;</code>
      */
-    long getUsage();
+    long getQuotaUsage();
 
-    // optional uint64 limit = 3;
+    // optional uint64 quota_limit = 3;
     /**
-     * <code>optional uint64 limit = 3;</code>
+     * <code>optional uint64 quota_limit = 3;</code>
      */
-    boolean hasLimit();
+    boolean hasQuotaLimit();
     /**
-     * <code>optional uint64 limit = 3;</code>
+     * <code>optional uint64 quota_limit = 3;</code>
      */
-    long getLimit();
+    long getQuotaLimit();
   }
   /**
    * Protobuf type {@code hbase.pb.SpaceQuotaSnapshot}
@@ -6539,24 +6539,24 @@ public final class QuotaProtos {
             case 10: {
               org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder subBuilder = null;
               if (((bitField0_ & 0x00000001) == 0x00000001)) {
-                subBuilder = status_.toBuilder();
+                subBuilder = quotaStatus_.toBuilder();
               }
-              status_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.PARSER, extensionRegistry);
+              quotaStatus_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.PARSER, extensionRegistry);
               if (subBuilder != null) {
-                subBuilder.mergeFrom(status_);
-                status_ = subBuilder.buildPartial();
+                subBuilder.mergeFrom(quotaStatus_);
+                quotaStatus_ = subBuilder.buildPartial();
               }
               bitField0_ |= 0x00000001;
               break;
             }
             case 16: {
               bitField0_ |= 0x00000002;
-              usage_ = input.readUInt64();
+              quotaUsage_ = input.readUInt64();
               break;
             }
             case 24: {
               bitField0_ |= 0x00000004;
-              limit_ = input.readUInt64();
+              quotaLimit_ = input.readUInt64();
               break;
             }
           }
@@ -6599,64 +6599,64 @@ public final class QuotaProtos {
     }
 
     private int bitField0_;
-    // optional .hbase.pb.SpaceQuotaStatus status = 1;
-    public static final int STATUS_FIELD_NUMBER = 1;
-    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus status_;
+    // optional .hbase.pb.SpaceQuotaStatus quota_status = 1;
+    public static final int QUOTA_STATUS_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus quotaStatus_;
     /**
-     * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+     * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
      */
-    public boolean hasStatus() {
+    public boolean hasQuotaStatus() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+     * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
      */
-    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus getStatus() {
-      return status_;
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus getQuotaStatus() {
+      return quotaStatus_;
     }
     /**
-     * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+     * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
      */
-    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder getStatusOrBuilder() {
-      return status_;
+    public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder getQuotaStatusOrBuilder() {
+      return quotaStatus_;
     }
 
-    // optional uint64 usage = 2;
-    public static final int USAGE_FIELD_NUMBER = 2;
-    private long usage_;
+    // optional uint64 quota_usage = 2;
+    public static final int QUOTA_USAGE_FIELD_NUMBER = 2;
+    private long quotaUsage_;
     /**
-     * <code>optional uint64 usage = 2;</code>
+     * <code>optional uint64 quota_usage = 2;</code>
      */
-    public boolean hasUsage() {
+    public boolean hasQuotaUsage() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
-     * <code>optional uint64 usage = 2;</code>
+     * <code>optional uint64 quota_usage = 2;</code>
      */
-    public long getUsage() {
-      return usage_;
+    public long getQuotaUsage() {
+      return quotaUsage_;
     }
 
-    // optional uint64 limit = 3;
-    public static final int LIMIT_FIELD_NUMBER = 3;
-    private long limit_;
+    // optional uint64 quota_limit = 3;
+    public static final int QUOTA_LIMIT_FIELD_NUMBER = 3;
+    private long quotaLimit_;
     /**
-     * <code>optional uint64 limit = 3;</code>
+     * <code>optional uint64 quota_limit = 3;</code>
      */
-    public boolean hasLimit() {
+    public boolean hasQuotaLimit() {
       return ((bitField0_ & 0x00000004) == 0x00000004);
     }
     /**
-     * <code>optional uint64 limit = 3;</code>
+     * <code>optional uint64 quota_limit = 3;</code>
      */
-    public long getLimit() {
-      return limit_;
+    public long getQuotaLimit() {
+      return quotaLimit_;
     }
 
     private void initFields() {
-      status_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance();
-      usage_ = 0L;
-      limit_ = 0L;
+      quotaStatus_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance();
+      quotaUsage_ = 0L;
+      quotaLimit_ = 0L;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -6671,13 +6671,13 @@ public final class QuotaProtos {
                         throws java.io.IOException {
       getSerializedSize();
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeMessage(1, status_);
+        output.writeMessage(1, quotaStatus_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeUInt64(2, usage_);
+        output.writeUInt64(2, quotaUsage_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeUInt64(3, limit_);
+        output.writeUInt64(3, quotaLimit_);
       }
       getUnknownFields().writeTo(output);
     }
@@ -6690,15 +6690,15 @@ public final class QuotaProtos {
       size = 0;
       if (((bitField0_ & 0x00000001) == 0x00000001)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(1, status_);
+          .computeMessageSize(1, quotaStatus_);
       }
       if (((bitField0_ & 0x00000002) == 0x00000002)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(2, usage_);
+          .computeUInt64Size(2, quotaUsage_);
       }
       if (((bitField0_ & 0x00000004) == 0x00000004)) {
         size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(3, limit_);
+          .computeUInt64Size(3, quotaLimit_);
       }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
@@ -6723,20 +6723,20 @@ public final class QuotaProtos {
       org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot other = (org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot) obj;
 
       boolean result = true;
-      result = result && (hasStatus() == other.hasStatus());
-      if (hasStatus()) {
-        result = result && getStatus()
-            .equals(other.getStatus());
+      result = result && (hasQuotaStatus() == other.hasQuotaStatus());
+      if (hasQuotaStatus()) {
+        result = result && getQuotaStatus()
+            .equals(other.getQuotaStatus());
       }
-      result = result && (hasUsage() == other.hasUsage());
-      if (hasUsage()) {
-        result = result && (getUsage()
-            == other.getUsage());
+      result = result && (hasQuotaUsage() == other.hasQuotaUsage());
+      if (hasQuotaUsage()) {
+        result = result && (getQuotaUsage()
+            == other.getQuotaUsage());
       }
-      result = result && (hasLimit() == other.hasLimit());
-      if (hasLimit()) {
-        result = result && (getLimit()
-            == other.getLimit());
+      result = result && (hasQuotaLimit() == other.hasQuotaLimit());
+      if (hasQuotaLimit()) {
+        result = result && (getQuotaLimit()
+            == other.getQuotaLimit());
       }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
@@ -6751,17 +6751,17 @@ public final class QuotaProtos {
       }
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasStatus()) {
-        hash = (37 * hash) + STATUS_FIELD_NUMBER;
-        hash = (53 * hash) + getStatus().hashCode();
+      if (hasQuotaStatus()) {
+        hash = (37 * hash) + QUOTA_STATUS_FIELD_NUMBER;
+        hash = (53 * hash) + getQuotaStatus().hashCode();
       }
-      if (hasUsage()) {
-        hash = (37 * hash) + USAGE_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getUsage());
+      if (hasQuotaUsage()) {
+        hash = (37 * hash) + QUOTA_USAGE_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getQuotaUsage());
       }
-      if (hasLimit()) {
-        hash = (37 * hash) + LIMIT_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getLimit());
+      if (hasQuotaLimit()) {
+        hash = (37 * hash) + QUOTA_LIMIT_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getQuotaLimit());
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
@@ -6869,7 +6869,7 @@ public final class QuotaProtos {
       }
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getStatusFieldBuilder();
+          getQuotaStatusFieldBuilder();
         }
       }
       private static Builder create() {
@@ -6878,15 +6878,15 @@ public final class QuotaProtos {
 
       public Builder clear() {
         super.clear();
-        if (statusBuilder_ == null) {
-          status_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance();
+        if (quotaStatusBuilder_ == null) {
+          quotaStatus_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance();
         } else {
-          statusBuilder_.clear();
+          quotaStatusBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000001);
-        usage_ = 0L;
+        quotaUsage_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000002);
-        limit_ = 0L;
+        quotaLimit_ = 0L;
         bitField0_ = (bitField0_ & ~0x00000004);
         return this;
       }
@@ -6919,19 +6919,19 @@ public final class QuotaProtos {
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        if (statusBuilder_ == null) {
-          result.status_ = status_;
+        if (quotaStatusBuilder_ == null) {
+          result.quotaStatus_ = quotaStatus_;
         } else {
-          result.status_ = statusBuilder_.build();
+          result.quotaStatus_ = quotaStatusBuilder_.build();
         }
         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
           to_bitField0_ |= 0x00000002;
         }
-        result.usage_ = usage_;
+        result.quotaUsage_ = quotaUsage_;
         if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
           to_bitField0_ |= 0x00000004;
         }
-        result.limit_ = limit_;
+        result.quotaLimit_ = quotaLimit_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -6948,14 +6948,14 @@ public final class QuotaProtos {
 
       public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot other) {
         if (other == org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot.getDefaultInstance()) return this;
-        if (other.hasStatus()) {
-          mergeStatus(other.getStatus());
+        if (other.hasQuotaStatus()) {
+          mergeQuotaStatus(other.getQuotaStatus());
         }
-        if (other.hasUsage()) {
-          setUsage(other.getUsage());
+        if (other.hasQuotaUsage()) {
+          setQuotaUsage(other.getQuotaUsage());
         }
-        if (other.hasLimit()) {
-          setLimit(other.getLimit());
+        if (other.hasQuotaLimit()) {
+          setQuotaLimit(other.getQuotaLimit());
         }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
@@ -6984,185 +6984,185 @@ public final class QuotaProtos {
       }
       private int bitField0_;
 
-      // optional .hbase.pb.SpaceQuotaStatus status = 1;
-      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus status_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance();
+      // optional .hbase.pb.SpaceQuotaStatus quota_status = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus quotaStatus_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance();
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder> statusBuilder_;
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder> quotaStatusBuilder_;
       /**
-       * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+       * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
        */
-      public boolean hasStatus() {
+      public boolean hasQuotaStatus() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
       }
       /**
-       * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+       * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus getStatus() {
-        if (statusBuilder_ == null) {
-          return status_;
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus getQuotaStatus() {
+        if (quotaStatusBuilder_ == null) {
+          return quotaStatus_;
         } else {
-          return statusBuilder_.getMessage();
+          return quotaStatusBuilder_.getMessage();
         }
       }
       /**
-       * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+       * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
        */
-      public Builder setStatus(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus value) {
-        if (statusBuilder_ == null) {
+      public Builder setQuotaStatus(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus value) {
+        if (quotaStatusBuilder_ == null) {
           if (value == null) {
             throw new NullPointerException();
           }
-          status_ = value;
+          quotaStatus_ = value;
           onChanged();
         } else {
-          statusBuilder_.setMessage(value);
+          quotaStatusBuilder_.setMessage(value);
         }
         bitField0_ |= 0x00000001;
         return this;
       }
       /**
-       * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+       * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
        */
-      public Builder setStatus(
+      public Builder setQuotaStatus(
           org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder builderForValue) {
-        if (statusBuilder_ == null) {
-          status_ = builderForValue.build();
+        if (quotaStatusBuilder_ == null) {
+          quotaStatus_ = builderForValue.build();
           onChanged();
         } else {
-          statusBuilder_.setMessage(builderForValue.build());
+          quotaStatusBuilder_.setMessage(builderForValue.build());
         }
         bitField0_ |= 0x00000001;
         return this;
       }
       /**
-       * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+       * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
        */
-      public Builder mergeStatus(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus value) {
-        if (statusBuilder_ == null) {
+      public Builder mergeQuotaStatus(org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus value) {
+        if (quotaStatusBuilder_ == null) {
           if (((bitField0_ & 0x00000001) == 0x00000001) &&
-              status_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance()) {
-            status_ =
-              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.newBuilder(status_).mergeFrom(value).buildPartial();
+              quotaStatus_ != org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance()) {
+            quotaStatus_ =
+              org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.newBuilder(quotaStatus_).mergeFrom(value).buildPartial();
           } else {
-            status_ = value;
+            quotaStatus_ = value;
           }
           onChanged();
         } else {
-          statusBuilder_.mergeFrom(value);
+          quotaStatusBuilder_.mergeFrom(value);
         }
         bitField0_ |= 0x00000001;
         return this;
       }
       /**
-       * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+       * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
        */
-      public Builder clearStatus() {
-        if (statusBuilder_ == null) {
-          status_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance();
+      public Builder clearQuotaStatus() {
+        if (quotaStatusBuilder_ == null) {
+          quotaStatus_ = org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.getDefaultInstance();
           onChanged();
         } else {
-          statusBuilder_.clear();
+          quotaStatusBuilder_.clear();
         }
         bitField0_ = (bitField0_ & ~0x00000001);
         return this;
       }
       /**
-       * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+       * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder getStatusBuilder() {
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder getQuotaStatusBuilder() {
         bitField0_ |= 0x00000001;
         onChanged();
-        return getStatusFieldBuilder().getBuilder();
+        return getQuotaStatusFieldBuilder().getBuilder();
       }
       /**
-       * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+       * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
        */
-      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder getStatusOrBuilder() {
-        if (statusBuilder_ != null) {
-          return statusBuilder_.getMessageOrBuilder();
+      public org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder getQuotaStatusOrBuilder() {
+        if (quotaStatusBuilder_ != null) {
+          return quotaStatusBuilder_.getMessageOrBuilder();
         } else {
-          return status_;
+          return quotaStatus_;
         }
       }
       /**
-       * <code>optional .hbase.pb.SpaceQuotaStatus status = 1;</code>
+       * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder> 
-          getStatusFieldBuilder() {
-        if (statusBuilder_ == null) {
-          statusBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder>
+          getQuotaStatusFieldBuilder() {
+        if (quotaStatusBuilder_ == null) {
+          quotaStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder<
               org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder>(
-                  status_,
+                  quotaStatus_,
                   getParentForChildren(),
                   isClean());
-          status_ = null;
+          quotaStatus_ = null;
         }
-        return statusBuilder_;
+        return quotaStatusBuilder_;
       }
 
-      // optional uint64 usage = 2;
-      private long usage_ ;
+      // optional uint64 quota_usage = 2;
+      private long quotaUsage_ ;
       /**
-       * <code>optional uint64 usage = 2;</code>
+       * <code>optional uint64 quota_usage = 2;</code>
        */
-      public boolean hasUsage() {
+      public boolean hasQuotaUsage() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
       }
       /**
-       * <code>optional uint64 usage = 2;</code>
+       * <code>optional uint64 quota_usage = 2;</code>
        */
-      public long getUsage() {
-        return usage_;
+      public long getQuotaUsage() {
+        return quotaUsage_;
       }
       /**
-       * <code>optional uint64 usage = 2;</code>
+       * <code>optional uint64 quota_usage = 2;</code>
        */
-      public Builder setUsage(long value) {
+      public Builder setQuotaUsage(long value) {
         bitField0_ |= 0x00000002;
-        usage_ = value;
+        quotaUsage_ = value;
         onChanged();
         return this;
       }
       /**
-       * <code>optional uint64 usage = 2;</code>
+       * <code>optional uint64 quota_usage = 2;</code>
        */
-      public Builder clearUsage() {
+      public Builder clearQuotaUsage() {
         bitField0_ = (bitField0_ & ~0x00000002);
-        usage_ = 0L;
+        quotaUsage_ = 0L;
         onChanged();
         return this;
       }
 
-      // optional uint64 limit = 3;
-      private long limit_ ;
+      // optional uint64 quota_limit = 3;
+      private long quotaLimit_ ;
       /**
-       * <code>optional uint64 limit = 3;</code>
+       * <code>optional uint64 quota_limit = 3;</code>
        */
-      public boolean hasLimit() {
+      public boolean hasQuotaLimit() {
         return ((bitField0_ & 0x00000004) == 0x00000004);
       }
       /**
-       * <code>optional uint64 limit = 3;</code>
+       * <code>optional uint64 quota_limit = 3;</code>
        */
-      public long getLimit() {
-        return limit_;
+      public long getQuotaLimit() {
+        return quotaLimit_;
       }
       /**
-       * <code>optional uint64 limit = 3;</code>
+       * <code>optional uint64 quota_limit = 3;</code>
        */
-      public Builder setLimit(long value) {
+      public Builder setQuotaLimit(long value) {
         bitField0_ |= 0x00000004;
-        limit_ = value;
+        quotaLimit_ = value;
         onChanged();
         return this;
       }
       /**
-       * <code>optional uint64 limit = 3;</code>
+       * <code>optional uint64 quota_limit = 3;</code>
        */
-      public Builder clearLimit() {
+      public Builder clearQuotaLimit() {
         bitField0_ = (bitField0_ & ~0x00000004);
-        limit_ = 0L;
+        quotaLimit_ = 0L;
         onChanged();
         return this;
       }
@@ -7252,21 +7252,22 @@ public final class QuotaProtos {
       "limit\030\001 \001(\004\0228\n\020violation_policy\030\002 \001(\0162\036." +
       "hbase.pb.SpaceViolationPolicy\022\025\n\006remove\030" +
       "\003 \001(\010:\005false\"8\n\021SpaceLimitRequest\022#\n\005quo",
-      "ta\030\001 \001(\0132\024.hbase.pb.SpaceQuota\"X\n\020SpaceQ" +
-      "uotaStatus\022.\n\006policy\030\001 \001(\0162\036.hbase.pb.Sp" +
-      "aceViolationPolicy\022\024\n\014in_violation\030\002 \001(\010" +
-      "\"^\n\022SpaceQuotaSnapshot\022*\n\006status\030\001 \001(\0132\032" +
-      ".hbase.pb.SpaceQuotaStatus\022\r\n\005usage\030\002 \001(" +
-      "\004\022\r\n\005limit\030\003 \001(\004*&\n\nQuotaScope\022\013\n\007CLUSTE" +
-      "R\020\001\022\013\n\007MACHINE\020\002*v\n\014ThrottleType\022\022\n\016REQU" +
-      "EST_NUMBER\020\001\022\020\n\014REQUEST_SIZE\020\002\022\020\n\014WRITE_" +
-      "NUMBER\020\003\022\016\n\nWRITE_SIZE\020\004\022\017\n\013READ_NUMBER\020" +
-      "\005\022\r\n\tREAD_SIZE\020\006*$\n\tQuotaType\022\014\n\010THROTTL",
-      "E\020\001\022\t\n\005SPACE\020\002*]\n\024SpaceViolationPolicy\022\013" +
-      "\n\007DISABLE\020\001\022\031\n\025NO_WRITES_COMPACTIONS\020\002\022\r" +
-      "\n\tNO_WRITES\020\003\022\016\n\nNO_INSERTS\020\004BA\n*org.apa" +
-      "che.hadoop.hbase.protobuf.generatedB\013Quo" +
-      "taProtosH\001\210\001\001\240\001\001"
+      "ta\030\001 \001(\0132\024.hbase.pb.SpaceQuota\"b\n\020SpaceQ" +
+      "uotaStatus\0228\n\020violation_policy\030\001 \001(\0162\036.h" +
+      "base.pb.SpaceViolationPolicy\022\024\n\014in_viola" +
+      "tion\030\002 \001(\010\"p\n\022SpaceQuotaSnapshot\0220\n\014quot" +
+      "a_status\030\001 \001(\0132\032.hbase.pb.SpaceQuotaStat" +
+      "us\022\023\n\013quota_usage\030\002 \001(\004\022\023\n\013quota_limit\030\003" +
+      " \001(\004*&\n\nQuotaScope\022\013\n\007CLUSTER\020\001\022\013\n\007MACHI" +
+      "NE\020\002*v\n\014ThrottleType\022\022\n\016REQUEST_NUMBER\020\001" +
+      "\022\020\n\014REQUEST_SIZE\020\002\022\020\n\014WRITE_NUMBER\020\003\022\016\n\n" +
+      "WRITE_SIZE\020\004\022\017\n\013READ_NUMBER\020\005\022\r\n\tREAD_SI",
+      "ZE\020\006*$\n\tQuotaType\022\014\n\010THROTTLE\020\001\022\t\n\005SPACE" +
+      "\020\002*]\n\024SpaceViolationPolicy\022\013\n\007DISABLE\020\001\022" +
+      "\031\n\025NO_WRITES_COMPACTIONS\020\002\022\r\n\tNO_WRITES\020" +
+      "\003\022\016\n\nNO_INSERTS\020\004BA\n*org.apache.hadoop.h" +
+      "base.protobuf.generatedB\013QuotaProtosH\001\210\001" +
+      "\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -7320,13 +7321,13 @@ public final class QuotaProtos {
           internal_static_hbase_pb_SpaceQuotaStatus_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SpaceQuotaStatus_descriptor,
-              new java.lang.String[] { "Policy", "InViolation", });
+              new java.lang.String[] { "ViolationPolicy", "InViolation", });
           internal_static_hbase_pb_SpaceQuotaSnapshot_descriptor =
             getDescriptor().getMessageTypes().get(8);
           internal_static_hbase_pb_SpaceQuotaSnapshot_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_hbase_pb_SpaceQuotaSnapshot_descriptor,
-              new java.lang.String[] { "Status", "Usage", "Limit", });
+              new java.lang.String[] { "QuotaStatus", "QuotaUsage", "QuotaLimit", });
           return null;
         }
       };

http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-protocol/src/main/protobuf/Quota.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Quota.proto b/hbase-protocol/src/main/protobuf/Quota.proto
index d55918a..8e5c17c 100644
--- a/hbase-protocol/src/main/protobuf/Quota.proto
+++ b/hbase-protocol/src/main/protobuf/Quota.proto
@@ -100,14 +100,14 @@ message SpaceLimitRequest {
 // Represents the state of a quota on a table. Either the quota is not in violation
 // or it is in violation and there is a violation policy which should be in effect.
 message SpaceQuotaStatus {
-  optional SpaceViolationPolicy policy = 1;
+  optional SpaceViolationPolicy violation_policy = 1;
   optional bool in_violation = 2;
 }
 
 // Message stored in the value of hbase:quota table to denote the status of a table WRT
 // the quota applicable to it.
 message SpaceQuotaSnapshot {
-  optional SpaceQuotaStatus status = 1;
-  optional uint64 usage = 2;
-  optional uint64 limit = 3;
+  optional SpaceQuotaStatus quota_status = 1;
+  optional uint64 quota_usage = 2;
+  optional uint64 quota_limit = 3;
 }

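As a minimal sketch of how the renamed fields are exercised from the regenerated (non-shaded) QuotaProtos API: the builder and accessor names follow the Java diff above, while the example class name and the concrete usage/limit values are illustrative only.

import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus;
import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceViolationPolicy;

public class SpaceQuotaSnapshotSketch {
  public static void main(String[] args) {
    // Illustrative values only: 512MB used against a 1GB limit, not yet in violation.
    SpaceQuotaSnapshot snapshot = SpaceQuotaSnapshot.newBuilder()
        .setQuotaStatus(SpaceQuotaStatus.newBuilder()
            .setViolationPolicy(SpaceViolationPolicy.NO_WRITES)
            .setInViolation(false))
        .setQuotaUsage(512L * 1024 * 1024)
        .setQuotaLimit(1024L * 1024 * 1024)
        .build();

    // The renamed accessors replace getStatus()/getUsage()/getLimit().
    long remaining = snapshot.getQuotaLimit() - snapshot.getQuotaUsage();
    System.out.println("in violation: " + snapshot.getQuotaStatus().getInViolation()
        + ", bytes remaining: " + remaining);
  }
}
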
http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 33927de..89f085e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2218,10 +2218,10 @@ public class HMaster extends HRegionServer implements MasterServices {
         // (rather than explicit permissions) we'll do the check here instead of in the
         // coprocessor.
         MasterQuotaManager quotaManager = getMasterQuotaManager();
-        if (null != quotaManager) {
+        if (quotaManager != null) {
           if (quotaManager.isQuotaEnabled()) {
             Quotas quotaForTable = QuotaUtil.getTableQuota(getConnection(), tableName);
-            if (null != quotaForTable && quotaForTable.hasSpace()) {
+            if (quotaForTable != null && quotaForTable.hasSpace()) {
               SpaceViolationPolicy policy = quotaForTable.getSpace().getViolationPolicy();
               if (SpaceViolationPolicy.DISABLE == policy) {
                 throw new AccessDeniedException("Enabling the table '" + tableName

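The HMaster change above rejects enabling a table whose space quota carries the DISABLE violation policy. A rough client-side sketch of what that looks like; the table name, configuration, and the exact way the exception propagates back to the client are assumptions, the check itself is the master-side code above.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.AccessDeniedException;

public class EnableTableUnderDisablePolicySketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("t1"); // assumed: a table whose space quota uses DISABLE
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.enableTable(tn); // rejected by the master-side check above while DISABLE applies
    } catch (AccessDeniedException e) {
      // Expected in that case; depending on the RPC/procedure path it may instead arrive
      // wrapped in another IOException.
    }
  }
}
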
http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 2ac6fee..296d4d7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -1951,7 +1951,8 @@ public class MasterRpcServices extends RSRpcServices
       MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
       final long now = EnvironmentEdgeManager.currentTime();
       for (RegionSpaceUse report : request.getSpaceUseList()) {
-        quotaManager.addRegionSize(HRegionInfo.convert(report.getRegion()), report.getSize(), now);
+        quotaManager.addRegionSize(HRegionInfo.convert(
+            report.getRegionInfo()), report.getRegionSize(), now);
       }
       return RegionSpaceUseReportResponse.newBuilder().build();
     } catch (Exception e) {
@@ -1967,14 +1968,14 @@ public class MasterRpcServices extends RSRpcServices
       MasterQuotaManager quotaManager = this.master.getMasterQuotaManager();
       GetSpaceQuotaRegionSizesResponse.Builder builder =
           GetSpaceQuotaRegionSizesResponse.newBuilder();
-      if (null != quotaManager) {
+      if (quotaManager != null) {
         Map<HRegionInfo,Long> regionSizes = quotaManager.snapshotRegionSizes();
         Map<TableName,Long> regionSizesByTable = new HashMap<>();
         // Translate hregioninfo+long -> tablename+long
         for (Entry<HRegionInfo,Long> entry : regionSizes.entrySet()) {
           final TableName tableName = entry.getKey().getTable();
           Long prevSize = regionSizesByTable.get(tableName);
-          if (null == prevSize) {
+          if (prevSize == null) {
             prevSize = 0L;
           }
           regionSizesByTable.put(tableName, prevSize + entry.getValue());
@@ -2000,7 +2001,7 @@ public class MasterRpcServices extends RSRpcServices
       master.checkInitialized();
       QuotaObserverChore quotaChore = this.master.getQuotaObserverChore();
       GetQuotaStatesResponse.Builder builder = GetQuotaStatesResponse.newBuilder();
-      if (null != quotaChore) {
+      if (quotaChore != null) {
         // The "current" view of all tables with quotas
         Map<TableName, SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
         for (Entry<TableName, SpaceQuotaSnapshot> entry : tableSnapshots.entrySet()) {

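The report handler above reads each RegionSpaceUse entry through the renamed getRegionInfo()/getRegionSize() accessors. Below is a minimal sketch of the producing side, assuming the conventional protobuf builder setters for the renamed fields, the shaded import path for RegionSpaceUse, and the HRegionInfo.convert overload that maps an HRegionInfo to the protobuf RegionInfo; the helper class and method are hypothetical.

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;

public class RegionSpaceUseSketch {
  // Hypothetical helper: package one region's size into the renamed message.
  static RegionSpaceUse toRegionSpaceUse(HRegionInfo regionInfo, long sizeInBytes) {
    return RegionSpaceUse.newBuilder()
        .setRegionInfo(HRegionInfo.convert(regionInfo)) // HRegionInfo -> hbase.pb.RegionInfo (assumed overload)
        .setRegionSize(sizeInBytes)                     // region size in bytes
        .build();
  }
}
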
http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
index b5bc3d7..dfa0a87 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMaster.java
@@ -69,6 +69,8 @@ public class MetricsMaster {
 
   /**
    * Sets the number of space quotas defined.
+   *
+   * @see MetricsMasterQuotaSource#updateNumSpaceQuotas(long)
    */
   public void setNumSpaceQuotas(final long numSpaceQuotas) {
     masterQuotaSource.updateNumSpaceQuotas(numSpaceQuotas);
@@ -76,6 +78,8 @@ public class MetricsMaster {
 
   /**
   * Sets the number of tables in violation of a space quota.
+   *
+   * @see MetricsMasterQuotaSource#updateNumTablesInSpaceQuotaViolation(long)
    */
   public void setNumTableInSpaceQuotaViolation(final long numTablesInViolation) {
     masterQuotaSource.updateNumTablesInSpaceQuotaViolation(numTablesInViolation);
@@ -83,13 +87,17 @@ public class MetricsMaster {
 
   /**
    * Sets the number of namespaces in violation of a space quota.
+   *
+   * @see MetricsMasterQuotaSource#updateNumNamespacesInSpaceQuotaViolation(long)
    */
   public void setNumNamespacesInSpaceQuotaViolation(final long numNamespacesInViolation) {
     masterQuotaSource.updateNumNamespacesInSpaceQuotaViolation(numNamespacesInViolation);
   }
 
   /**
-   * Sets the number of region size reports the master has seen.
+   * Sets the number of region size reports the master currently has in memory.
+   *
+   * @see MetricsMasterQuotaSource#updateNumCurrentSpaceQuotaRegionSizeReports(long)
    */
   public void setNumRegionSizeReports(final long numRegionReports) {
     masterQuotaSource.updateNumCurrentSpaceQuotaRegionSizeReports(numRegionReports);
@@ -97,6 +105,9 @@ public class MetricsMaster {
 
   /**
    * Sets the execution time of a period of the QuotaObserverChore.
+   *
+   * @param executionTime The execution time in milliseconds.
+   * @see MetricsMasterQuotaSource#incrementSpaceQuotaObserverChoreTime(long)
    */
   public void incrementQuotaObserverTime(final long executionTime) {
     masterQuotaSource.incrementSpaceQuotaObserverChoreTime(executionTime);

http://git-wip-us.apache.org/repos/asf/hbase/blob/97dcaffa/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
index cbf7ba5..ed37d19 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapperImpl.java
@@ -146,7 +146,7 @@ public class MetricsMasterWrapperImpl implements MetricsMasterWrapper {
   @Override
   public Map<String,Entry<Long,Long>> getTableSpaceUtilization() {
     QuotaObserverChore quotaChore = master.getQuotaObserverChore();
-    if (null == quotaChore) {
+    if (quotaChore == null) {
       return Collections.emptyMap();
     }
     Map<TableName,SpaceQuotaSnapshot> tableSnapshots = quotaChore.getTableQuotaSnapshots();
@@ -160,7 +160,7 @@ public class MetricsMasterWrapperImpl implements MetricsMasterWrapper {
   @Override
   public Map<String,Entry<Long,Long>> getNamespaceSpaceUtilization() {
     QuotaObserverChore quotaChore = master.getQuotaObserverChore();
-    if (null == quotaChore) {
+    if (quotaChore == null) {
       return Collections.emptyMap();
     }
     Map<String,SpaceQuotaSnapshot> namespaceSnapshots = quotaChore.getNamespaceQuotaSnapshots();