Posted to commits@hbase.apache.org by st...@apache.org on 2013/07/10 18:30:34 UTC

svn commit: r1501834 [7/8] - in /hbase/trunk: hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ hbase-protocol/src/main/protobuf/ hbase-server/src/main/java/org/apache/hadoop/hbase/security/token/ hbase-server/src/test/java/org/a...
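
This part of the commit regenerates the protobuf Java classes after the field names in the
.proto files were switched from camelCase to snake_case (and the AccessControlService rpc
names to PascalCase). Because protoc derives the Java accessor and builder names from the
snake_case field names, callers of the generated getters, has-checks and setters compile
unchanged, as the untouched accessor bodies in the hunks below show; only the public
*_FIELD_NUMBER constants are spelled differently. A minimal sketch of caller code against
the regenerated WALProtos (illustrative only; the class name and placeholder values are not
part of the commit):

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey;

public class WalKeyCaller {
  public static void main(String[] args) {
    // Setter names are derived from the snake_case field names the same way the old
    // camelCase names were, so this builds against both the old and regenerated WALProtos.
    WALKey key = WALKey.newBuilder()
        .setEncodedRegionName(ByteString.copyFromUtf8("region"))
        .setTableName(ByteString.copyFromUtf8("table"))
        .setLogSequenceNumber(1L)
        .setWriteTime(System.currentTimeMillis())
        .build();
    // Only the field-number constant changed spelling in the regenerated code:
    // WRITETIME_FIELD_NUMBER became WRITE_TIME_FIELD_NUMBER (the tag is still 4).
    System.out.println("write_time = " + key.getWriteTime()
        + ", tag = " + WALKey.WRITE_TIME_FIELD_NUMBER);
  }
}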

Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/WALProtos.java Wed Jul 10 16:30:32 2013
@@ -80,7 +80,7 @@ public final class WALProtos {
   public interface WALHeaderOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // optional bool hasCompression = 1;
+    // optional bool has_compression = 1;
     boolean hasHasCompression();
     boolean getHasCompression();
   }
@@ -113,8 +113,8 @@ public final class WALProtos {
     }
     
     private int bitField0_;
-    // optional bool hasCompression = 1;
-    public static final int HASCOMPRESSION_FIELD_NUMBER = 1;
+    // optional bool has_compression = 1;
+    public static final int HAS_COMPRESSION_FIELD_NUMBER = 1;
     private boolean hasCompression_;
     public boolean hasHasCompression() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -192,7 +192,7 @@ public final class WALProtos {
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
       if (hasHasCompression()) {
-        hash = (37 * hash) + HASCOMPRESSION_FIELD_NUMBER;
+        hash = (37 * hash) + HAS_COMPRESSION_FIELD_NUMBER;
         hash = (53 * hash) + hashBoolean(getHasCompression());
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -416,7 +416,7 @@ public final class WALProtos {
       
       private int bitField0_;
       
-      // optional bool hasCompression = 1;
+      // optional bool has_compression = 1;
       private boolean hasCompression_ ;
       public boolean hasHasCompression() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -451,23 +451,23 @@ public final class WALProtos {
   public interface WALKeyOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required bytes encodedRegionName = 1;
+    // required bytes encoded_region_name = 1;
     boolean hasEncodedRegionName();
     com.google.protobuf.ByteString getEncodedRegionName();
     
-    // required bytes tableName = 2;
+    // required bytes table_name = 2;
     boolean hasTableName();
     com.google.protobuf.ByteString getTableName();
     
-    // required uint64 logSequenceNumber = 3;
+    // required uint64 log_sequence_number = 3;
     boolean hasLogSequenceNumber();
     long getLogSequenceNumber();
     
-    // required uint64 writeTime = 4;
+    // required uint64 write_time = 4;
     boolean hasWriteTime();
     long getWriteTime();
     
-    // optional .UUID clusterId = 5;
+    // optional .UUID cluster_id = 5;
     boolean hasClusterId();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID getClusterId();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder getClusterIdOrBuilder();
@@ -482,7 +482,7 @@ public final class WALProtos {
     org.apache.hadoop.hbase.protobuf.generated.WALProtos.FamilyScopeOrBuilder getScopesOrBuilder(
         int index);
     
-    // optional uint32 followingKvCount = 7;
+    // optional uint32 following_kv_count = 7;
     boolean hasFollowingKvCount();
     int getFollowingKvCount();
   }
@@ -515,8 +515,8 @@ public final class WALProtos {
     }
     
     private int bitField0_;
-    // required bytes encodedRegionName = 1;
-    public static final int ENCODEDREGIONNAME_FIELD_NUMBER = 1;
+    // required bytes encoded_region_name = 1;
+    public static final int ENCODED_REGION_NAME_FIELD_NUMBER = 1;
     private com.google.protobuf.ByteString encodedRegionName_;
     public boolean hasEncodedRegionName() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -525,8 +525,8 @@ public final class WALProtos {
       return encodedRegionName_;
     }
     
-    // required bytes tableName = 2;
-    public static final int TABLENAME_FIELD_NUMBER = 2;
+    // required bytes table_name = 2;
+    public static final int TABLE_NAME_FIELD_NUMBER = 2;
     private com.google.protobuf.ByteString tableName_;
     public boolean hasTableName() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -535,8 +535,8 @@ public final class WALProtos {
       return tableName_;
     }
     
-    // required uint64 logSequenceNumber = 3;
-    public static final int LOGSEQUENCENUMBER_FIELD_NUMBER = 3;
+    // required uint64 log_sequence_number = 3;
+    public static final int LOG_SEQUENCE_NUMBER_FIELD_NUMBER = 3;
     private long logSequenceNumber_;
     public boolean hasLogSequenceNumber() {
       return ((bitField0_ & 0x00000004) == 0x00000004);
@@ -545,8 +545,8 @@ public final class WALProtos {
       return logSequenceNumber_;
     }
     
-    // required uint64 writeTime = 4;
-    public static final int WRITETIME_FIELD_NUMBER = 4;
+    // required uint64 write_time = 4;
+    public static final int WRITE_TIME_FIELD_NUMBER = 4;
     private long writeTime_;
     public boolean hasWriteTime() {
       return ((bitField0_ & 0x00000008) == 0x00000008);
@@ -555,8 +555,8 @@ public final class WALProtos {
       return writeTime_;
     }
     
-    // optional .UUID clusterId = 5;
-    public static final int CLUSTERID_FIELD_NUMBER = 5;
+    // optional .UUID cluster_id = 5;
+    public static final int CLUSTER_ID_FIELD_NUMBER = 5;
     private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID clusterId_;
     public boolean hasClusterId() {
       return ((bitField0_ & 0x00000010) == 0x00000010);
@@ -589,8 +589,8 @@ public final class WALProtos {
       return scopes_.get(index);
     }
     
-    // optional uint32 followingKvCount = 7;
-    public static final int FOLLOWINGKVCOUNT_FIELD_NUMBER = 7;
+    // optional uint32 following_kv_count = 7;
+    public static final int FOLLOWING_KV_COUNT_FIELD_NUMBER = 7;
     private int followingKvCount_;
     public boolean hasFollowingKvCount() {
       return ((bitField0_ & 0x00000020) == 0x00000020);
@@ -771,23 +771,23 @@ public final class WALProtos {
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
       if (hasEncodedRegionName()) {
-        hash = (37 * hash) + ENCODEDREGIONNAME_FIELD_NUMBER;
+        hash = (37 * hash) + ENCODED_REGION_NAME_FIELD_NUMBER;
         hash = (53 * hash) + getEncodedRegionName().hashCode();
       }
       if (hasTableName()) {
-        hash = (37 * hash) + TABLENAME_FIELD_NUMBER;
+        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
         hash = (53 * hash) + getTableName().hashCode();
       }
       if (hasLogSequenceNumber()) {
-        hash = (37 * hash) + LOGSEQUENCENUMBER_FIELD_NUMBER;
+        hash = (37 * hash) + LOG_SEQUENCE_NUMBER_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getLogSequenceNumber());
       }
       if (hasWriteTime()) {
-        hash = (37 * hash) + WRITETIME_FIELD_NUMBER;
+        hash = (37 * hash) + WRITE_TIME_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getWriteTime());
       }
       if (hasClusterId()) {
-        hash = (37 * hash) + CLUSTERID_FIELD_NUMBER;
+        hash = (37 * hash) + CLUSTER_ID_FIELD_NUMBER;
         hash = (53 * hash) + getClusterId().hashCode();
       }
       if (getScopesCount() > 0) {
@@ -795,7 +795,7 @@ public final class WALProtos {
         hash = (53 * hash) + getScopesList().hashCode();
       }
       if (hasFollowingKvCount()) {
-        hash = (37 * hash) + FOLLOWINGKVCOUNT_FIELD_NUMBER;
+        hash = (37 * hash) + FOLLOWING_KV_COUNT_FIELD_NUMBER;
         hash = (53 * hash) + getFollowingKvCount();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -1178,7 +1178,7 @@ public final class WALProtos {
       
       private int bitField0_;
       
-      // required bytes encodedRegionName = 1;
+      // required bytes encoded_region_name = 1;
       private com.google.protobuf.ByteString encodedRegionName_ = com.google.protobuf.ByteString.EMPTY;
       public boolean hasEncodedRegionName() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -1202,7 +1202,7 @@ public final class WALProtos {
         return this;
       }
       
-      // required bytes tableName = 2;
+      // required bytes table_name = 2;
       private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
       public boolean hasTableName() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -1226,7 +1226,7 @@ public final class WALProtos {
         return this;
       }
       
-      // required uint64 logSequenceNumber = 3;
+      // required uint64 log_sequence_number = 3;
       private long logSequenceNumber_ ;
       public boolean hasLogSequenceNumber() {
         return ((bitField0_ & 0x00000004) == 0x00000004);
@@ -1247,7 +1247,7 @@ public final class WALProtos {
         return this;
       }
       
-      // required uint64 writeTime = 4;
+      // required uint64 write_time = 4;
       private long writeTime_ ;
       public boolean hasWriteTime() {
         return ((bitField0_ & 0x00000008) == 0x00000008);
@@ -1268,7 +1268,7 @@ public final class WALProtos {
         return this;
       }
       
-      // optional .UUID clusterId = 5;
+      // optional .UUID cluster_id = 5;
       private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID clusterId_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.getDefaultInstance();
       private com.google.protobuf.SingleFieldBuilder<
           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUID.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.UUIDOrBuilder> clusterIdBuilder_;
@@ -1544,7 +1544,7 @@ public final class WALProtos {
         return scopesBuilder_;
       }
       
-      // optional uint32 followingKvCount = 7;
+      // optional uint32 following_kv_count = 7;
       private int followingKvCount_ ;
       public boolean hasFollowingKvCount() {
         return ((bitField0_ & 0x00000040) == 0x00000040);
@@ -1583,7 +1583,7 @@ public final class WALProtos {
     boolean hasFamily();
     com.google.protobuf.ByteString getFamily();
     
-    // required .ScopeType scopeType = 2;
+    // required .ScopeType scope_type = 2;
     boolean hasScopeType();
     org.apache.hadoop.hbase.protobuf.generated.WALProtos.ScopeType getScopeType();
   }
@@ -1626,8 +1626,8 @@ public final class WALProtos {
       return family_;
     }
     
-    // required .ScopeType scopeType = 2;
-    public static final int SCOPETYPE_FIELD_NUMBER = 2;
+    // required .ScopeType scope_type = 2;
+    public static final int SCOPE_TYPE_FIELD_NUMBER = 2;
     private org.apache.hadoop.hbase.protobuf.generated.WALProtos.ScopeType scopeType_;
     public boolean hasScopeType() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -1730,7 +1730,7 @@ public final class WALProtos {
         hash = (53 * hash) + getFamily().hashCode();
       }
       if (hasScopeType()) {
-        hash = (37 * hash) + SCOPETYPE_FIELD_NUMBER;
+        hash = (37 * hash) + SCOPE_TYPE_FIELD_NUMBER;
         hash = (53 * hash) + hashEnum(getScopeType());
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -2006,7 +2006,7 @@ public final class WALProtos {
         return this;
       }
       
-      // required .ScopeType scopeType = 2;
+      // required .ScopeType scope_type = 2;
       private org.apache.hadoop.hbase.protobuf.generated.WALProtos.ScopeType scopeType_ = org.apache.hadoop.hbase.protobuf.generated.WALProtos.ScopeType.REPLICATION_SCOPE_LOCAL;
       public boolean hasScopeType() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -2044,29 +2044,29 @@ public final class WALProtos {
   public interface CompactionDescriptorOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required bytes tableName = 1;
+    // required bytes table_name = 1;
     boolean hasTableName();
     com.google.protobuf.ByteString getTableName();
     
-    // required bytes encodedRegionName = 2;
+    // required bytes encoded_region_name = 2;
     boolean hasEncodedRegionName();
     com.google.protobuf.ByteString getEncodedRegionName();
     
-    // required bytes familyName = 3;
+    // required bytes family_name = 3;
     boolean hasFamilyName();
     com.google.protobuf.ByteString getFamilyName();
     
-    // repeated string compactionInput = 4;
+    // repeated string compaction_input = 4;
     java.util.List<String> getCompactionInputList();
     int getCompactionInputCount();
     String getCompactionInput(int index);
     
-    // repeated string compactionOutput = 5;
+    // repeated string compaction_output = 5;
     java.util.List<String> getCompactionOutputList();
     int getCompactionOutputCount();
     String getCompactionOutput(int index);
     
-    // required string storeHomeDir = 6;
+    // required string store_home_dir = 6;
     boolean hasStoreHomeDir();
     String getStoreHomeDir();
   }
@@ -2099,8 +2099,8 @@ public final class WALProtos {
     }
     
     private int bitField0_;
-    // required bytes tableName = 1;
-    public static final int TABLENAME_FIELD_NUMBER = 1;
+    // required bytes table_name = 1;
+    public static final int TABLE_NAME_FIELD_NUMBER = 1;
     private com.google.protobuf.ByteString tableName_;
     public boolean hasTableName() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -2109,8 +2109,8 @@ public final class WALProtos {
       return tableName_;
     }
     
-    // required bytes encodedRegionName = 2;
-    public static final int ENCODEDREGIONNAME_FIELD_NUMBER = 2;
+    // required bytes encoded_region_name = 2;
+    public static final int ENCODED_REGION_NAME_FIELD_NUMBER = 2;
     private com.google.protobuf.ByteString encodedRegionName_;
     public boolean hasEncodedRegionName() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -2119,8 +2119,8 @@ public final class WALProtos {
       return encodedRegionName_;
     }
     
-    // required bytes familyName = 3;
-    public static final int FAMILYNAME_FIELD_NUMBER = 3;
+    // required bytes family_name = 3;
+    public static final int FAMILY_NAME_FIELD_NUMBER = 3;
     private com.google.protobuf.ByteString familyName_;
     public boolean hasFamilyName() {
       return ((bitField0_ & 0x00000004) == 0x00000004);
@@ -2129,8 +2129,8 @@ public final class WALProtos {
       return familyName_;
     }
     
-    // repeated string compactionInput = 4;
-    public static final int COMPACTIONINPUT_FIELD_NUMBER = 4;
+    // repeated string compaction_input = 4;
+    public static final int COMPACTION_INPUT_FIELD_NUMBER = 4;
     private com.google.protobuf.LazyStringList compactionInput_;
     public java.util.List<String>
         getCompactionInputList() {
@@ -2143,8 +2143,8 @@ public final class WALProtos {
       return compactionInput_.get(index);
     }
     
-    // repeated string compactionOutput = 5;
-    public static final int COMPACTIONOUTPUT_FIELD_NUMBER = 5;
+    // repeated string compaction_output = 5;
+    public static final int COMPACTION_OUTPUT_FIELD_NUMBER = 5;
     private com.google.protobuf.LazyStringList compactionOutput_;
     public java.util.List<String>
         getCompactionOutputList() {
@@ -2157,8 +2157,8 @@ public final class WALProtos {
       return compactionOutput_.get(index);
     }
     
-    // required string storeHomeDir = 6;
-    public static final int STOREHOMEDIR_FIELD_NUMBER = 6;
+    // required string store_home_dir = 6;
+    public static final int STORE_HOME_DIR_FIELD_NUMBER = 6;
     private java.lang.Object storeHomeDir_;
     public boolean hasStoreHomeDir() {
       return ((bitField0_ & 0x00000008) == 0x00000008);
@@ -2343,27 +2343,27 @@ public final class WALProtos {
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
       if (hasTableName()) {
-        hash = (37 * hash) + TABLENAME_FIELD_NUMBER;
+        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
         hash = (53 * hash) + getTableName().hashCode();
       }
       if (hasEncodedRegionName()) {
-        hash = (37 * hash) + ENCODEDREGIONNAME_FIELD_NUMBER;
+        hash = (37 * hash) + ENCODED_REGION_NAME_FIELD_NUMBER;
         hash = (53 * hash) + getEncodedRegionName().hashCode();
       }
       if (hasFamilyName()) {
-        hash = (37 * hash) + FAMILYNAME_FIELD_NUMBER;
+        hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER;
         hash = (53 * hash) + getFamilyName().hashCode();
       }
       if (getCompactionInputCount() > 0) {
-        hash = (37 * hash) + COMPACTIONINPUT_FIELD_NUMBER;
+        hash = (37 * hash) + COMPACTION_INPUT_FIELD_NUMBER;
         hash = (53 * hash) + getCompactionInputList().hashCode();
       }
       if (getCompactionOutputCount() > 0) {
-        hash = (37 * hash) + COMPACTIONOUTPUT_FIELD_NUMBER;
+        hash = (37 * hash) + COMPACTION_OUTPUT_FIELD_NUMBER;
         hash = (53 * hash) + getCompactionOutputList().hashCode();
       }
       if (hasStoreHomeDir()) {
-        hash = (37 * hash) + STOREHOMEDIR_FIELD_NUMBER;
+        hash = (37 * hash) + STORE_HOME_DIR_FIELD_NUMBER;
         hash = (53 * hash) + getStoreHomeDir().hashCode();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -2691,7 +2691,7 @@ public final class WALProtos {
       
       private int bitField0_;
       
-      // required bytes tableName = 1;
+      // required bytes table_name = 1;
       private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
       public boolean hasTableName() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -2715,7 +2715,7 @@ public final class WALProtos {
         return this;
       }
       
-      // required bytes encodedRegionName = 2;
+      // required bytes encoded_region_name = 2;
       private com.google.protobuf.ByteString encodedRegionName_ = com.google.protobuf.ByteString.EMPTY;
       public boolean hasEncodedRegionName() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -2739,7 +2739,7 @@ public final class WALProtos {
         return this;
       }
       
-      // required bytes familyName = 3;
+      // required bytes family_name = 3;
       private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY;
       public boolean hasFamilyName() {
         return ((bitField0_ & 0x00000004) == 0x00000004);
@@ -2763,7 +2763,7 @@ public final class WALProtos {
         return this;
       }
       
-      // repeated string compactionInput = 4;
+      // repeated string compaction_input = 4;
       private com.google.protobuf.LazyStringList compactionInput_ = com.google.protobuf.LazyStringArrayList.EMPTY;
       private void ensureCompactionInputIsMutable() {
         if (!((bitField0_ & 0x00000008) == 0x00000008)) {
@@ -2819,7 +2819,7 @@ public final class WALProtos {
         onChanged();
       }
       
-      // repeated string compactionOutput = 5;
+      // repeated string compaction_output = 5;
       private com.google.protobuf.LazyStringList compactionOutput_ = com.google.protobuf.LazyStringArrayList.EMPTY;
       private void ensureCompactionOutputIsMutable() {
         if (!((bitField0_ & 0x00000010) == 0x00000010)) {
@@ -2875,7 +2875,7 @@ public final class WALProtos {
         onChanged();
       }
       
-      // required string storeHomeDir = 6;
+      // required string store_home_dir = 6;
       private java.lang.Object storeHomeDir_ = "";
       public boolean hasStoreHomeDir() {
         return ((bitField0_ & 0x00000020) == 0x00000020);
@@ -3256,22 +3256,23 @@ public final class WALProtos {
       descriptor;
   static {
     java.lang.String[] descriptorData = {
-      "\n\tWAL.proto\032\013hbase.proto\"#\n\tWALHeader\022\026\n" +
-      "\016hasCompression\030\001 \001(\010\"\266\001\n\006WALKey\022\031\n\021enco" +
-      "dedRegionName\030\001 \002(\014\022\021\n\ttableName\030\002 \002(\014\022\031" +
-      "\n\021logSequenceNumber\030\003 \002(\004\022\021\n\twriteTime\030\004" +
-      " \002(\004\022\030\n\tclusterId\030\005 \001(\0132\005.UUID\022\034\n\006scopes" +
-      "\030\006 \003(\0132\014.FamilyScope\022\030\n\020followingKvCount" +
-      "\030\007 \001(\r\"<\n\013FamilyScope\022\016\n\006family\030\001 \002(\014\022\035\n" +
-      "\tscopeType\030\002 \002(\0162\n.ScopeType\"\241\001\n\024Compact" +
-      "ionDescriptor\022\021\n\ttableName\030\001 \002(\014\022\031\n\021enco" +
-      "dedRegionName\030\002 \002(\014\022\022\n\nfamilyName\030\003 \002(\014\022",
-      "\027\n\017compactionInput\030\004 \003(\t\022\030\n\020compactionOu" +
-      "tput\030\005 \003(\t\022\024\n\014storeHomeDir\030\006 \002(\t\"\014\n\nWALT" +
-      "railer*F\n\tScopeType\022\033\n\027REPLICATION_SCOPE" +
-      "_LOCAL\020\000\022\034\n\030REPLICATION_SCOPE_GLOBAL\020\001B?" +
-      "\n*org.apache.hadoop.hbase.protobuf.gener" +
-      "atedB\tWALProtosH\001\210\001\000\240\001\001"
+      "\n\tWAL.proto\032\013hbase.proto\"$\n\tWALHeader\022\027\n" +
+      "\017has_compression\030\001 \001(\010\"\277\001\n\006WALKey\022\033\n\023enc" +
+      "oded_region_name\030\001 \002(\014\022\022\n\ntable_name\030\002 \002" +
+      "(\014\022\033\n\023log_sequence_number\030\003 \002(\004\022\022\n\nwrite" +
+      "_time\030\004 \002(\004\022\031\n\ncluster_id\030\005 \001(\0132\005.UUID\022\034" +
+      "\n\006scopes\030\006 \003(\0132\014.FamilyScope\022\032\n\022followin" +
+      "g_kv_count\030\007 \001(\r\"=\n\013FamilyScope\022\016\n\006famil" +
+      "y\030\001 \002(\014\022\036\n\nscope_type\030\002 \002(\0162\n.ScopeType\"" +
+      "\251\001\n\024CompactionDescriptor\022\022\n\ntable_name\030\001" +
+      " \002(\014\022\033\n\023encoded_region_name\030\002 \002(\014\022\023\n\013fam",
+      "ily_name\030\003 \002(\014\022\030\n\020compaction_input\030\004 \003(\t" +
+      "\022\031\n\021compaction_output\030\005 \003(\t\022\026\n\016store_hom" +
+      "e_dir\030\006 \002(\t\"\014\n\nWALTrailer*F\n\tScopeType\022\033" +
+      "\n\027REPLICATION_SCOPE_LOCAL\020\000\022\034\n\030REPLICATI" +
+      "ON_SCOPE_GLOBAL\020\001B?\n*org.apache.hadoop.h" +
+      "base.protobuf.generatedB\tWALProtosH\001\210\001\000\240" +
+      "\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
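
The only change in the embedded descriptor string above is the field-name metadata; the tag
numbers and wire types are untouched, so WALs written before this commit still parse after
it. A small hypothetical check (not part of the commit; the class name is made up) that
makes this concrete for WALHeader:

import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALHeader;

public class WalHeaderWireCheck {
  public static void main(String[] args) throws Exception {
    // has_compression is still field 1 with varint wire type, so the encoding is the same
    // two bytes (0x08 0x01) that the pre-rename hasCompression field produced.
    byte[] bytes = WALHeader.newBuilder().setHasCompression(true).build().toByteArray();
    System.out.printf("encoded: %02x %02x%n", bytes[0], bytes[1]);
    // Round-trip through the regenerated parser to show the payload is read back unchanged.
    System.out.println("has_compression = " + WALHeader.parseFrom(bytes).getHasCompression());
  }
}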

Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java Wed Jul 10 16:30:32 2013
@@ -16,7 +16,7 @@ public final class ZooKeeperProtos {
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
     
-    // optional uint32 rpcVersion = 2;
+    // optional uint32 rpc_version = 2;
     boolean hasRpcVersion();
     int getRpcVersion();
   }
@@ -62,8 +62,8 @@ public final class ZooKeeperProtos {
       return server_;
     }
     
-    // optional uint32 rpcVersion = 2;
-    public static final int RPCVERSION_FIELD_NUMBER = 2;
+    // optional uint32 rpc_version = 2;
+    public static final int RPC_VERSION_FIELD_NUMBER = 2;
     private int rpcVersion_;
     public boolean hasRpcVersion() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -166,7 +166,7 @@ public final class ZooKeeperProtos {
         hash = (53 * hash) + getServer().hashCode();
       }
       if (hasRpcVersion()) {
-        hash = (37 * hash) + RPCVERSION_FIELD_NUMBER;
+        hash = (37 * hash) + RPC_VERSION_FIELD_NUMBER;
         hash = (53 * hash) + getRpcVersion();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -515,7 +515,7 @@ public final class ZooKeeperProtos {
         return serverBuilder_;
       }
       
-      // optional uint32 rpcVersion = 2;
+      // optional uint32 rpc_version = 2;
       private int rpcVersion_ ;
       public boolean hasRpcVersion() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -555,7 +555,7 @@ public final class ZooKeeperProtos {
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder();
     
-    // optional uint32 rpcVersion = 2;
+    // optional uint32 rpc_version = 2;
     boolean hasRpcVersion();
     int getRpcVersion();
   }
@@ -601,8 +601,8 @@ public final class ZooKeeperProtos {
       return master_;
     }
     
-    // optional uint32 rpcVersion = 2;
-    public static final int RPCVERSION_FIELD_NUMBER = 2;
+    // optional uint32 rpc_version = 2;
+    public static final int RPC_VERSION_FIELD_NUMBER = 2;
     private int rpcVersion_;
     public boolean hasRpcVersion() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -705,7 +705,7 @@ public final class ZooKeeperProtos {
         hash = (53 * hash) + getMaster().hashCode();
       }
       if (hasRpcVersion()) {
-        hash = (37 * hash) + RPCVERSION_FIELD_NUMBER;
+        hash = (37 * hash) + RPC_VERSION_FIELD_NUMBER;
         hash = (53 * hash) + getRpcVersion();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -1054,7 +1054,7 @@ public final class ZooKeeperProtos {
         return masterBuilder_;
       }
       
-      // optional uint32 rpcVersion = 2;
+      // optional uint32 rpc_version = 2;
       private int rpcVersion_ ;
       public boolean hasRpcVersion() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -1089,7 +1089,7 @@ public final class ZooKeeperProtos {
   public interface ClusterUpOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required string startDate = 1;
+    // required string start_date = 1;
     boolean hasStartDate();
     String getStartDate();
   }
@@ -1122,8 +1122,8 @@ public final class ZooKeeperProtos {
     }
     
     private int bitField0_;
-    // required string startDate = 1;
-    public static final int STARTDATE_FIELD_NUMBER = 1;
+    // required string start_date = 1;
+    public static final int START_DATE_FIELD_NUMBER = 1;
     private java.lang.Object startDate_;
     public boolean hasStartDate() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -1227,7 +1227,7 @@ public final class ZooKeeperProtos {
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
       if (hasStartDate()) {
-        hash = (37 * hash) + STARTDATE_FIELD_NUMBER;
+        hash = (37 * hash) + START_DATE_FIELD_NUMBER;
         hash = (53 * hash) + getStartDate().hashCode();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -1455,7 +1455,7 @@ public final class ZooKeeperProtos {
       
       private int bitField0_;
       
-      // required string startDate = 1;
+      // required string start_date = 1;
       private java.lang.Object startDate_ = "";
       public boolean hasStartDate() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -1505,19 +1505,19 @@ public final class ZooKeeperProtos {
   public interface RegionTransitionOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required uint32 eventTypeCode = 1;
+    // required uint32 event_type_code = 1;
     boolean hasEventTypeCode();
     int getEventTypeCode();
     
-    // required bytes regionName = 2;
+    // required bytes region_name = 2;
     boolean hasRegionName();
     com.google.protobuf.ByteString getRegionName();
     
-    // required uint64 createTime = 3;
+    // required uint64 create_time = 3;
     boolean hasCreateTime();
     long getCreateTime();
     
-    // required .ServerName serverName = 4;
+    // required .ServerName server_name = 4;
     boolean hasServerName();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
@@ -1555,8 +1555,8 @@ public final class ZooKeeperProtos {
     }
     
     private int bitField0_;
-    // required uint32 eventTypeCode = 1;
-    public static final int EVENTTYPECODE_FIELD_NUMBER = 1;
+    // required uint32 event_type_code = 1;
+    public static final int EVENT_TYPE_CODE_FIELD_NUMBER = 1;
     private int eventTypeCode_;
     public boolean hasEventTypeCode() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -1565,8 +1565,8 @@ public final class ZooKeeperProtos {
       return eventTypeCode_;
     }
     
-    // required bytes regionName = 2;
-    public static final int REGIONNAME_FIELD_NUMBER = 2;
+    // required bytes region_name = 2;
+    public static final int REGION_NAME_FIELD_NUMBER = 2;
     private com.google.protobuf.ByteString regionName_;
     public boolean hasRegionName() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -1575,8 +1575,8 @@ public final class ZooKeeperProtos {
       return regionName_;
     }
     
-    // required uint64 createTime = 3;
-    public static final int CREATETIME_FIELD_NUMBER = 3;
+    // required uint64 create_time = 3;
+    public static final int CREATE_TIME_FIELD_NUMBER = 3;
     private long createTime_;
     public boolean hasCreateTime() {
       return ((bitField0_ & 0x00000004) == 0x00000004);
@@ -1585,8 +1585,8 @@ public final class ZooKeeperProtos {
       return createTime_;
     }
     
-    // required .ServerName serverName = 4;
-    public static final int SERVERNAME_FIELD_NUMBER = 4;
+    // required .ServerName server_name = 4;
+    public static final int SERVER_NAME_FIELD_NUMBER = 4;
     private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
     public boolean hasServerName() {
       return ((bitField0_ & 0x00000008) == 0x00000008);
@@ -1749,19 +1749,19 @@ public final class ZooKeeperProtos {
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
       if (hasEventTypeCode()) {
-        hash = (37 * hash) + EVENTTYPECODE_FIELD_NUMBER;
+        hash = (37 * hash) + EVENT_TYPE_CODE_FIELD_NUMBER;
         hash = (53 * hash) + getEventTypeCode();
       }
       if (hasRegionName()) {
-        hash = (37 * hash) + REGIONNAME_FIELD_NUMBER;
+        hash = (37 * hash) + REGION_NAME_FIELD_NUMBER;
         hash = (53 * hash) + getRegionName().hashCode();
       }
       if (hasCreateTime()) {
-        hash = (37 * hash) + CREATETIME_FIELD_NUMBER;
+        hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getCreateTime());
       }
       if (hasServerName()) {
-        hash = (37 * hash) + SERVERNAME_FIELD_NUMBER;
+        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
         hash = (53 * hash) + getServerName().hashCode();
       }
       if (hasPayload()) {
@@ -2078,7 +2078,7 @@ public final class ZooKeeperProtos {
       
       private int bitField0_;
       
-      // required uint32 eventTypeCode = 1;
+      // required uint32 event_type_code = 1;
       private int eventTypeCode_ ;
       public boolean hasEventTypeCode() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -2099,7 +2099,7 @@ public final class ZooKeeperProtos {
         return this;
       }
       
-      // required bytes regionName = 2;
+      // required bytes region_name = 2;
       private com.google.protobuf.ByteString regionName_ = com.google.protobuf.ByteString.EMPTY;
       public boolean hasRegionName() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -2123,7 +2123,7 @@ public final class ZooKeeperProtos {
         return this;
       }
       
-      // required uint64 createTime = 3;
+      // required uint64 create_time = 3;
       private long createTime_ ;
       public boolean hasCreateTime() {
         return ((bitField0_ & 0x00000004) == 0x00000004);
@@ -2144,7 +2144,7 @@ public final class ZooKeeperProtos {
         return this;
       }
       
-      // required .ServerName serverName = 4;
+      // required .ServerName server_name = 4;
       private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
       private com.google.protobuf.SingleFieldBuilder<
           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
@@ -2276,7 +2276,7 @@ public final class ZooKeeperProtos {
     boolean hasState();
     org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState();
     
-    // required .ServerName serverName = 2;
+    // required .ServerName server_name = 2;
     boolean hasServerName();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
@@ -2398,8 +2398,8 @@ public final class ZooKeeperProtos {
       return state_;
     }
     
-    // required .ServerName serverName = 2;
-    public static final int SERVERNAME_FIELD_NUMBER = 2;
+    // required .ServerName server_name = 2;
+    public static final int SERVER_NAME_FIELD_NUMBER = 2;
     private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
     public boolean hasServerName() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -2509,7 +2509,7 @@ public final class ZooKeeperProtos {
         hash = (53 * hash) + hashEnum(getState());
       }
       if (hasServerName()) {
-        hash = (37 * hash) + SERVERNAME_FIELD_NUMBER;
+        hash = (37 * hash) + SERVER_NAME_FIELD_NUMBER;
         hash = (53 * hash) + getServerName().hashCode();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -2802,7 +2802,7 @@ public final class ZooKeeperProtos {
         return this;
       }
       
-      // required .ServerName serverName = 2;
+      // required .ServerName server_name = 2;
       private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
       private com.google.protobuf.SingleFieldBuilder<
           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
@@ -4621,7 +4621,7 @@ public final class ZooKeeperProtos {
   public interface ReplicationLockOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required string lockOwner = 1;
+    // required string lock_owner = 1;
     boolean hasLockOwner();
     String getLockOwner();
   }
@@ -4654,8 +4654,8 @@ public final class ZooKeeperProtos {
     }
     
     private int bitField0_;
-    // required string lockOwner = 1;
-    public static final int LOCKOWNER_FIELD_NUMBER = 1;
+    // required string lock_owner = 1;
+    public static final int LOCK_OWNER_FIELD_NUMBER = 1;
     private java.lang.Object lockOwner_;
     public boolean hasLockOwner() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -4759,7 +4759,7 @@ public final class ZooKeeperProtos {
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
       if (hasLockOwner()) {
-        hash = (37 * hash) + LOCKOWNER_FIELD_NUMBER;
+        hash = (37 * hash) + LOCK_OWNER_FIELD_NUMBER;
         hash = (53 * hash) + getLockOwner().hashCode();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -4987,7 +4987,7 @@ public final class ZooKeeperProtos {
       
       private int bitField0_;
       
-      // required string lockOwner = 1;
+      // required string lock_owner = 1;
       private java.lang.Object lockOwner_ = "";
       public boolean hasLockOwner() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -5037,20 +5037,20 @@ public final class ZooKeeperProtos {
   public interface TableLockOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // optional bytes tableName = 1;
+    // optional bytes table_name = 1;
     boolean hasTableName();
     com.google.protobuf.ByteString getTableName();
     
-    // optional .ServerName lockOwner = 2;
+    // optional .ServerName lock_owner = 2;
     boolean hasLockOwner();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getLockOwner();
     org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getLockOwnerOrBuilder();
     
-    // optional int64 threadId = 3;
+    // optional int64 thread_id = 3;
     boolean hasThreadId();
     long getThreadId();
     
-    // optional bool isShared = 4;
+    // optional bool is_shared = 4;
     boolean hasIsShared();
     boolean getIsShared();
     
@@ -5058,7 +5058,7 @@ public final class ZooKeeperProtos {
     boolean hasPurpose();
     String getPurpose();
     
-    // optional int64 createTime = 6;
+    // optional int64 create_time = 6;
     boolean hasCreateTime();
     long getCreateTime();
   }
@@ -5091,8 +5091,8 @@ public final class ZooKeeperProtos {
     }
     
     private int bitField0_;
-    // optional bytes tableName = 1;
-    public static final int TABLENAME_FIELD_NUMBER = 1;
+    // optional bytes table_name = 1;
+    public static final int TABLE_NAME_FIELD_NUMBER = 1;
     private com.google.protobuf.ByteString tableName_;
     public boolean hasTableName() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -5101,8 +5101,8 @@ public final class ZooKeeperProtos {
       return tableName_;
     }
     
-    // optional .ServerName lockOwner = 2;
-    public static final int LOCKOWNER_FIELD_NUMBER = 2;
+    // optional .ServerName lock_owner = 2;
+    public static final int LOCK_OWNER_FIELD_NUMBER = 2;
     private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_;
     public boolean hasLockOwner() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -5114,8 +5114,8 @@ public final class ZooKeeperProtos {
       return lockOwner_;
     }
     
-    // optional int64 threadId = 3;
-    public static final int THREADID_FIELD_NUMBER = 3;
+    // optional int64 thread_id = 3;
+    public static final int THREAD_ID_FIELD_NUMBER = 3;
     private long threadId_;
     public boolean hasThreadId() {
       return ((bitField0_ & 0x00000004) == 0x00000004);
@@ -5124,8 +5124,8 @@ public final class ZooKeeperProtos {
       return threadId_;
     }
     
-    // optional bool isShared = 4;
-    public static final int ISSHARED_FIELD_NUMBER = 4;
+    // optional bool is_shared = 4;
+    public static final int IS_SHARED_FIELD_NUMBER = 4;
     private boolean isShared_;
     public boolean hasIsShared() {
       return ((bitField0_ & 0x00000008) == 0x00000008);
@@ -5166,8 +5166,8 @@ public final class ZooKeeperProtos {
       }
     }
     
-    // optional int64 createTime = 6;
-    public static final int CREATETIME_FIELD_NUMBER = 6;
+    // optional int64 create_time = 6;
+    public static final int CREATE_TIME_FIELD_NUMBER = 6;
     private long createTime_;
     public boolean hasCreateTime() {
       return ((bitField0_ & 0x00000020) == 0x00000020);
@@ -5316,19 +5316,19 @@ public final class ZooKeeperProtos {
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
       if (hasTableName()) {
-        hash = (37 * hash) + TABLENAME_FIELD_NUMBER;
+        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
         hash = (53 * hash) + getTableName().hashCode();
       }
       if (hasLockOwner()) {
-        hash = (37 * hash) + LOCKOWNER_FIELD_NUMBER;
+        hash = (37 * hash) + LOCK_OWNER_FIELD_NUMBER;
         hash = (53 * hash) + getLockOwner().hashCode();
       }
       if (hasThreadId()) {
-        hash = (37 * hash) + THREADID_FIELD_NUMBER;
+        hash = (37 * hash) + THREAD_ID_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getThreadId());
       }
       if (hasIsShared()) {
-        hash = (37 * hash) + ISSHARED_FIELD_NUMBER;
+        hash = (37 * hash) + IS_SHARED_FIELD_NUMBER;
         hash = (53 * hash) + hashBoolean(getIsShared());
       }
       if (hasPurpose()) {
@@ -5336,7 +5336,7 @@ public final class ZooKeeperProtos {
         hash = (53 * hash) + getPurpose().hashCode();
       }
       if (hasCreateTime()) {
-        hash = (37 * hash) + CREATETIME_FIELD_NUMBER;
+        hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getCreateTime());
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -5649,7 +5649,7 @@ public final class ZooKeeperProtos {
       
       private int bitField0_;
       
-      // optional bytes tableName = 1;
+      // optional bytes table_name = 1;
       private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
       public boolean hasTableName() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -5673,7 +5673,7 @@ public final class ZooKeeperProtos {
         return this;
       }
       
-      // optional .ServerName lockOwner = 2;
+      // optional .ServerName lock_owner = 2;
       private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName lockOwner_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
       private com.google.protobuf.SingleFieldBuilder<
           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> lockOwnerBuilder_;
@@ -5763,7 +5763,7 @@ public final class ZooKeeperProtos {
         return lockOwnerBuilder_;
       }
       
-      // optional int64 threadId = 3;
+      // optional int64 thread_id = 3;
       private long threadId_ ;
       public boolean hasThreadId() {
         return ((bitField0_ & 0x00000004) == 0x00000004);
@@ -5784,7 +5784,7 @@ public final class ZooKeeperProtos {
         return this;
       }
       
-      // optional bool isShared = 4;
+      // optional bool is_shared = 4;
       private boolean isShared_ ;
       public boolean hasIsShared() {
         return ((bitField0_ & 0x00000008) == 0x00000008);
@@ -5841,7 +5841,7 @@ public final class ZooKeeperProtos {
         onChanged();
       }
       
-      // optional int64 createTime = 6;
+      // optional int64 create_time = 6;
       private long createTime_ ;
       public boolean hasCreateTime() {
         return ((bitField0_ & 0x00000020) == 0x00000020);
@@ -5876,11 +5876,11 @@ public final class ZooKeeperProtos {
   public interface StoreSequenceIdOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required bytes familyName = 1;
+    // required bytes family_name = 1;
     boolean hasFamilyName();
     com.google.protobuf.ByteString getFamilyName();
     
-    // required uint64 sequenceId = 2;
+    // required uint64 sequence_id = 2;
     boolean hasSequenceId();
     long getSequenceId();
   }
@@ -5913,8 +5913,8 @@ public final class ZooKeeperProtos {
     }
     
     private int bitField0_;
-    // required bytes familyName = 1;
-    public static final int FAMILYNAME_FIELD_NUMBER = 1;
+    // required bytes family_name = 1;
+    public static final int FAMILY_NAME_FIELD_NUMBER = 1;
     private com.google.protobuf.ByteString familyName_;
     public boolean hasFamilyName() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -5923,8 +5923,8 @@ public final class ZooKeeperProtos {
       return familyName_;
     }
     
-    // required uint64 sequenceId = 2;
-    public static final int SEQUENCEID_FIELD_NUMBER = 2;
+    // required uint64 sequence_id = 2;
+    public static final int SEQUENCE_ID_FIELD_NUMBER = 2;
     private long sequenceId_;
     public boolean hasSequenceId() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -6023,11 +6023,11 @@ public final class ZooKeeperProtos {
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
       if (hasFamilyName()) {
-        hash = (37 * hash) + FAMILYNAME_FIELD_NUMBER;
+        hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER;
         hash = (53 * hash) + getFamilyName().hashCode();
       }
       if (hasSequenceId()) {
-        hash = (37 * hash) + SEQUENCEID_FIELD_NUMBER;
+        hash = (37 * hash) + SEQUENCE_ID_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getSequenceId());
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -6273,7 +6273,7 @@ public final class ZooKeeperProtos {
       
       private int bitField0_;
       
-      // required bytes familyName = 1;
+      // required bytes family_name = 1;
       private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY;
       public boolean hasFamilyName() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -6297,7 +6297,7 @@ public final class ZooKeeperProtos {
         return this;
       }
       
-      // required uint64 sequenceId = 2;
+      // required uint64 sequence_id = 2;
       private long sequenceId_ ;
       public boolean hasSequenceId() {
         return ((bitField0_ & 0x00000002) == 0x00000002);
@@ -6332,11 +6332,11 @@ public final class ZooKeeperProtos {
   public interface RegionStoreSequenceIdsOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
     
-    // required uint64 lastFlushedSequenceId = 1;
+    // required uint64 last_flushed_sequence_id = 1;
     boolean hasLastFlushedSequenceId();
     long getLastFlushedSequenceId();
     
-    // repeated .StoreSequenceId storeSequenceId = 2;
+    // repeated .StoreSequenceId store_sequence_id = 2;
     java.util.List<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId> 
         getStoreSequenceIdList();
     org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index);
@@ -6375,8 +6375,8 @@ public final class ZooKeeperProtos {
     }
     
     private int bitField0_;
-    // required uint64 lastFlushedSequenceId = 1;
-    public static final int LASTFLUSHEDSEQUENCEID_FIELD_NUMBER = 1;
+    // required uint64 last_flushed_sequence_id = 1;
+    public static final int LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER = 1;
     private long lastFlushedSequenceId_;
     public boolean hasLastFlushedSequenceId() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -6385,8 +6385,8 @@ public final class ZooKeeperProtos {
       return lastFlushedSequenceId_;
     }
     
-    // repeated .StoreSequenceId storeSequenceId = 2;
-    public static final int STORESEQUENCEID_FIELD_NUMBER = 2;
+    // repeated .StoreSequenceId store_sequence_id = 2;
+    public static final int STORE_SEQUENCE_ID_FIELD_NUMBER = 2;
     private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId> storeSequenceId_;
     public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId> getStoreSequenceIdList() {
       return storeSequenceId_;
@@ -6495,11 +6495,11 @@ public final class ZooKeeperProtos {
       int hash = 41;
       hash = (19 * hash) + getDescriptorForType().hashCode();
       if (hasLastFlushedSequenceId()) {
-        hash = (37 * hash) + LASTFLUSHEDSEQUENCEID_FIELD_NUMBER;
+        hash = (37 * hash) + LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getLastFlushedSequenceId());
       }
       if (getStoreSequenceIdCount() > 0) {
-        hash = (37 * hash) + STORESEQUENCEID_FIELD_NUMBER;
+        hash = (37 * hash) + STORE_SEQUENCE_ID_FIELD_NUMBER;
         hash = (53 * hash) + getStoreSequenceIdList().hashCode();
       }
       hash = (29 * hash) + getUnknownFields().hashCode();
@@ -6781,7 +6781,7 @@ public final class ZooKeeperProtos {
       
       private int bitField0_;
       
-      // required uint64 lastFlushedSequenceId = 1;
+      // required uint64 last_flushed_sequence_id = 1;
       private long lastFlushedSequenceId_ ;
       public boolean hasLastFlushedSequenceId() {
         return ((bitField0_ & 0x00000001) == 0x00000001);
@@ -6802,7 +6802,7 @@ public final class ZooKeeperProtos {
         return this;
       }
       
-      // repeated .StoreSequenceId storeSequenceId = 2;
+      // repeated .StoreSequenceId store_sequence_id = 2;
       private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId> storeSequenceId_ =
         java.util.Collections.emptyList();
       private void ensureStoreSequenceIdIsMutable() {
@@ -7073,36 +7073,36 @@ public final class ZooKeeperProtos {
       descriptor;
   static {
     java.lang.String[] descriptorData = {
-      "\n\017ZooKeeper.proto\032\013hbase.proto\"C\n\020MetaRe" +
+      "\n\017ZooKeeper.proto\032\013hbase.proto\"D\n\020MetaRe" +
       "gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\022" +
-      "\022\n\nrpcVersion\030\002 \001(\r\"9\n\006Master\022\033\n\006master\030" +
-      "\001 \002(\0132\013.ServerName\022\022\n\nrpcVersion\030\002 \001(\r\"\036" +
-      "\n\tClusterUp\022\021\n\tstartDate\030\001 \002(\t\"\203\001\n\020Regio" +
-      "nTransition\022\025\n\reventTypeCode\030\001 \002(\r\022\022\n\nre" +
-      "gionName\030\002 \002(\014\022\022\n\ncreateTime\030\003 \002(\004\022\037\n\nse" +
-      "rverName\030\004 \002(\0132\013.ServerName\022\017\n\007payload\030\005" +
-      " \001(\014\"\230\001\n\014SplitLogTask\022\"\n\005state\030\001 \002(\0162\023.S" +
-      "plitLogTask.State\022\037\n\nserverName\030\002 \002(\0132\013.",
-      "ServerName\"C\n\005State\022\016\n\nUNASSIGNED\020\000\022\t\n\005O" +
-      "WNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004\"" +
-      "n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table.State:\007E" +
-      "NABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISABLED" +
-      "\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003\"%\n\017Repli" +
-      "cationPeer\022\022\n\nclusterkey\030\001 \002(\t\"^\n\020Replic" +
-      "ationState\022&\n\005state\030\001 \002(\0162\027.ReplicationS" +
-      "tate.State\"\"\n\005State\022\013\n\007ENABLED\020\000\022\014\n\010DISA" +
-      "BLED\020\001\"+\n\027ReplicationHLogPosition\022\020\n\010pos" +
-      "ition\030\001 \002(\003\"$\n\017ReplicationLock\022\021\n\tlockOw",
-      "ner\030\001 \002(\t\"\207\001\n\tTableLock\022\021\n\ttableName\030\001 \001" +
-      "(\014\022\036\n\tlockOwner\030\002 \001(\0132\013.ServerName\022\020\n\010th" +
-      "readId\030\003 \001(\003\022\020\n\010isShared\030\004 \001(\010\022\017\n\007purpos" +
-      "e\030\005 \001(\t\022\022\n\ncreateTime\030\006 \001(\003\"9\n\017StoreSequ" +
-      "enceId\022\022\n\nfamilyName\030\001 \002(\014\022\022\n\nsequenceId" +
-      "\030\002 \002(\004\"b\n\026RegionStoreSequenceIds\022\035\n\025last" +
-      "FlushedSequenceId\030\001 \002(\004\022)\n\017storeSequence" +
-      "Id\030\002 \003(\0132\020.StoreSequenceIdBE\n*org.apache" +
-      ".hadoop.hbase.protobuf.generatedB\017ZooKee" +
-      "perProtosH\001\210\001\001\240\001\001"
+      "\023\n\013rpc_version\030\002 \001(\r\":\n\006Master\022\033\n\006master" +
+      "\030\001 \002(\0132\013.ServerName\022\023\n\013rpc_version\030\002 \001(\r" +
+      "\"\037\n\tClusterUp\022\022\n\nstart_date\030\001 \002(\t\"\210\001\n\020Re" +
+      "gionTransition\022\027\n\017event_type_code\030\001 \002(\r\022" +
+      "\023\n\013region_name\030\002 \002(\014\022\023\n\013create_time\030\003 \002(" +
+      "\004\022 \n\013server_name\030\004 \002(\0132\013.ServerName\022\017\n\007p" +
+      "ayload\030\005 \001(\014\"\231\001\n\014SplitLogTask\022\"\n\005state\030\001" +
+      " \002(\0162\023.SplitLogTask.State\022 \n\013server_name",
+      "\030\002 \002(\0132\013.ServerName\"C\n\005State\022\016\n\nUNASSIGN" +
+      "ED\020\000\022\t\n\005OWNED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022" +
+      "\007\n\003ERR\020\004\"n\n\005Table\022$\n\005state\030\001 \002(\0162\014.Table" +
+      ".State:\007ENABLED\"?\n\005State\022\013\n\007ENABLED\020\000\022\014\n" +
+      "\010DISABLED\020\001\022\r\n\tDISABLING\020\002\022\014\n\010ENABLING\020\003" +
+      "\"%\n\017ReplicationPeer\022\022\n\nclusterkey\030\001 \002(\t\"" +
+      "^\n\020ReplicationState\022&\n\005state\030\001 \002(\0162\027.Rep" +
+      "licationState.State\"\"\n\005State\022\013\n\007ENABLED\020" +
+      "\000\022\014\n\010DISABLED\020\001\"+\n\027ReplicationHLogPositi" +
+      "on\022\020\n\010position\030\001 \002(\003\"%\n\017ReplicationLock\022",
+      "\022\n\nlock_owner\030\001 \002(\t\"\214\001\n\tTableLock\022\022\n\ntab" +
+      "le_name\030\001 \001(\014\022\037\n\nlock_owner\030\002 \001(\0132\013.Serv" +
+      "erName\022\021\n\tthread_id\030\003 \001(\003\022\021\n\tis_shared\030\004" +
+      " \001(\010\022\017\n\007purpose\030\005 \001(\t\022\023\n\013create_time\030\006 \001" +
+      "(\003\";\n\017StoreSequenceId\022\023\n\013family_name\030\001 \002" +
+      "(\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026RegionStoreSe" +
+      "quenceIds\022 \n\030last_flushed_sequence_id\030\001 " +
+      "\002(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020.StoreSe" +
+      "quenceIdBE\n*org.apache.hadoop.hbase.prot" +
+      "obuf.generatedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
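
The same holds for the ZooKeeper znode payloads above: the bytes on the wire do not change,
but the field names recorded in the descriptor do, so anything that resolves fields by name
through the descriptor API must use the new snake_case spelling. A hypothetical lookup (not
part of the commit) against the regenerated ZooKeeperProtos:

import com.google.protobuf.Descriptors.FieldDescriptor;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer;

public class DescriptorNameLookup {
  public static void main(String[] args) {
    // After this commit the descriptor carries "rpc_version"; the old camelCase spelling
    // no longer resolves.
    FieldDescriptor byNewName = MetaRegionServer.getDescriptor().findFieldByName("rpc_version");
    FieldDescriptor byOldName = MetaRegionServer.getDescriptor().findFieldByName("rpcVersion");
    System.out.println("rpc_version -> field #" + byNewName.getNumber()); // 2, unchanged
    System.out.println("rpcVersion  -> " + byOldName);                    // null
  }
}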

Modified: hbase/trunk/hbase-protocol/src/main/protobuf/AccessControl.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/AccessControl.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/AccessControl.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/AccessControl.proto Wed Jul 10 16:30:32 2013
@@ -85,15 +85,15 @@ message CheckPermissionsResponse {
 }
 
 service AccessControlService {
-    rpc grant(GrantRequest)
+    rpc Grant(GrantRequest)
       returns (GrantResponse);
 
-    rpc revoke(RevokeRequest)
+    rpc Revoke(RevokeRequest)
       returns (RevokeResponse);
 
-    rpc getUserPermissions(UserPermissionsRequest)
+    rpc GetUserPermissions(UserPermissionsRequest)
       returns (UserPermissionsResponse);
 
-    rpc checkPermissions(CheckPermissionsRequest)
+    rpc CheckPermissions(CheckPermissionsRequest)
       returns (CheckPermissionsResponse);
 }

Modified: hbase/trunk/hbase-protocol/src/main/protobuf/Admin.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Admin.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Admin.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Admin.proto Wed Jul 10 16:30:32 2013
@@ -30,12 +30,12 @@ import "WAL.proto";
 
 message GetRegionInfoRequest {
   required RegionSpecifier region = 1;
-  optional bool compactionState = 2;
+  optional bool compaction_state = 2;
 }
 
 message GetRegionInfoResponse {
-  required RegionInfo regionInfo = 1;
-  optional CompactionState compactionState = 2;
+  required RegionInfo region_info = 1;
+  optional CompactionState compaction_state = 2;
 
   enum CompactionState {
     NONE = 0;
@@ -55,28 +55,28 @@ message GetStoreFileRequest {
 }
 
 message GetStoreFileResponse {
-  repeated string storeFile = 1;
+  repeated string store_file = 1;
 }
 
 message GetOnlineRegionRequest {
 }
 
 message GetOnlineRegionResponse {
-  repeated RegionInfo regionInfo = 1;
+  repeated RegionInfo region_info = 1;
 }
 
 message OpenRegionRequest {
-  repeated RegionOpenInfo openInfo = 1;
+  repeated RegionOpenInfo open_info = 1;
 
   message RegionOpenInfo {
     required RegionInfo region = 1;
-    optional uint32 versionOfOfflineNode = 2;
-    repeated ServerName favoredNodes = 3;
+    optional uint32 version_of_offline_node = 2;
+    repeated ServerName favored_nodes = 3;
   }
 }
 
 message OpenRegionResponse {
-  repeated RegionOpeningState openingState = 1;
+  repeated RegionOpeningState opening_state = 1;
 
   enum RegionOpeningState {
     OPENED = 0;
@@ -91,9 +91,9 @@ message OpenRegionResponse {
  */
 message CloseRegionRequest {
   required RegionSpecifier region = 1;
-  optional uint32 versionOfClosingNode = 2;
-  optional bool transitionInZK = 3 [default = true];
-  optional ServerName destinationServer = 4;
+  optional uint32 version_of_closing_node = 2;
+  optional bool transition_in_ZK = 3 [default = true];
+  optional ServerName destination_server = 4;
 }
 
 message CloseRegionResponse {
@@ -107,11 +107,11 @@ message CloseRegionResponse {
  */
 message FlushRegionRequest {
   required RegionSpecifier region = 1;
-  optional uint64 ifOlderThanTs = 2;
+  optional uint64 if_older_than_ts = 2;
 }
 
 message FlushRegionResponse {
-  required uint64 lastFlushTime = 1;
+  required uint64 last_flush_time = 1;
   optional bool flushed = 2;
 }
 
@@ -124,7 +124,7 @@ message FlushRegionResponse {
  */
 message SplitRegionRequest {
   required RegionSpecifier region = 1;
-  optional bytes splitPoint = 2;
+  optional bytes split_point = 2;
 }
 
 message SplitRegionResponse {
@@ -150,8 +150,8 @@ message CompactRegionResponse {
  * This method currently closes the regions and then merges them
  */
 message MergeRegionsRequest {
-  required RegionSpecifier regionA = 1;
-  required RegionSpecifier regionB = 2;
+  required RegionSpecifier region_a = 1;
+  required RegionSpecifier region_b = 2;
   optional bool forcible = 3 [default = false];
 }
 
@@ -163,10 +163,10 @@ message WALEntry {
   required WALKey key = 1;
   // Following may be null if the KVs/Cells are carried along the side in a cellblock (See
   // RPC for more on cellblocks). If Cells/KVs are in a cellblock, this next field is null
-  // and associatedCellCount has count of Cells associated w/ this WALEntry
-  repeated bytes keyValueBytes = 2;
+  // and associated_cell_count has count of Cells associated w/ this WALEntry
+  repeated bytes key_value_bytes = 2;
   // If Cell data is carried alongside in a cellblock, this is count of Cells in the cellblock.
-  optional int32 associatedCellCount = 3;
+  optional int32 associated_cell_count = 3;
 }
 
 /**
@@ -186,7 +186,7 @@ message RollWALWriterRequest {
 
 message RollWALWriterResponse {
   // A list of encoded name of regions to flush
-  repeated bytes regionToFlush = 1;
+  repeated bytes region_to_flush = 1;
 }
 
 message StopServerRequest {
@@ -200,54 +200,54 @@ message GetServerInfoRequest {
 }
 
 message ServerInfo {
-  required ServerName serverName = 1;
-  optional uint32 webuiPort = 2;
+  required ServerName server_name = 1;
+  optional uint32 webui_port = 2;
 }
 
 message GetServerInfoResponse {
-  required ServerInfo serverInfo = 1;
+  required ServerInfo server_info = 1;
 }
 
 service AdminService {
-  rpc getRegionInfo(GetRegionInfoRequest)
+  rpc GetRegionInfo(GetRegionInfoRequest)
     returns(GetRegionInfoResponse);
 
-  rpc getStoreFile(GetStoreFileRequest)
+  rpc GetStoreFile(GetStoreFileRequest)
     returns(GetStoreFileResponse);
 
-  rpc getOnlineRegion(GetOnlineRegionRequest)
+  rpc GetOnlineRegion(GetOnlineRegionRequest)
     returns(GetOnlineRegionResponse);
 
-  rpc openRegion(OpenRegionRequest)
+  rpc OpenRegion(OpenRegionRequest)
     returns(OpenRegionResponse);
 
-  rpc closeRegion(CloseRegionRequest)
+  rpc CloseRegion(CloseRegionRequest)
     returns(CloseRegionResponse);
 
-  rpc flushRegion(FlushRegionRequest)
+  rpc FlushRegion(FlushRegionRequest)
     returns(FlushRegionResponse);
 
-  rpc splitRegion(SplitRegionRequest)
+  rpc SplitRegion(SplitRegionRequest)
     returns(SplitRegionResponse);
 
-  rpc compactRegion(CompactRegionRequest)
+  rpc CompactRegion(CompactRegionRequest)
     returns(CompactRegionResponse);
-    
-  rpc mergeRegions(MergeRegionsRequest)
+
+  rpc MergeRegions(MergeRegionsRequest)
     returns(MergeRegionsResponse);
 
-  rpc replicateWALEntry(ReplicateWALEntryRequest)
+  rpc ReplicateWALEntry(ReplicateWALEntryRequest)
     returns(ReplicateWALEntryResponse);
-    
-  rpc replay(MultiRequest)
-    returns(MultiResponse);    
 
-  rpc rollWALWriter(RollWALWriterRequest)
+  rpc Replay(MultiRequest)
+    returns(MultiResponse);
+
+  rpc RollWALWriter(RollWALWriterRequest)
     returns(RollWALWriterResponse);
 
-  rpc getServerInfo(GetServerInfoRequest)
+  rpc GetServerInfo(GetServerInfoRequest)
     returns(GetServerInfoResponse);
 
-  rpc stopServer(StopServerRequest)
+  rpc StopServer(StopServerRequest)
     returns(StopServerResponse);
 }

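Because protoc camel-cases snake_case field names when generating Java, setters such as setCompactionState() should be unaffected by the renames above. A minimal builder sketch, assuming a RegionSpecifier (defined in HBase.proto) is already in hand; the wrapper class and method names are illustrative only.

import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;

public class GetRegionInfoRequestSketch {
  static AdminProtos.GetRegionInfoRequest buildRequest(RegionSpecifier region) {
    return AdminProtos.GetRegionInfoRequest.newBuilder()
        .setRegion(region)           // required RegionSpecifier region = 1
        .setCompactionState(true)    // optional bool compaction_state = 2
        .build();
  }
}
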
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/Aggregate.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Aggregate.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Aggregate.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Aggregate.proto Wed Jul 10 16:30:32 2013
@@ -31,32 +31,32 @@ message AggregateArgument {
    *  (3) any bytes required to construct the ColumnInterpreter object
    *      properly
    */
-  required string interpreterClassName = 1;
+  required string interpreter_class_name = 1;
   required Scan scan = 2;
-  optional bytes  interpreterSpecificBytes = 3;
+  optional bytes  interpreter_specific_bytes = 3;
 }
 
 message AggregateResponse {
   /**
    * The AggregateService methods all have a response that either is a Pair
-   * or a simple object. When it is a Pair both firstPart and secondPart
-   * have defined values (and the secondPart is not present in the response 
+   * or a simple object. When it is a Pair both first_part and second_part
+   * have defined values (and the second_part is not present in the response
    * when the response is not a pair). Refer to the AggregateImplementation 
    * class for an overview of the AggregateResponse object constructions. 
    */ 
-  repeated bytes firstPart = 1;
-  optional bytes secondPart = 2;  
+  repeated bytes first_part = 1;
+  optional bytes second_part = 2;
 }
 
 /** Refer to the AggregateImplementation class for an overview of the 
  *  AggregateService method implementations and their functionality.
  */
 service AggregateService {
-  rpc getMax (AggregateArgument) returns (AggregateResponse);
-  rpc getMin (AggregateArgument) returns (AggregateResponse);
-  rpc getSum (AggregateArgument) returns (AggregateResponse);
-  rpc getRowNum (AggregateArgument) returns (AggregateResponse);
-  rpc getAvg (AggregateArgument) returns (AggregateResponse);
-  rpc getStd (AggregateArgument) returns (AggregateResponse);
-  rpc getMedian (AggregateArgument) returns (AggregateResponse);
-}
\ No newline at end of file
+  rpc GetMax (AggregateArgument) returns (AggregateResponse);
+  rpc GetMin (AggregateArgument) returns (AggregateResponse);
+  rpc GetSum (AggregateArgument) returns (AggregateResponse);
+  rpc GetRowNum (AggregateArgument) returns (AggregateResponse);
+  rpc GetAvg (AggregateArgument) returns (AggregateResponse);
+  rpc GetStd (AggregateArgument) returns (AggregateResponse);
+  rpc GetMedian (AggregateArgument) returns (AggregateResponse);
+}

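A hedged sketch of building the renamed AggregateArgument; the interpreter class shown is only an example value, and the wrapper class name is made up for illustration.

import org.apache.hadoop.hbase.protobuf.generated.AggregateProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

public class AggregateArgumentSketch {
  static AggregateProtos.AggregateArgument buildArgument(ClientProtos.Scan scan) {
    return AggregateProtos.AggregateArgument.newBuilder()
        // required string interpreter_class_name = 1
        .setInterpreterClassName(
            "org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter")
        .setScan(scan)               // required Scan scan = 2
        .build();
  }
}
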
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/Authentication.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Authentication.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Authentication.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Authentication.proto Wed Jul 10 16:30:32 2013
@@ -24,7 +24,7 @@ option optimize_for = SPEED;
 
 message AuthenticationKey {
     required int32 id = 1;
-    required int64 expirationDate = 2;
+    required int64 expiration_date = 2;
     required bytes key = 3;
 }
 
@@ -35,10 +35,10 @@ message TokenIdentifier {
     }
     required Kind kind = 1;
     required bytes username = 2;
-    required int32 keyId = 3;
-    optional int64 issueDate = 4;
-    optional int64 expirationDate = 5;
-    optional int64 sequenceNumber = 6;
+    required int32 key_id = 3;
+    optional int64 issue_date = 4;
+    optional int64 expiration_date = 5;
+    optional int64 sequence_number = 6;
 }
 
 
@@ -67,15 +67,15 @@ message WhoAmIRequest {
 
 message WhoAmIResponse {
     optional string username = 1;
-    optional string authMethod = 2;
+    optional string auth_method = 2;
 }
 
 
 // RPC service
 service AuthenticationService {
-    rpc getAuthenticationToken(TokenRequest)
+    rpc GetAuthenticationToken(TokenRequest)
         returns (TokenResponse);
 
-    rpc whoami(WhoAmIRequest)
+    rpc WhoAmI(WhoAmIRequest)
         returns (WhoAmIResponse);
-}
\ No newline at end of file
+}

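For the renamed AuthenticationKey fields, the generated builder keeps the familiar camel-cased setters; a short sketch follows (hypothetical helper, arguments supplied by the caller).

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;

public class AuthenticationKeySketch {
  static AuthenticationProtos.AuthenticationKey buildKey(int id, long expiration, byte[] secret) {
    return AuthenticationProtos.AuthenticationKey.newBuilder()
        .setId(id)                             // required int32 id = 1
        .setExpirationDate(expiration)         // required int64 expiration_date = 2
        .setKey(ByteString.copyFrom(secret))   // required bytes key = 3
        .build();
  }
}
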
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/Cell.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Cell.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Cell.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Cell.proto Wed Jul 10 16:30:32 2013
@@ -46,7 +46,7 @@ message Cell {
   optional bytes family = 2;
   optional bytes qualifier = 3;
   optional uint64 timestamp = 4;
-  optional CellType cellType = 5;
+  optional CellType cell_type = 5;
   optional bytes value = 6;
 }
 
@@ -59,6 +59,6 @@ message KeyValue {
   required bytes family = 2;
   required bytes qualifier = 3;
   optional uint64 timestamp = 4;
-  optional CellType keyType = 5;
+  optional CellType key_type = 5;
   optional bytes value = 6;
 }

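Likewise for Cell, where only cell_type/key_type change; a minimal sketch of populating a Cell. PUT is assumed here as an illustrative CellType value and the wrapper class is hypothetical.

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.CellProtos;

public class CellSketch {
  static CellProtos.Cell buildCell(byte[] family, byte[] qualifier, long ts, byte[] value) {
    return CellProtos.Cell.newBuilder()
        .setFamily(ByteString.copyFrom(family))
        .setQualifier(ByteString.copyFrom(qualifier))
        .setTimestamp(ts)
        .setCellType(CellProtos.CellType.PUT)   // optional CellType cell_type = 5
        .setValue(ByteString.copyFrom(value))
        .build();
  }
}
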
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/Client.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Client.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Client.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Client.proto Wed Jul 10 16:30:32 2013
@@ -45,11 +45,11 @@ message Get {
   repeated Column column = 2;
   repeated NameBytesPair attribute = 3;
   optional Filter filter = 4;
-  optional TimeRange timeRange = 5;
-  optional uint32 maxVersions = 6 [default = 1];
-  optional bool cacheBlocks = 7 [default = true];
-  optional uint32 storeLimit = 8;
-  optional uint32 storeOffset = 9;
+  optional TimeRange time_range = 5;
+  optional uint32 max_versions = 6 [default = 1];
+  optional bool cache_blocks = 7 [default = true];
+  optional uint32 store_limit = 8;
+  optional uint32 store_offset = 9;
 }
 
 message Result {
@@ -60,18 +60,18 @@ message Result {
   // not part of this protobuf message; they are passed alongside
   // and then this Message is just a placeholder with metadata.
   // The count is needed to know how many to peel off the block of Cells as
-  // ours.  NOTE: This is different from the pb managed cellCount of the
+  // ours.  NOTE: This is different from the pb managed cell_count of the
   // 'cell' field above which is non-null when the cells are pb'd.
-  optional int32 associatedCellCount = 2;
+  optional int32 associated_cell_count = 2;
 }
 
 /**
  * The get request. Perform a single Get operation.
- * Unless existenceOnly is specified, return all the requested data
+ * Unless existence_only is specified, return all the requested data
  * for the row that matches exactly, or the one that immediately
- * precedes it if closestRowBefore is specified.
+ * precedes it if closest_row_before is specified.
  *
- * If existenceOnly is set, only the existence will be returned.
+ * If existence_only is set, only the existence will be returned.
  */
 message GetRequest {
   required RegionSpecifier region = 1;
@@ -79,12 +79,12 @@ message GetRequest {
 
   // If the row to get doesn't exist, return the
   // closest row before.
-  optional bool closestRowBefore = 3;
+  optional bool closest_row_before = 3;
 
   // The result isn't asked for, just check for
-  // the existence. If closestRowBefore specified,
+  // the existence. If closest_row_before specified,
   // this will be ignored
-  optional bool existenceOnly = 4;
+  optional bool existence_only = 4;
 }
 
 message MultiGetRequest {
@@ -93,12 +93,12 @@ message MultiGetRequest {
 
   // If the row to get doesn't exist, return the
   // closest row before.
-  optional bool closestRowBefore = 3;
+  optional bool closest_row_before = 3;
 
   // The result isn't asked for, just check for
-  // the existence. If closestRowBefore specified,
+  // the existence. If closest_row_before specified,
   // this will be ignored
-  optional bool existenceOnly = 4;
+  optional bool existence_only = 4;
 }
 
 message GetResponse {
@@ -125,7 +125,7 @@ message Condition {
   required bytes row = 1;
   required bytes family = 2;
   required bytes qualifier = 3;
-  required CompareType compareType = 4;
+  required CompareType compare_type = 4;
   required Comparator comparator = 5;
 }
 
@@ -139,22 +139,22 @@ message Condition {
  */
 message MutationProto {
   optional bytes row = 1;
-  optional MutationType mutateType = 2;
-  repeated ColumnValue columnValue = 3;
+  optional MutationType mutate_type = 2;
+  repeated ColumnValue column_value = 3;
   optional uint64 timestamp = 4;
   repeated NameBytesPair attribute = 5;
   optional Durability durability = 6 [default = USE_DEFAULT];
 
   // For some mutations, a result may be returned, in which case,
   // time range can be specified for potential performance gain
-  optional TimeRange timeRange = 7;
+  optional TimeRange time_range = 7;
   // The below count is set when the associated cells are NOT
   // part of this protobuf message; they are passed alongside
   // and then this Message is a placeholder with metadata.  The
   // count is needed to know how many to peel off the block of Cells as
-  // ours.  NOTE: This is different from the pb managed cellCount of the
+  // ours.  NOTE: This is different from the pb managed cell_count of the
   // 'cell' field above which is non-null when the cells are pb'd.
-  optional int32 associatedCellCount = 8;
+  optional int32 associated_cell_count = 8;
 
   enum Durability {
     USE_DEFAULT  = 0;
@@ -180,13 +180,13 @@ message MutationProto {
 
   message ColumnValue {
     required bytes family = 1;
-    repeated QualifierValue qualifierValue = 2;
+    repeated QualifierValue qualifier_value = 2;
 
     message QualifierValue {
       optional bytes qualifier = 1;
       optional bytes value = 2;
       optional uint64 timestamp = 3;
-      optional DeleteType deleteType = 4;
+      optional DeleteType delete_type = 4;
     }
   }
 }
@@ -225,18 +225,18 @@ message MutateResponse {
 message Scan {
   repeated Column column = 1;
   repeated NameBytesPair attribute = 2;
-  optional bytes startRow = 3;
-  optional bytes stopRow = 4;
+  optional bytes start_row = 3;
+  optional bytes stop_row = 4;
   optional Filter filter = 5;
-  optional TimeRange timeRange = 6;
-  optional uint32 maxVersions = 7 [default = 1];
-  optional bool cacheBlocks = 8 [default = true];
-  optional uint32 batchSize = 9;
-  optional uint64 maxResultSize = 10;
-  optional uint32 storeLimit = 11;
-  optional uint32 storeOffset = 12;
-  optional bool loadColumnFamiliesOnDemand = 13; /* DO NOT add defaults to loadColumnFamiliesOnDemand. */
-  optional uint32 cachingCount = 14;
+  optional TimeRange time_range = 6;
+  optional uint32 max_versions = 7 [default = 1];
+  optional bool cache_blocks = 8 [default = true];
+  optional uint32 batch_size = 9;
+  optional uint64 max_result_size = 10;
+  optional uint32 store_limit = 11;
+  optional uint32 store_offset = 12;
+  optional bool load_column_families_on_demand = 13; /* DO NOT add defaults to load_column_families_on_demand. */
+  optional uint32 caching_count = 14;
   optional bool prefetching = 15;
 }
 
@@ -254,25 +254,25 @@ message Scan {
 message ScanRequest {
   optional RegionSpecifier region = 1;
   optional Scan scan = 2;
-  optional uint64 scannerId = 3;
-  optional uint32 numberOfRows = 4;
-  optional bool closeScanner = 5;
-  optional uint64 nextCallSeq = 6;
+  optional uint64 scanner_id = 3;
+  optional uint32 number_of_rows = 4;
+  optional bool close_scanner = 5;
+  optional uint64 next_call_seq = 6;
 }
 
 /**
- * The scan response. If there are no more results, moreResults will
+ * The scan response. If there are no more results, more_results will
  * be false.  If it is not specified, it means there are more.
  */
 message ScanResponse {
-  optional ResultCellMeta resultCellMeta = 1;
-  optional uint64 scannerId = 2;
-  optional bool moreResults = 3;
+  optional ResultCellMeta result_cell_meta = 1;
+  optional uint64 scanner_id = 2;
+  optional bool more_results = 3;
   optional uint32 ttl = 4;
 }
 
 message ResultCellMeta {
-  repeated uint32 cellsLength = 1;
+  repeated uint32 cells_length = 1;
 }
 
 /**
@@ -281,8 +281,8 @@ message ResultCellMeta {
  */
 message BulkLoadHFileRequest {
   required RegionSpecifier region = 1;
-  repeated FamilyPath familyPath = 2;
-  optional bool assignSeqNum = 3;
+  repeated FamilyPath family_path = 2;
+  optional bool assign_seq_num = 3;
 
   message FamilyPath {
     required bytes family = 1;
@@ -296,8 +296,8 @@ message BulkLoadHFileResponse {
 
 message CoprocessorServiceCall {
   required bytes row = 1;
-  required string serviceName = 2;
-  required string methodName = 3;
+  required string service_name = 2;
+  required string method_name = 3;
   required bytes request = 4;
 }
 
@@ -352,24 +352,24 @@ message MultiResponse {
 
 
 service ClientService {
-  rpc get(GetRequest)
+  rpc Get(GetRequest)
     returns(GetResponse);
 
-  rpc multiGet(MultiGetRequest)
+  rpc MultiGet(MultiGetRequest)
     returns(MultiGetResponse);
 
-  rpc mutate(MutateRequest)
+  rpc Mutate(MutateRequest)
     returns(MutateResponse);
 
-  rpc scan(ScanRequest)
+  rpc Scan(ScanRequest)
     returns(ScanResponse);
 
-  rpc bulkLoadHFile(BulkLoadHFileRequest)
+  rpc BulkLoadHFile(BulkLoadHFileRequest)
     returns(BulkLoadHFileResponse);
 
-  rpc execService(CoprocessorServiceRequest)
+  rpc ExecService(CoprocessorServiceRequest)
     returns(CoprocessorServiceResponse);
 
-  rpc multi(MultiRequest)
+  rpc Multi(MultiRequest)
     returns(MultiResponse);
 }

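The Scan message picks up most of the renames in this file; here is a builder sketch that exercises a few of them (the values are arbitrary and the wrapper class is illustrative).

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

public class ScanSketch {
  static ClientProtos.Scan buildScan(byte[] startRow, byte[] stopRow) {
    return ClientProtos.Scan.newBuilder()
        .setStartRow(ByteString.copyFrom(startRow))   // start_row = 3
        .setStopRow(ByteString.copyFrom(stopRow))     // stop_row = 4
        .setMaxVersions(1)                            // max_versions = 7
        .setCacheBlocks(true)                         // cache_blocks = 8
        .setMaxResultSize(2 * 1024 * 1024)            // max_result_size = 10
        .build();
  }
}
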
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/ClusterId.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/ClusterId.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/ClusterId.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/ClusterId.proto Wed Jul 10 16:30:32 2013
@@ -29,5 +29,5 @@ option optimize_for = SPEED;
  */
 message ClusterId {
   // This is the cluster id, a uuid as a String
-  required string clusterId = 1;
+  required string cluster_id = 1;
 }

Modified: hbase/trunk/hbase-protocol/src/main/protobuf/ClusterStatus.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/ClusterStatus.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/ClusterStatus.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/ClusterStatus.proto Wed Jul 10 16:30:32 2013
@@ -28,7 +28,7 @@ import "ClusterId.proto";
 import "FS.proto";
 
 message RegionState {
-  required RegionInfo regionInfo = 1;
+  required RegionInfo region_info = 1;
   required State state = 2;
   optional uint64 stamp = 3;
   enum State {
@@ -50,12 +50,12 @@ message RegionState {
 
 message RegionInTransition {
   required RegionSpecifier spec = 1;
-  required RegionState regionState = 2;
+  required RegionState region_state = 2;
 }
 
 message RegionLoad {
   /** the region specifier */
-  required RegionSpecifier regionSpecifier = 1;
+  required RegionSpecifier region_specifier = 1;
 
   /** the number of stores for the region */
   optional uint32 stores = 2;
@@ -64,65 +64,65 @@ message RegionLoad {
   optional uint32 storefiles = 3;
 
   /** the total size of the store files for the region, uncompressed, in MB */
-  optional uint32 storeUncompressedSizeMB = 4;
+  optional uint32 store_uncompressed_size_MB = 4;
 
   /** the current total size of the store files for the region, in MB */
-  optional uint32 storefileSizeMB = 5;
+  optional uint32 storefile_size_MB = 5;
 
   /** the current size of the memstore for the region, in MB */
-  optional uint32 memstoreSizeMB = 6;
+  optional uint32 memstore_size_MB = 6;
 
   /**
    * The current total size of root-level store file indexes for the region,
    * in MB. The same as {@link #rootIndexSizeKB} but in MB.
    */
-  optional uint32 storefileIndexSizeMB = 7;
+  optional uint32 storefile_index_size_MB = 7;
 
   /** the current total read requests made to region */
-  optional uint64 readRequestsCount = 8;
+  optional uint64 read_requests_count = 8;
 
   /** the current total write requests made to region */
-  optional uint64 writeRequestsCount = 9;
+  optional uint64 write_requests_count = 9;
 
   /** the total compacting key values in currently running compaction */
-  optional uint64 totalCompactingKVs = 10;
+  optional uint64 total_compacting_KVs = 10;
 
   /** the completed count of key values in currently running compaction */
-  optional uint64 currentCompactedKVs = 11;
+  optional uint64 current_compacted_KVs = 11;
 
    /** The current total size of root-level indexes for the region, in KB. */
-  optional uint32 rootIndexSizeKB = 12;
+  optional uint32 root_index_size_KB = 12;
 
   /** The total size of all index blocks, not just the root level, in KB. */
-  optional uint32 totalStaticIndexSizeKB = 13;
+  optional uint32 total_static_index_size_KB = 13;
 
   /**
    * The total size of all Bloom filter blocks, not just loaded into the
    * block cache, in KB.
    */
-  optional uint32 totalStaticBloomSizeKB = 14;
+  optional uint32 total_static_bloom_size_KB = 14;
 
   /** the most recent sequence Id from cache flush */
-  optional uint64 completeSequenceId = 15;
+  optional uint64 complete_sequence_id = 15;
 }
 
 /* Server-level protobufs */
 
 message ServerLoad {
   /** Number of requests since last report. */
-  optional uint32 numberOfRequests = 1;
+  optional uint32 number_of_requests = 1;
 
   /** Total Number of requests from the start of the region server. */
-  optional uint32 totalNumberOfRequests = 2;
+  optional uint32 total_number_of_requests = 2;
 
   /** the amount of used heap, in MB. */
-  optional uint32 usedHeapMB = 3;
+  optional uint32 used_heap_MB = 3;
 
   /** the maximum allowable size of the heap, in MB. */
-  optional uint32 maxHeapMB = 4;
+  optional uint32 max_heap_MB = 4;
 
   /** Information on the load of individual regions. */
-  repeated RegionLoad regionLoads = 5;
+  repeated RegionLoad region_loads = 5;
 
   /**
    * Regionserver-level coprocessors, e.g., WALObserver implementations.
@@ -132,39 +132,38 @@ message ServerLoad {
   repeated Coprocessor coprocessors = 6;
 
   /**
-   * Time when incremental (non-total) counts began being calculated (e.g. numberOfRequests)
+   * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
    * time is measured as the difference, measured in milliseconds, between the current time
    * and midnight, January 1, 1970 UTC.
    */
-  optional uint64 reportStartTime = 7;
+  optional uint64 report_start_time = 7;
 
   /**
    * Time when report was generated.
    * time is measured as the difference, measured in milliseconds, between the current time
    * and midnight, January 1, 1970 UTC.
    */
-  optional uint64 reportEndTime = 8;
+  optional uint64 report_end_time = 8;
 
   /**
    * The port number that this region server is hosting an info server on.
    */
-  optional uint32 infoServerPort = 9;
+  optional uint32 info_server_port = 9;
 }
 
 message LiveServerInfo {
   required ServerName server = 1;
-  required ServerLoad serverLoad = 2;
+  required ServerLoad server_load = 2;
 }
 
-
 message ClusterStatus {
-  optional HBaseVersionFileContent hbaseVersion = 1;
-  repeated LiveServerInfo liveServers = 2;
-  repeated ServerName deadServers = 3;
-  repeated RegionInTransition regionsInTransition = 4;
-  optional ClusterId clusterId = 5;
-  repeated Coprocessor masterCoprocessors = 6;
+  optional HBaseVersionFileContent hbase_version = 1;
+  repeated LiveServerInfo live_servers = 2;
+  repeated ServerName dead_servers = 3;
+  repeated RegionInTransition regions_in_transition = 4;
+  optional ClusterId cluster_id = 5;
+  repeated Coprocessor master_coprocessors = 6;
   optional ServerName master = 7;
-  repeated ServerName backupMasters = 8;
-  optional bool balancerOn = 9;
+  repeated ServerName backup_masters = 8;
+  optional bool balancer_on = 9;
 }

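Repeated snake_case fields such as live_servers still generate the usual *List accessors; a reading sketch over the renamed ClusterStatus (the print statements and the assumption that ServerName exposes getHostName() are for illustration only).

import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;

public class ClusterStatusSketch {
  static void dump(ClusterStatusProtos.ClusterStatus status) {
    for (ClusterStatusProtos.LiveServerInfo live : status.getLiveServersList()) {
      // server_load = 2 and total_number_of_requests = 2 map to these getters.
      long requests = live.getServerLoad().getTotalNumberOfRequests();
      System.out.println(live.getServer().getHostName() + ": " + requests);
    }
    if (status.hasBalancerOn()) {              // balancer_on = 9
      System.out.println("balancer on: " + status.getBalancerOn());
    }
  }
}
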
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/Comparator.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Comparator.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Comparator.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Comparator.proto Wed Jul 10 16:30:32 2013
@@ -28,7 +28,7 @@ option optimize_for = SPEED;
 
 message Comparator {
   required string name = 1;
-  optional bytes serializedComparator = 2;
+  optional bytes serialized_comparator = 2;
 }
 
 message ByteArrayComparable {
@@ -45,7 +45,7 @@ message BinaryPrefixComparator {
 
 message BitComparator {
   required ByteArrayComparable comparable = 1;
-  required BitwiseOp bitwiseOp = 2;
+  required BitwiseOp bitwise_op = 2;
 
   enum BitwiseOp {
     AND = 1;
@@ -59,7 +59,7 @@ message NullComparator {
 
 message RegexStringComparator {
   required string pattern = 1;
-  required int32 patternFlags = 2;
+  required int32 pattern_flags = 2;
   required string charset = 3;
 }
 

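A quick sketch for the renamed comparator fields, using RegexStringComparator as the example; the flag value 0, the charset, and the wrapper class are placeholders.

import org.apache.hadoop.hbase.protobuf.generated.ComparatorProtos;

public class RegexComparatorSketch {
  static ComparatorProtos.RegexStringComparator buildRegex(String pattern) {
    return ComparatorProtos.RegexStringComparator.newBuilder()
        .setPattern(pattern)     // required string pattern = 1
        .setPatternFlags(0)      // required int32 pattern_flags = 2
        .setCharset("UTF-8")     // required string charset = 3
        .build();
  }
}
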
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/ErrorHandling.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/ErrorHandling.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/ErrorHandling.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/ErrorHandling.proto Wed Jul 10 16:30:32 2013
@@ -28,10 +28,10 @@ option optimize_for = SPEED;
  * so we can serialize exceptions.
  */
 message StackTraceElementMessage {
-  optional string declaringClass = 1;
-  optional string methodName = 2;
-  optional string fileName = 3;
-  optional int32 lineNumber = 4;
+  optional string declaring_class = 1;
+  optional string method_name = 2;
+  optional string file_name = 3;
+  optional int32 line_number = 4;
 }
 
 /**
@@ -41,9 +41,9 @@ message StackTraceElementMessage {
  * (which should be another protobuffed class).
  */
 message GenericExceptionMessage {
-  optional string className = 1;
+  optional string class_name = 1;
   optional string message = 2;
-  optional bytes errorInfo = 3;
+  optional bytes error_info = 3;
   repeated StackTraceElementMessage trace = 4;
 }
 
@@ -53,6 +53,5 @@ message GenericExceptionMessage {
  */
 message ForeignExceptionMessage {
   optional string source = 1;
-  optional GenericExceptionMessage genericException = 2;
-
-}
\ No newline at end of file
+  optional GenericExceptionMessage generic_exception = 2;
+}

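The renamed StackTraceElementMessage maps one-to-one onto java.lang.StackTraceElement; a conversion sketch follows (the helper and class names are hypothetical).

import org.apache.hadoop.hbase.protobuf.generated.ErrorHandlingProtos;

public class StackTraceSketch {
  static ErrorHandlingProtos.StackTraceElementMessage toProto(StackTraceElement elem) {
    ErrorHandlingProtos.StackTraceElementMessage.Builder builder =
        ErrorHandlingProtos.StackTraceElementMessage.newBuilder()
            .setDeclaringClass(elem.getClassName())   // declaring_class = 1
            .setMethodName(elem.getMethodName())      // method_name = 2
            .setLineNumber(elem.getLineNumber());     // line_number = 4
    if (elem.getFileName() != null) {
      builder.setFileName(elem.getFileName());        // file_name = 3
    }
    return builder.build();
  }
}
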
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/Filter.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Filter.proto?rev=1501834&r1=1501833&r2=1501834&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Filter.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Filter.proto Wed Jul 10 16:30:32 2013
@@ -29,7 +29,7 @@ import "Comparator.proto";
 
 message Filter {
   required string name = 1;
-  optional bytes serializedFilter = 2;
+  optional bytes serialized_filter = 2;
 }
 
 message ColumnCountGetFilter {
@@ -39,7 +39,7 @@ message ColumnCountGetFilter {
 message ColumnPaginationFilter {
   required int32 limit = 1;
   optional int32 offset = 2;
-  optional bytes columnOffset = 3;
+  optional bytes column_offset = 3;
 }
 
 message ColumnPrefixFilter {
@@ -47,26 +47,26 @@ message ColumnPrefixFilter {
 }
 
 message ColumnRangeFilter {
-  optional bytes minColumn = 1;
-  optional bool minColumnInclusive = 2;
-  optional bytes maxColumn = 3;
-  optional bool maxColumnInclusive = 4;
+  optional bytes min_column = 1;
+  optional bool min_column_inclusive = 2;
+  optional bytes max_column = 3;
+  optional bool max_column_inclusive = 4;
 }
 
 message CompareFilter {
-  required CompareType compareOp = 1;
+  required CompareType compare_op = 1;
   optional Comparator comparator = 2;
 }
 
 message DependentColumnFilter {
-  required CompareFilter compareFilter = 1;
-  optional bytes columnFamily = 2;
-  optional bytes columnQualifier = 3;
-  optional bool dropDependentColumn = 4;
+  required CompareFilter compare_filter = 1;
+  optional bytes column_family = 2;
+  optional bytes column_qualifier = 3;
+  optional bool drop_dependent_column = 4;
 }
 
 message FamilyFilter {
-  required CompareFilter compareFilter = 1;
+  required CompareFilter compare_filter = 1;
 }
 
 message FilterList {
@@ -91,23 +91,23 @@ message FirstKeyValueMatchingQualifiersF
 }
 
 message FuzzyRowFilter {
-  repeated BytesBytesPair fuzzyKeysData = 1;
+  repeated BytesBytesPair fuzzy_keys_data = 1;
 }
 
 message InclusiveStopFilter {
-  optional bytes stopRowKey = 1;
+  optional bytes stop_row_key = 1;
 }
 
 message KeyOnlyFilter {
-  required bool lenAsVal = 1;
+  required bool len_as_val = 1;
 }
 
 message MultipleColumnPrefixFilter {
-  repeated bytes sortedPrefixes = 1;
+  repeated bytes sorted_prefixes = 1;
 }
 
 message PageFilter {
-  required int64 pageSize = 1;
+  required int64 page_size = 1;
 }
 
 message PrefixFilter {
@@ -115,7 +115,7 @@ message PrefixFilter {
 }
 
 message QualifierFilter {
-  required CompareFilter compareFilter = 1;
+  required CompareFilter compare_filter = 1;
 }
 
 message RandomRowFilter {
@@ -123,20 +123,20 @@ message RandomRowFilter {
 }
 
 message RowFilter {
-  required CompareFilter compareFilter = 1;
+  required CompareFilter compare_filter = 1;
 }
 
 message SingleColumnValueExcludeFilter {
-  required SingleColumnValueFilter singleColumnValueFilter = 1;
+  required SingleColumnValueFilter single_column_value_filter = 1;
 }
 
 message SingleColumnValueFilter {
-  optional bytes columnFamily = 1;
-  optional bytes columnQualifier = 2;
-  required CompareType compareOp = 3;
+  optional bytes column_family = 1;
+  optional bytes column_qualifier = 2;
+  required CompareType compare_op = 3;
   required Comparator comparator = 4;
-  optional bool filterIfMissing = 5;
-  optional bool latestVersionOnly = 6;
+  optional bool filter_if_missing = 5;
+  optional bool latest_version_only = 6;
 }
 
 message SkipFilter {
@@ -148,7 +148,7 @@ message TimestampsFilter {
 }
 
 message ValueFilter {
-  required CompareFilter compareFilter = 1;
+  required CompareFilter compare_filter = 1;
 }
 
 message WhileMatchFilter {