Posted to commits@hbase.apache.org by ap...@apache.org on 2015/02/12 23:49:49 UTC

[8/9] hbase git commit: HBASE-9531 a command line (hbase shell) interface to retrieve the replication metrics and show replication lag
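
For context, this part of HBASE-9531 introduces two new messages in ClusterStatusProtos, ReplicationLoadSink and ReplicationLoadSource, whose generated accessors and builders make up the diff below. As a minimal sketch of how the generated builder API can be exercised (the setter and getter names are taken from the generated code in this diff and standard protoc codegen conventions; the wrapper class and the sample values are hypothetical):

    import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;

    public class ReplicationLoadExample {
      public static void main(String[] args) {
        // Both ReplicationLoadSink fields are declared 'required', so both
        // setters must be called or isInitialized()/build() will fail.
        ClusterStatusProtos.ReplicationLoadSink sink =
            ClusterStatusProtos.ReplicationLoadSink.newBuilder()
                .setAgeOfLastAppliedOp(1500L)
                .setTimeStampsOfLastAppliedOp(System.currentTimeMillis())
                .build();

        // ReplicationLoadSource carries the per-peer source-side metrics,
        // including the replication lag surfaced by the new shell command;
        // all five of its fields are 'required' as well.
        ClusterStatusProtos.ReplicationLoadSource source =
            ClusterStatusProtos.ReplicationLoadSource.newBuilder()
                .setPeerID("1")
                .setAgeOfLastShippedOp(1500L)
                .setSizeOfLogQueue(1)
                .setTimeStampOfLastShippedOp(System.currentTimeMillis())
                .setReplicationLag(1500L)
                .build();

        System.out.println("sink age=" + sink.getAgeOfLastAppliedOp()
            + " source lag=" + source.getReplicationLag());
      }
    }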

http://git-wip-us.apache.org/repos/asf/hbase/blob/c391dfbd/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
index 5bc44ff..95a6e42 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
@@ -4348,273 +4348,48 @@ public final class ClusterStatusProtos {
     // @@protoc_insertion_point(class_scope:RegionLoad)
   }
 
-  public interface ServerLoadOrBuilder
+  public interface ReplicationLoadSinkOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
-    // optional uint32 number_of_requests = 1;
-    /**
-     * <code>optional uint32 number_of_requests = 1;</code>
-     *
-     * <pre>
-     ** Number of requests since last report. 
-     * </pre>
-     */
-    boolean hasNumberOfRequests();
-    /**
-     * <code>optional uint32 number_of_requests = 1;</code>
-     *
-     * <pre>
-     ** Number of requests since last report. 
-     * </pre>
-     */
-    int getNumberOfRequests();
-
-    // optional uint32 total_number_of_requests = 2;
-    /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
-     *
-     * <pre>
-     ** Total Number of requests from the start of the region server. 
-     * </pre>
-     */
-    boolean hasTotalNumberOfRequests();
-    /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
-     *
-     * <pre>
-     ** Total Number of requests from the start of the region server. 
-     * </pre>
-     */
-    int getTotalNumberOfRequests();
-
-    // optional uint32 used_heap_MB = 3;
-    /**
-     * <code>optional uint32 used_heap_MB = 3;</code>
-     *
-     * <pre>
-     ** the amount of used heap, in MB. 
-     * </pre>
-     */
-    boolean hasUsedHeapMB();
-    /**
-     * <code>optional uint32 used_heap_MB = 3;</code>
-     *
-     * <pre>
-     ** the amount of used heap, in MB. 
-     * </pre>
-     */
-    int getUsedHeapMB();
-
-    // optional uint32 max_heap_MB = 4;
-    /**
-     * <code>optional uint32 max_heap_MB = 4;</code>
-     *
-     * <pre>
-     ** the maximum allowable size of the heap, in MB. 
-     * </pre>
-     */
-    boolean hasMaxHeapMB();
-    /**
-     * <code>optional uint32 max_heap_MB = 4;</code>
-     *
-     * <pre>
-     ** the maximum allowable size of the heap, in MB. 
-     * </pre>
-     */
-    int getMaxHeapMB();
-
-    // repeated .RegionLoad region_loads = 5;
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> 
-        getRegionLoadsList();
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index);
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    int getRegionLoadsCount();
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> 
-        getRegionLoadsOrBuilderList();
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
-        int index);
-
-    // repeated .Coprocessor coprocessors = 6;
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> 
-        getCoprocessorsList();
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index);
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    int getCoprocessorsCount();
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> 
-        getCoprocessorsOrBuilderList();
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
-        int index);
-
-    // optional uint64 report_start_time = 7;
-    /**
-     * <code>optional uint64 report_start_time = 7;</code>
-     *
-     * <pre>
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
-     */
-    boolean hasReportStartTime();
-    /**
-     * <code>optional uint64 report_start_time = 7;</code>
-     *
-     * <pre>
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
-     */
-    long getReportStartTime();
-
-    // optional uint64 report_end_time = 8;
+    // required uint64 ageOfLastAppliedOp = 1;
     /**
-     * <code>optional uint64 report_end_time = 8;</code>
-     *
-     * <pre>
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
+     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
      */
-    boolean hasReportEndTime();
+    boolean hasAgeOfLastAppliedOp();
     /**
-     * <code>optional uint64 report_end_time = 8;</code>
-     *
-     * <pre>
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
+     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
      */
-    long getReportEndTime();
+    long getAgeOfLastAppliedOp();
 
-    // optional uint32 info_server_port = 9;
+    // required uint64 timeStampsOfLastAppliedOp = 2;
     /**
-     * <code>optional uint32 info_server_port = 9;</code>
-     *
-     * <pre>
-     **
-     * The port number that this region server is hosting an info server on.
-     * </pre>
+     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
      */
-    boolean hasInfoServerPort();
+    boolean hasTimeStampsOfLastAppliedOp();
     /**
-     * <code>optional uint32 info_server_port = 9;</code>
-     *
-     * <pre>
-     **
-     * The port number that this region server is hosting an info server on.
-     * </pre>
+     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
      */
-    int getInfoServerPort();
+    long getTimeStampsOfLastAppliedOp();
   }
   /**
-   * Protobuf type {@code ServerLoad}
+   * Protobuf type {@code ReplicationLoadSink}
    */
-  public static final class ServerLoad extends
+  public static final class ReplicationLoadSink extends
       com.google.protobuf.GeneratedMessage
-      implements ServerLoadOrBuilder {
-    // Use ServerLoad.newBuilder() to construct.
-    private ServerLoad(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      implements ReplicationLoadSinkOrBuilder {
+    // Use ReplicationLoadSink.newBuilder() to construct.
+    private ReplicationLoadSink(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
       super(builder);
       this.unknownFields = builder.getUnknownFields();
     }
-    private ServerLoad(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+    private ReplicationLoadSink(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
 
-    private static final ServerLoad defaultInstance;
-    public static ServerLoad getDefaultInstance() {
+    private static final ReplicationLoadSink defaultInstance;
+    public static ReplicationLoadSink getDefaultInstance() {
       return defaultInstance;
     }
 
-    public ServerLoad getDefaultInstanceForType() {
+    public ReplicationLoadSink getDefaultInstanceForType() {
       return defaultInstance;
     }
 
@@ -4624,7 +4399,7 @@ public final class ClusterStatusProtos {
         getUnknownFields() {
       return this.unknownFields;
     }
-    private ServerLoad(
+    private ReplicationLoadSink(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
         throws com.google.protobuf.InvalidProtocolBufferException {
@@ -4649,53 +4424,12 @@ public final class ClusterStatusProtos {
             }
             case 8: {
               bitField0_ |= 0x00000001;
-              numberOfRequests_ = input.readUInt32();
+              ageOfLastAppliedOp_ = input.readUInt64();
               break;
             }
             case 16: {
               bitField0_ |= 0x00000002;
-              totalNumberOfRequests_ = input.readUInt32();
-              break;
-            }
-            case 24: {
-              bitField0_ |= 0x00000004;
-              usedHeapMB_ = input.readUInt32();
-              break;
-            }
-            case 32: {
-              bitField0_ |= 0x00000008;
-              maxHeapMB_ = input.readUInt32();
-              break;
-            }
-            case 42: {
-              if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
-                regionLoads_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad>();
-                mutable_bitField0_ |= 0x00000010;
-              }
-              regionLoads_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.PARSER, extensionRegistry));
-              break;
-            }
-            case 50: {
-              if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
-                coprocessors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>();
-                mutable_bitField0_ |= 0x00000020;
-              }
-              coprocessors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.PARSER, extensionRegistry));
-              break;
-            }
-            case 56: {
-              bitField0_ |= 0x00000010;
-              reportStartTime_ = input.readUInt64();
-              break;
-            }
-            case 64: {
-              bitField0_ |= 0x00000020;
-              reportEndTime_ = input.readUInt64();
-              break;
-            }
-            case 72: {
-              bitField0_ |= 0x00000040;
-              infoServerPort_ = input.readUInt32();
+              timeStampsOfLastAppliedOp_ = input.readUInt64();
               break;
             }
           }
@@ -4706,1957 +4440,4480 @@ public final class ClusterStatusProtos {
         throw new com.google.protobuf.InvalidProtocolBufferException(
             e.getMessage()).setUnfinishedMessage(this);
       } finally {
-        if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
-          regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_);
-        }
-        if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
-          coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_);
-        }
         this.unknownFields = unknownFields.build();
         makeExtensionsImmutable();
       }
     }
     public static final com.google.protobuf.Descriptors.Descriptor
         getDescriptor() {
-      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor;
+      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor;
     }
 
     protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
         internalGetFieldAccessorTable() {
-      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_fieldAccessorTable
+      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable
           .ensureFieldAccessorsInitialized(
-              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class);
+              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class);
     }
 
-    public static com.google.protobuf.Parser<ServerLoad> PARSER =
-        new com.google.protobuf.AbstractParser<ServerLoad>() {
-      public ServerLoad parsePartialFrom(
+    public static com.google.protobuf.Parser<ReplicationLoadSink> PARSER =
+        new com.google.protobuf.AbstractParser<ReplicationLoadSink>() {
+      public ReplicationLoadSink parsePartialFrom(
           com.google.protobuf.CodedInputStream input,
           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
           throws com.google.protobuf.InvalidProtocolBufferException {
-        return new ServerLoad(input, extensionRegistry);
+        return new ReplicationLoadSink(input, extensionRegistry);
       }
     };
 
     @java.lang.Override
-    public com.google.protobuf.Parser<ServerLoad> getParserForType() {
+    public com.google.protobuf.Parser<ReplicationLoadSink> getParserForType() {
       return PARSER;
     }
 
     private int bitField0_;
-    // optional uint32 number_of_requests = 1;
-    public static final int NUMBER_OF_REQUESTS_FIELD_NUMBER = 1;
-    private int numberOfRequests_;
+    // required uint64 ageOfLastAppliedOp = 1;
+    public static final int AGEOFLASTAPPLIEDOP_FIELD_NUMBER = 1;
+    private long ageOfLastAppliedOp_;
     /**
-     * <code>optional uint32 number_of_requests = 1;</code>
-     *
-     * <pre>
-     ** Number of requests since last report. 
-     * </pre>
+     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
      */
-    public boolean hasNumberOfRequests() {
+    public boolean hasAgeOfLastAppliedOp() {
       return ((bitField0_ & 0x00000001) == 0x00000001);
     }
     /**
-     * <code>optional uint32 number_of_requests = 1;</code>
-     *
-     * <pre>
-     ** Number of requests since last report. 
-     * </pre>
+     * <code>required uint64 ageOfLastAppliedOp = 1;</code>
      */
-    public int getNumberOfRequests() {
-      return numberOfRequests_;
+    public long getAgeOfLastAppliedOp() {
+      return ageOfLastAppliedOp_;
     }
 
-    // optional uint32 total_number_of_requests = 2;
-    public static final int TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER = 2;
-    private int totalNumberOfRequests_;
+    // required uint64 timeStampsOfLastAppliedOp = 2;
+    public static final int TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER = 2;
+    private long timeStampsOfLastAppliedOp_;
     /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
-     *
-     * <pre>
-     ** Total Number of requests from the start of the region server. 
-     * </pre>
+     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
      */
-    public boolean hasTotalNumberOfRequests() {
+    public boolean hasTimeStampsOfLastAppliedOp() {
       return ((bitField0_ & 0x00000002) == 0x00000002);
     }
     /**
-     * <code>optional uint32 total_number_of_requests = 2;</code>
-     *
-     * <pre>
-     ** Total Number of requests from the start of the region server. 
-     * </pre>
+     * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
      */
-    public int getTotalNumberOfRequests() {
-      return totalNumberOfRequests_;
+    public long getTimeStampsOfLastAppliedOp() {
+      return timeStampsOfLastAppliedOp_;
     }
 
-    // optional uint32 used_heap_MB = 3;
-    public static final int USED_HEAP_MB_FIELD_NUMBER = 3;
-    private int usedHeapMB_;
-    /**
-     * <code>optional uint32 used_heap_MB = 3;</code>
-     *
-     * <pre>
-     ** the amount of used heap, in MB. 
-     * </pre>
-     */
-    public boolean hasUsedHeapMB() {
-      return ((bitField0_ & 0x00000004) == 0x00000004);
+    private void initFields() {
+      ageOfLastAppliedOp_ = 0L;
+      timeStampsOfLastAppliedOp_ = 0L;
     }
-    /**
-     * <code>optional uint32 used_heap_MB = 3;</code>
-     *
-     * <pre>
-     ** the amount of used heap, in MB. 
-     * </pre>
-     */
-    public int getUsedHeapMB() {
-      return usedHeapMB_;
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasAgeOfLastAppliedOp()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasTimeStampsOfLastAppliedOp()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
     }
 
-    // optional uint32 max_heap_MB = 4;
-    public static final int MAX_HEAP_MB_FIELD_NUMBER = 4;
-    private int maxHeapMB_;
-    /**
-     * <code>optional uint32 max_heap_MB = 4;</code>
-     *
-     * <pre>
-     ** the maximum allowable size of the heap, in MB. 
-     * </pre>
-     */
-    public boolean hasMaxHeapMB() {
-      return ((bitField0_ & 0x00000008) == 0x00000008);
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(1, ageOfLastAppliedOp_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeUInt64(2, timeStampsOfLastAppliedOp_);
+      }
+      getUnknownFields().writeTo(output);
     }
-    /**
-     * <code>optional uint32 max_heap_MB = 4;</code>
-     *
-     * <pre>
-     ** the maximum allowable size of the heap, in MB. 
-     * </pre>
-     */
-    public int getMaxHeapMB() {
-      return maxHeapMB_;
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(1, ageOfLastAppliedOp_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(2, timeStampsOfLastAppliedOp_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
     }
 
-    // repeated .RegionLoad region_loads = 5;
-    public static final int REGION_LOADS_FIELD_NUMBER = 5;
-    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> regionLoads_;
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> getRegionLoadsList() {
-      return regionLoads_;
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
     }
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> 
-        getRegionLoadsOrBuilderList() {
-      return regionLoads_;
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) obj;
+
+      boolean result = true;
+      result = result && (hasAgeOfLastAppliedOp() == other.hasAgeOfLastAppliedOp());
+      if (hasAgeOfLastAppliedOp()) {
+        result = result && (getAgeOfLastAppliedOp()
+            == other.getAgeOfLastAppliedOp());
+      }
+      result = result && (hasTimeStampsOfLastAppliedOp() == other.hasTimeStampsOfLastAppliedOp());
+      if (hasTimeStampsOfLastAppliedOp()) {
+        result = result && (getTimeStampsOfLastAppliedOp()
+            == other.getTimeStampsOfLastAppliedOp());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
     }
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    public int getRegionLoadsCount() {
-      return regionLoads_.size();
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasAgeOfLastAppliedOp()) {
+        hash = (37 * hash) + AGEOFLASTAPPLIEDOP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getAgeOfLastAppliedOp());
+      }
+      if (hasTimeStampsOfLastAppliedOp()) {
+        hash = (37 * hash) + TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getTimeStampsOfLastAppliedOp());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
     }
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) {
-      return regionLoads_.get(index);
+
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
     }
-    /**
-     * <code>repeated .RegionLoad region_loads = 5;</code>
-     *
-     * <pre>
-     ** Information on the load of individual regions. 
-     * </pre>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
-        int index) {
-      return regionLoads_.get(index);
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
     }
-
-    // repeated .Coprocessor coprocessors = 6;
-    public static final int COPROCESSORS_FIELD_NUMBER = 6;
-    private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> coprocessors_;
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> getCoprocessorsList() {
-      return coprocessors_;
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
     }
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> 
-        getCoprocessorsOrBuilderList() {
-      return coprocessors_;
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
     }
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    public int getCoprocessorsCount() {
-      return coprocessors_.size();
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
     }
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) {
-      return coprocessors_.get(index);
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
     }
-    /**
-     * <code>repeated .Coprocessor coprocessors = 6;</code>
-     *
-     * <pre>
-     **
-     * Regionserver-level coprocessors, e.g., WALObserver implementations.
-     * Region-level coprocessors, on the other hand, are stored inside RegionLoad
-     * objects.
-     * </pre>
-     */
-    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
-        int index) {
-      return coprocessors_.get(index);
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
     }
-
-    // optional uint64 report_start_time = 7;
-    public static final int REPORT_START_TIME_FIELD_NUMBER = 7;
-    private long reportStartTime_;
-    /**
-     * <code>optional uint64 report_start_time = 7;</code>
-     *
-     * <pre>
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
-     */
-    public boolean hasReportStartTime() {
-      return ((bitField0_ & 0x00000010) == 0x00000010);
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
     }
-    /**
-     * <code>optional uint64 report_start_time = 7;</code>
-     *
-     * <pre>
-     **
-     * Time when incremental (non-total) counts began being calculated (e.g. number_of_requests)
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
-     */
-    public long getReportStartTime() {
-      return reportStartTime_;
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
     }
-
-    // optional uint64 report_end_time = 8;
-    public static final int REPORT_END_TIME_FIELD_NUMBER = 8;
-    private long reportEndTime_;
-    /**
-     * <code>optional uint64 report_end_time = 8;</code>
-     *
-     * <pre>
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
-     */
-    public boolean hasReportEndTime() {
-      return ((bitField0_ & 0x00000020) == 0x00000020);
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
     }
-    /**
-     * <code>optional uint64 report_end_time = 8;</code>
-     *
-     * <pre>
-     **
-     * Time when report was generated.
-     * time is measured as the difference, measured in milliseconds, between the current time
-     * and midnight, January 1, 1970 UTC.
-     * </pre>
-     */
-    public long getReportEndTime() {
-      return reportEndTime_;
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink prototype) {
+      return newBuilder().mergeFrom(prototype);
     }
+    public Builder toBuilder() { return newBuilder(this); }
 
-    // optional uint32 info_server_port = 9;
-    public static final int INFO_SERVER_PORT_FIELD_NUMBER = 9;
-    private int infoServerPort_;
-    /**
-     * <code>optional uint32 info_server_port = 9;</code>
-     *
-     * <pre>
-     **
-     * The port number that this region server is hosting an info server on.
-     * </pre>
-     */
-    public boolean hasInfoServerPort() {
-      return ((bitField0_ & 0x00000040) == 0x00000040);
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
     }
     /**
-     * <code>optional uint32 info_server_port = 9;</code>
-     *
-     * <pre>
-     **
-     * The port number that this region server is hosting an info server on.
-     * </pre>
+     * Protobuf type {@code ReplicationLoadSink}
      */
-    public int getInfoServerPort() {
-      return infoServerPort_;
-    }
-
-    private void initFields() {
-      numberOfRequests_ = 0;
-      totalNumberOfRequests_ = 0;
-      usedHeapMB_ = 0;
-      maxHeapMB_ = 0;
-      regionLoads_ = java.util.Collections.emptyList();
-      coprocessors_ = java.util.Collections.emptyList();
-      reportStartTime_ = 0L;
-      reportEndTime_ = 0L;
-      infoServerPort_ = 0;
-    }
-    private byte memoizedIsInitialized = -1;
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized != -1) return isInitialized == 1;
-
-      for (int i = 0; i < getRegionLoadsCount(); i++) {
-        if (!getRegionLoads(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor;
       }
-      for (int i = 0; i < getCoprocessorsCount(); i++) {
-        if (!getCoprocessors(i).isInitialized()) {
-          memoizedIsInitialized = 0;
-          return false;
-        }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class);
       }
-      memoizedIsInitialized = 1;
-      return true;
-    }
 
-    public void writeTo(com.google.protobuf.CodedOutputStream output)
-                        throws java.io.IOException {
-      getSerializedSize();
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        output.writeUInt32(1, numberOfRequests_);
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
       }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        output.writeUInt32(2, totalNumberOfRequests_);
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
       }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        output.writeUInt32(3, usedHeapMB_);
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
       }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        output.writeUInt32(4, maxHeapMB_);
+      private static Builder create() {
+        return new Builder();
       }
-      for (int i = 0; i < regionLoads_.size(); i++) {
-        output.writeMessage(5, regionLoads_.get(i));
+
+      public Builder clear() {
+        super.clear();
+        ageOfLastAppliedOp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        timeStampsOfLastAppliedOp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
       }
-      for (int i = 0; i < coprocessors_.size(); i++) {
-        output.writeMessage(6, coprocessors_.get(i));
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
       }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
-        output.writeUInt64(7, reportStartTime_);
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor;
       }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
-        output.writeUInt64(8, reportEndTime_);
+
+      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
       }
-      if (((bitField0_ & 0x00000040) == 0x00000040)) {
-        output.writeUInt32(9, infoServerPort_);
-      }
-      getUnknownFields().writeTo(output);
-    }
-
-    private int memoizedSerializedSize = -1;
-    public int getSerializedSize() {
-      int size = memoizedSerializedSize;
-      if (size != -1) return size;
 
-      size = 0;
-      if (((bitField0_ & 0x00000001) == 0x00000001)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt32Size(1, numberOfRequests_);
-      }
-      if (((bitField0_ & 0x00000002) == 0x00000002)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt32Size(2, totalNumberOfRequests_);
-      }
-      if (((bitField0_ & 0x00000004) == 0x00000004)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt32Size(3, usedHeapMB_);
-      }
-      if (((bitField0_ & 0x00000008) == 0x00000008)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt32Size(4, maxHeapMB_);
-      }
-      for (int i = 0; i < regionLoads_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(5, regionLoads_.get(i));
-      }
-      for (int i = 0; i < coprocessors_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeMessageSize(6, coprocessors_.get(i));
-      }
-      if (((bitField0_ & 0x00000010) == 0x00000010)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(7, reportStartTime_);
-      }
-      if (((bitField0_ & 0x00000020) == 0x00000020)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt64Size(8, reportEndTime_);
-      }
-      if (((bitField0_ & 0x00000040) == 0x00000040)) {
-        size += com.google.protobuf.CodedOutputStream
-          .computeUInt32Size(9, infoServerPort_);
+      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink build() {
+        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
       }
-      size += getUnknownFields().getSerializedSize();
-      memoizedSerializedSize = size;
-      return size;
-    }
-
-    private static final long serialVersionUID = 0L;
-    @java.lang.Override
-    protected java.lang.Object writeReplace()
-        throws java.io.ObjectStreamException {
-      return super.writeReplace();
-    }
 
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-       return true;
-      }
-      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad)) {
-        return super.equals(obj);
+      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.ageOfLastAppliedOp_ = ageOfLastAppliedOp_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.timeStampsOfLastAppliedOp_ = timeStampsOfLastAppliedOp_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
       }
-      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) obj;
 
-      boolean result = true;
-      result = result && (hasNumberOfRequests() == other.hasNumberOfRequests());
-      if (hasNumberOfRequests()) {
-        result = result && (getNumberOfRequests()
-            == other.getNumberOfRequests());
-      }
-      result = result && (hasTotalNumberOfRequests() == other.hasTotalNumberOfRequests());
-      if (hasTotalNumberOfRequests()) {
-        result = result && (getTotalNumberOfRequests()
-            == other.getTotalNumberOfRequests());
-      }
-      result = result && (hasUsedHeapMB() == other.hasUsedHeapMB());
-      if (hasUsedHeapMB()) {
-        result = result && (getUsedHeapMB()
-            == other.getUsedHeapMB());
-      }
-      result = result && (hasMaxHeapMB() == other.hasMaxHeapMB());
-      if (hasMaxHeapMB()) {
-        result = result && (getMaxHeapMB()
-            == other.getMaxHeapMB());
-      }
-      result = result && getRegionLoadsList()
-          .equals(other.getRegionLoadsList());
-      result = result && getCoprocessorsList()
-          .equals(other.getCoprocessorsList());
-      result = result && (hasReportStartTime() == other.hasReportStartTime());
-      if (hasReportStartTime()) {
-        result = result && (getReportStartTime()
-            == other.getReportStartTime());
-      }
-      result = result && (hasReportEndTime() == other.hasReportEndTime());
-      if (hasReportEndTime()) {
-        result = result && (getReportEndTime()
-            == other.getReportEndTime());
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
       }
-      result = result && (hasInfoServerPort() == other.hasInfoServerPort());
-      if (hasInfoServerPort()) {
-        result = result && (getInfoServerPort()
-            == other.getInfoServerPort());
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) return this;
+        if (other.hasAgeOfLastAppliedOp()) {
+          setAgeOfLastAppliedOp(other.getAgeOfLastAppliedOp());
+        }
+        if (other.hasTimeStampsOfLastAppliedOp()) {
+          setTimeStampsOfLastAppliedOp(other.getTimeStampsOfLastAppliedOp());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
       }
-      result = result &&
-          getUnknownFields().equals(other.getUnknownFields());
-      return result;
-    }
 
-    private int memoizedHashCode = 0;
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
+      public final boolean isInitialized() {
+        if (!hasAgeOfLastAppliedOp()) {
+          
+          return false;
+        }
+        if (!hasTimeStampsOfLastAppliedOp()) {
+          
+          return false;
+        }
+        return true;
       }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptorForType().hashCode();
-      if (hasNumberOfRequests()) {
-        hash = (37 * hash) + NUMBER_OF_REQUESTS_FIELD_NUMBER;
-        hash = (53 * hash) + getNumberOfRequests();
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
       }
-      if (hasTotalNumberOfRequests()) {
-        hash = (37 * hash) + TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER;
-        hash = (53 * hash) + getTotalNumberOfRequests();
+      private int bitField0_;
+
+      // required uint64 ageOfLastAppliedOp = 1;
+      private long ageOfLastAppliedOp_ ;
+      /**
+       * <code>required uint64 ageOfLastAppliedOp = 1;</code>
+       */
+      public boolean hasAgeOfLastAppliedOp() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
       }
-      if (hasUsedHeapMB()) {
-        hash = (37 * hash) + USED_HEAP_MB_FIELD_NUMBER;
-        hash = (53 * hash) + getUsedHeapMB();
+      /**
+       * <code>required uint64 ageOfLastAppliedOp = 1;</code>
+       */
+      public long getAgeOfLastAppliedOp() {
+        return ageOfLastAppliedOp_;
       }
-      if (hasMaxHeapMB()) {
-        hash = (37 * hash) + MAX_HEAP_MB_FIELD_NUMBER;
-        hash = (53 * hash) + getMaxHeapMB();
+      /**
+       * <code>required uint64 ageOfLastAppliedOp = 1;</code>
+       */
+      public Builder setAgeOfLastAppliedOp(long value) {
+        bitField0_ |= 0x00000001;
+        ageOfLastAppliedOp_ = value;
+        onChanged();
+        return this;
       }
-      if (getRegionLoadsCount() > 0) {
-        hash = (37 * hash) + REGION_LOADS_FIELD_NUMBER;
-        hash = (53 * hash) + getRegionLoadsList().hashCode();
+      /**
+       * <code>required uint64 ageOfLastAppliedOp = 1;</code>
+       */
+      public Builder clearAgeOfLastAppliedOp() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        ageOfLastAppliedOp_ = 0L;
+        onChanged();
+        return this;
       }
-      if (getCoprocessorsCount() > 0) {
-        hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER;
-        hash = (53 * hash) + getCoprocessorsList().hashCode();
+
+      // required uint64 timeStampsOfLastAppliedOp = 2;
+      private long timeStampsOfLastAppliedOp_ ;
+      /**
+       * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
+       */
+      public boolean hasTimeStampsOfLastAppliedOp() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
       }
-      if (hasReportStartTime()) {
-        hash = (37 * hash) + REPORT_START_TIME_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getReportStartTime());
+      /**
+       * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
+       */
+      public long getTimeStampsOfLastAppliedOp() {
+        return timeStampsOfLastAppliedOp_;
       }
-      if (hasReportEndTime()) {
-        hash = (37 * hash) + REPORT_END_TIME_FIELD_NUMBER;
-        hash = (53 * hash) + hashLong(getReportEndTime());
+      /**
+       * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
+       */
+      public Builder setTimeStampsOfLastAppliedOp(long value) {
+        bitField0_ |= 0x00000002;
+        timeStampsOfLastAppliedOp_ = value;
+        onChanged();
+        return this;
       }
-      if (hasInfoServerPort()) {
-        hash = (37 * hash) + INFO_SERVER_PORT_FIELD_NUMBER;
-        hash = (53 * hash) + getInfoServerPort();
+      /**
+       * <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
+       */
+      public Builder clearTimeStampsOfLastAppliedOp() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        timeStampsOfLastAppliedOp_ = 0L;
+        onChanged();
+        return this;
       }
-      hash = (29 * hash) + getUnknownFields().hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
 
-    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(byte[] data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
+      // @@protoc_insertion_point(builder_scope:ReplicationLoadSink)
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
-        byte[] data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
+
+    static {
+      defaultInstance = new ReplicationLoadSink(true);
+      defaultInstance.initFields();
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
+
+    // @@protoc_insertion_point(class_scope:ReplicationLoadSink)
+  }
+
+  public interface ReplicationLoadSourceOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string peerID = 1;
+    /**
+     * <code>required string peerID = 1;</code>
+     */
+    boolean hasPeerID();
+    /**
+     * <code>required string peerID = 1;</code>
+     */
+    java.lang.String getPeerID();
+    /**
+     * <code>required string peerID = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getPeerIDBytes();
+
+    // required uint64 ageOfLastShippedOp = 2;
+    /**
+     * <code>required uint64 ageOfLastShippedOp = 2;</code>
+     */
+    boolean hasAgeOfLastShippedOp();
+    /**
+     * <code>required uint64 ageOfLastShippedOp = 2;</code>
+     */
+    long getAgeOfLastShippedOp();
+
+    // required uint32 sizeOfLogQueue = 3;
+    /**
+     * <code>required uint32 sizeOfLogQueue = 3;</code>
+     */
+    boolean hasSizeOfLogQueue();
+    /**
+     * <code>required uint32 sizeOfLogQueue = 3;</code>
+     */
+    int getSizeOfLogQueue();
+
+    // required uint64 timeStampOfLastShippedOp = 4;
+    /**
+     * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
+     */
+    boolean hasTimeStampOfLastShippedOp();
+    /**
+     * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
+     */
+    long getTimeStampOfLastShippedOp();
+
+    // required uint64 replicationLag = 5;
+    /**
+     * <code>required uint64 replicationLag = 5;</code>
+     */
+    boolean hasReplicationLag();
+    /**
+     * <code>required uint64 replicationLag = 5;</code>
+     */
+    long getReplicationLag();
+  }
+  /**
+   * Protobuf type {@code ReplicationLoadSource}
+   */
+  public static final class ReplicationLoadSource extends
+      com.google.protobuf.GeneratedMessage
+      implements ReplicationLoadSourceOrBuilder {
+    // Use ReplicationLoadSource.newBuilder() to construct.
+    private ReplicationLoadSource(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom(java.io.InputStream input)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input);
+    private ReplicationLoadSource(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final ReplicationLoadSource defaultInstance;
+    public static ReplicationLoadSource getDefaultInstance() {
+      return defaultInstance;
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom(
-        java.io.InputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+
+    public ReplicationLoadSource getDefaultInstanceForType() {
+      return defaultInstance;
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
-        com.google.protobuf.CodedInputStream input)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input);
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
     }
-    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
+    private ReplicationLoadSource(
         com.google.protobuf.CodedInputStream input,
         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return PARSER.parseFrom(input, extensionRegistry);
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              peerID_ = input.readBytes();
+              break;
+            }
+            case 16: {
+              bitField0_ |= 0x00000002;
+              ageOfLastShippedOp_ = input.readUInt64();
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              sizeOfLogQueue_ = input.readUInt32();
+              break;
+            }
+            case 32: {
+              bitField0_ |= 0x00000008;
+              timeStampOfLastShippedOp_ = input.readUInt64();
+              break;
+            }
+            case 40: {
+              bitField0_ |= 0x00000010;
+              replicationLag_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor;
     }
 
-    public static Builder newBuilder() { return Builder.create(); }
-    public Builder newBuilderForType() { return newBuilder(); }
-    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad prototype) {
-      return newBuilder().mergeFrom(prototype);
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class);
     }
-    public Builder toBuilder() { return newBuilder(this); }
+
+    public static com.google.protobuf.Parser<ReplicationLoadSource> PARSER =
+        new com.google.protobuf.AbstractParser<ReplicationLoadSource>() {
+      public ReplicationLoadSource parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new ReplicationLoadSource(input, extensionRegistry);
+      }
+    };
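The public PARSER field above is the hook that the static parseFrom overloads
further down delegate to; a sketch of a direct round trip, assuming `source`
was built as in the earlier example:

    // Serialize and re-parse through the generated parser.
    static ReplicationLoadSource roundTrip(ReplicationLoadSource source)
        throws com.google.protobuf.InvalidProtocolBufferException {
      byte[] bytes = source.toByteArray();
      return ReplicationLoadSource.PARSER.parseFrom(bytes);
    }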
 
     @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
+    public com.google.protobuf.Parser<ReplicationLoadSource> getParserForType() {
+      return PARSER;
     }
+
+    private int bitField0_;
+    // required string peerID = 1;
+    public static final int PEERID_FIELD_NUMBER = 1;
+    private java.lang.Object peerID_;
     /**
-     * Protobuf type {@code ServerLoad}
+     * <code>required string peerID = 1;</code>
      */
-    public static final class Builder extends
-        com.google.protobuf.GeneratedMessage.Builder<Builder>
-       implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor
-          getDescriptor() {
-        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor;
+    public boolean hasPeerID() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string peerID = 1;</code>
+     */
+    public java.lang.String getPeerID() {
+      java.lang.Object ref = peerID_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          peerID_ = s;
+        }
+        return s;
       }
-
-      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class);
+    }
+    /**
+     * <code>required string peerID = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getPeerIDBytes() {
+      java.lang.Object ref = peerID_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        peerID_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
       }
+    }
 
-      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
+    // required uint64 ageOfLastShippedOp = 2;
+    public static final int AGEOFLASTSHIPPEDOP_FIELD_NUMBER = 2;
+    private long ageOfLastShippedOp_;
+    /**
+     * <code>required uint64 ageOfLastShippedOp = 2;</code>
+     */
+    public boolean hasAgeOfLastShippedOp() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required uint64 ageOfLastShippedOp = 2;</code>
+     */
+    public long getAgeOfLastShippedOp() {
+      return ageOfLastShippedOp_;
+    }
 
-      private Builder(
-          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
-          getRegionLoadsFieldBuilder();
-          getCoprocessorsFieldBuilder();
-        }
-      }
-      private static Builder create() {
-        return new Builder();
-      }
+    // required uint32 sizeOfLogQueue = 3;
+    public static final int SIZEOFLOGQUEUE_FIELD_NUMBER = 3;
+    private int sizeOfLogQueue_;
+    /**
+     * <code>required uint32 sizeOfLogQueue = 3;</code>
+     */
+    public boolean hasSizeOfLogQueue() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>required uint32 sizeOfLogQueue = 3;</code>
+     */
+    public int getSizeOfLogQueue() {
+      return sizeOfLogQueue_;
+    }
 
-      public Builder clear() {
-        super.clear();
-        numberOfRequests_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000001);
-        totalNumberOfRequests_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000002);
-        usedHeapMB_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000004);
-        maxHeapMB_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000008);
-        if (regionLoadsBuilder_ == null) {
-          regionLoads_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000010);
-        } else {
-          regionLoadsBuilder_.clear();
-        }
-        if (coprocessorsBuilder_ == null) {
-          coprocessors_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000020);
-        } else {
-          coprocessorsBuilder_.clear();
-        }
-        reportStartTime_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000040);
-        reportEndTime_ = 0L;
-        bitField0_ = (bitField0_ & ~0x00000080);
-        infoServerPort_ = 0;
-        bitField0_ = (bitField0_ & ~0x00000100);
-        return this;
-      }
+    // required uint64 timeStampOfLastShippedOp = 4;
+    public static final int TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER = 4;
+    private long timeStampOfLastShippedOp_;
+    /**
+     * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
+     */
+    public boolean hasTimeStampOfLastShippedOp() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
+     */
+    public long getTimeStampOfLastShippedOp() {
+      return timeStampOfLastShippedOp_;
+    }
 
-      public Builder clone() {
-        return create().mergeFrom(buildPartial());
-      }
+    // required uint64 replicationLag = 5;
+    public static final int REPLICATIONLAG_FIELD_NUMBER = 5;
+    private long replicationLag_;
+    /**
+     * <code>required uint64 replicationLag = 5;</code>
+     */
+    public boolean hasReplicationLag() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    /**
+     * <code>required uint64 replicationLag = 5;</code>
+     */
+    public long getReplicationLag() {
+      return replicationLag_;
+    }
 
-      public com.google.protobuf.Descriptors.Descriptor
-          getDescriptorForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor;
-      }
+    private void initFields() {
+      peerID_ = "";
+      ageOfLastShippedOp_ = 0L;
+      sizeOfLogQueue_ = 0;
+      timeStampOfLastShippedOp_ = 0L;
+      replicationLag_ = 0L;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
 
-      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getDefaultInstanceForType() {
-        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
+      if (!hasPeerID()) {
+        memoizedIsInitialized = 0;
+        return false;
       }
-
-      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad build() {
-        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = buildPartial();
+      if (!hasAgeOfLastShippedOp()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasSizeOfLogQueue()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasTimeStampOfLastShippedOp()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasReplicationLag()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
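Since all five fields are required, isInitialized() reports true only once each
has-bit is set; build() enforces this while buildPartial() skips the check. A
sketch, with the builder only half populated:

    ReplicationLoadSource.Builder b = ReplicationLoadSource.newBuilder()
        .setPeerID("1");                       // remaining required fields unset
    ReplicationLoadSource partial = b.buildPartial();
    assert !partial.isInitialized();
    try {
      b.build();                               // throws: required fields missing
    } catch (com.google.protobuf.UninitializedMessageException e) {
      // expected for a half-populated builder
    }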
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getPeerIDBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeUInt64(2, ageOfLastShippedOp_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeUInt32(3, sizeOfLogQueue_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeUInt64(4, timeStampOfLastShippedOp_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeUInt64(5, replicationLag_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getPeerIDBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(2, ageOfLastShippedOp_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt32Size(3, sizeOfLogQueue_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(4, timeStampOfLastShippedOp_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(5, replicationLag_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) obj;
+
+      boolean result = true;
+      result = result && (hasPeerID() == other.hasPeerID());
+      if (hasPeerID()) {
+        result = result && getPeerID()
+            .equals(other.getPeerID());
+      }
+      result = result && (hasAgeOfLastShippedOp() == other.hasAgeOfLastShippedOp());
+      if (hasAgeOfLastShippedOp()) {
+        result = result && (getAgeOfLastShippedOp()
+            == other.getAgeOfLastShippedOp());
+      }
+      result = result && (hasSizeOfLogQueue() == other.hasSizeOfLogQueue());
+      if (hasSizeOfLogQueue()) {
+        result = result && (getSizeOfLogQueue()
+            == other.getSizeOfLogQueue());
+      }
+      result = result && (hasTimeStampOfLastShippedOp() == other.hasTimeStampOfLastShippedOp());
+      if (hasTimeStampOfLastShippedOp()) {
+        result = result && (getTimeStampOfLastShippedOp()
+            == other.getTimeStampOfLastShippedOp());
+      }
+      result = result && (hasReplicationLag() == other.hasReplicationLag());
+      if (hasReplicationLag()) {
+        result = result && (getReplicationLag()
+            == other.getReplicationLag());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasPeerID()) {
+        hash = (37 * hash) + PEERID_FIELD_NUMBER;
+        hash = (53 * hash) + getPeerID().hashCode();
+      }
+      if (hasAgeOfLastShippedOp()) {
+        hash = (37 * hash) + AGEOFLASTSHIPPEDOP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getAgeOfLastShippedOp());
+      }
+      if (hasSizeOfLogQueue()) {
+        hash = (37 * hash) + SIZEOFLOGQUEUE_FIELD_NUMBER;
+        hash = (53 * hash) + getSizeOfLogQueue();
+      }
+      if (hasTimeStampOfLastShippedOp()) {
+        hash = (37 * hash) + TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getTimeStampOfLastShippedOp());
+      }
+      if (hasReplicationLag()) {
+        hash = (37 * hash) + REPLICATIONLAG_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getReplicationLag());
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
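The parseDelimitedFrom overloads pair with MessageLite.writeDelimitedTo, which
length-prefixes each message so several can share one stream; a sketch under
that assumption:

    static void delimitedRoundTrip(ReplicationLoadSource source)
        throws java.io.IOException {
      java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
      source.writeDelimitedTo(out);            // varint length, then the body
      source.writeDelimitedTo(out);            // second copy on the same stream
      java.io.ByteArrayInputStream in =
          new java.io.ByteArrayInputStream(out.toByteArray());
      ReplicationLoadSource first = ReplicationLoadSource.parseDelimitedFrom(in);
      ReplicationLoadSource second = ReplicationLoadSource.parseDelimitedFrom(in);
    }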
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code ReplicationLoadSource}
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        peerID_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        ageOfLastShippedOp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        sizeOfLogQueue_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        timeStampOfLastShippedOp_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000008);
+        replicationLag_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000010);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource build() {
+        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = buildPartial();
         if (!result.isInitialized()) {
           throw newUninitializedMessageException(result);
         }
         return result;
       }
 
-      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad buildPartial() {
-        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad(this);
+      public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource(this);
         int from_bitField0_ = bitField0_;
         int to_bitField0_ = 0;
         if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
           to_bitField0_ |= 0x00000001;
         }
-        result.numberOfRequests_ = numberOfRequests_;
+        result.peerID_ = peerID_;
         if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
           to_bitField0_ |= 0x00000002;
         }
-        result.totalNumberOfRequests_ = totalNumberOfRequests_;
+        result.ageOfLastShippedOp_ = ageOfLastShippedOp_;
         if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
           to_bitField0_ |= 0x00000004;
         }
-        result.usedHeapMB_ = usedHeapMB_;
+        result.sizeOfLogQueue_ = sizeOfLogQueue_;
         if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
           to_bitField0_ |= 0x00000008;
         }
-        result.maxHeapMB_ = maxHeapMB_;
+        result.timeStampOfLastShippedOp_ = timeStampOfLastShippedOp_;
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        result.replicationLag_ = replicationLag_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()) return this;
+        if (other.hasPeerID()) {
+          bitField0_ |= 0x00000001;
+          peerID_ = other.peerID_;
+          onChanged();
+        }
+        if (other.hasAgeOfLastShippedOp()) {
+          setAgeOfLastShippedOp(other.getAgeOfLastShippedOp());
+        }
+        if (other.hasSizeOfLogQueue()) {
+          setSizeOfLogQueue(other.getSizeOfLogQueue());
+        }
+        if (other.hasTimeStampOfLastShippedOp()) {
+          setTimeStampOfLastShippedOp(other.getTimeStampOfLastShippedOp());
+        }
+        if (other.hasReplicationLag()) {
+          setReplicationLag(other.getReplicationLag());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
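mergeFrom copies only the fields that are set on the other message, so a sparse
update can overlay a fully built base; a sketch with illustrative values:

    ReplicationLoadSource base = ReplicationLoadSource.newBuilder()
        .setPeerID("1").setAgeOfLastShippedOp(0L).setSizeOfLogQueue(0)
        .setTimeStampOfLastShippedOp(0L).setReplicationLag(0L).build();
    ReplicationLoadSource update = ReplicationLoadSource.newBuilder()
        .setReplicationLag(2000L)
        .buildPartial();                       // only replicationLag is set
    ReplicationLoadSource merged = ReplicationLoadSource.newBuilder(base)
        .mergeFrom(update).build();            // base's fields kept; lag now 2000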
+
+      public final boolean isInitialized() {
+        if (!hasPeerID()) {
+          return false;
+        }
+        if (!hasAgeOfLastShippedOp()) {
+          return false;
+        }
+        if (!hasSizeOfLogQueue()) {
+          return false;
+        }
+        if (!hasTimeStampOfLastShippedOp()) {
+          return false;
+        }
+        if (!hasReplicationLag()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string peerID = 1;
+      private java.lang.Object peerID_ = "";
+      /**
+       * <code>required string peerID = 1;</code>
+       */
+      public boolean hasPeerID() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string peerID = 1;</code>
+       */
+      public java.lang.String getPeerID() {
+        java.lang.Object ref = peerID_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          peerID_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string peerID = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getPeerIDBytes() {
+        java.lang.Object ref = peerID_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          peerID_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string peerID = 1;</code>
+       */
+      public Builder setPeerID(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        peerID_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peerID = 1;</code>
+       */
+      public Builder clearPeerID() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        peerID_ = getDefaultInstance().getPeerID();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string peerID = 1;</code>
+       */
+      public Builder setPeerIDBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        peerID_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required uint64 ageOfLastShippedOp = 2;
+      private long ageOfLastShippedOp_ ;
+      /**
+       * <code>required uint64 ageOfLastShippedOp = 2;</code>
+       */
+      public boolean hasAgeOfLastShippedOp() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required uint64 ageOfLastShippedOp = 2;</code>
+       */
+      public long getAgeOfLastShippedOp() {
+        return ageOfLastShippedOp_;
+      }
+      /**
+       * <code>required uint64 ageOfLastShippedOp = 2;</code>
+       */
+      public Builder setAgeOfLastShippedOp(long value) {
+        bitField0_ |= 0x00000002;
+        ageOfLastShippedOp_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required uint64 ageOfLastShippedOp = 2;</code>
+       */
+      public Builder clearAgeOfLastShippedOp() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        ageOfLastShippedOp_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // required uint32 sizeOfLogQueue = 3;
+      private int sizeOfLogQueue_ ;
+      /**
+       * <code>required uint32 sizeOfLogQueue = 3;</code>
+       */
+      public boolean hasSizeOfLogQueue() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>required uint32 sizeOfLogQueue = 3;</code>
+       */
+      public int getSizeOfLogQueue() {
+        return sizeOfLogQueue_;
+      }
+      /**
+       * <code>required uint32 sizeOfLogQueue = 3;</code>
+       */
+      public Builder setSizeOfLogQueue(int value) {
+        bitField0_ |= 0x00000004;
+        sizeOfLogQueue_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required uint32 sizeOfLogQueue = 3;</code>
+       */
+      public Builder clearSizeOfLogQueue() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        sizeOfLogQueue_ = 0;
+        onChanged();
+        return this;
+      }
+
+      // required uint64 timeStampOfLastShippedOp = 4;
+      private long timeStampOfLastShippedOp_ ;
+      /**
+       * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
+       */
+      public boolean hasTimeStampOfLastShippedOp() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
+       */
+      public long getTimeStampOfLastShippedOp() {
+        return timeStampOfLastShippedOp_;
+      }
+      /**
+       * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
+       */
+      public Builder setTimeStampOfLastShippedOp(long value) {
+        bitField0_ |= 0x00000008;
+        timeStampOfLastShippedOp_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required uint64 timeStampOfLastShippedOp = 4;</code>
+       */
+      public Builder clearTimeStampOfLastShippedOp() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        timeStampOfLastShippedOp_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      // required uint64 replicationLag = 5;
+      private long replicationLag_ ;
+      /**
+       * <code>required uint64 replicationLag = 5;</code>
+       */
+      public boolean hasReplicationLag() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * <code>required uint64 replicationLag = 5;</code>
+       */
+      public long getReplicationLag() {
+        return replicationLag_;
+      }
+      /**
+       * <code>required uint64 replicationLag = 5;</code>
+       */
+      public Builder setReplicationLag(long value) {
+        bitField0_ |= 0x00000010;
+        replicationLag_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required uint64 replicationLag = 5;</code>
+       */
+      public Builder clearReplicationLag() {
+        bitField0_ = (bitField0_ & ~0x00000010);
+        replicationLag_ = 0L;
+        onChanged();
+        return this;
+      }

<TRUNCATED>