Posted to commits@hbase.apache.org by ap...@apache.org on 2015/04/30 23:22:01 UTC

[1/6] hbase git commit: HBASE-13216 Add version info in RPC connection header (Shaohui Liu)

Repository: hbase
Updated Branches:
  refs/heads/0.98 fb917d504 -> 0f3de8a17
  refs/heads/branch-1 e69e55b3f -> 6d40b547a
  refs/heads/branch-1.0 dac1dc74c -> a17a3607e
  refs/heads/branch-1.1 94a29e2fd -> 786a413ec
  refs/heads/master 51ce568d6 -> 81e793e58


HBASE-13216 Add version info in RPC connection header (Shaohui Liu)

Amending-Author: Andrew Purtell <ap...@apache.org>

Conflicts:
	hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
	hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
	hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0170e79e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0170e79e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0170e79e

Branch: refs/heads/0.98
Commit: 0170e79e1001b4ced27f7aff168cbe94b5ff9d25
Parents: fb917d5
Author: Liu Shaohui <li...@xiaomi.com>
Authored: Thu Mar 19 10:30:55 2015 +0800
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Apr 30 12:32:54 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/ipc/RpcClient.java  |    1 +
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |   17 +
 .../apache/hadoop/hbase/VersionAnnotation.java  |    6 +
 .../apache/hadoop/hbase/util/VersionInfo.java   |   13 +-
 hbase-common/src/saveVersion.sh                 |   14 +-
 .../hbase/protobuf/generated/RPCProtos.java     | 1664 +++++++++++++++++-
 hbase-protocol/src/main/protobuf/RPC.proto      |   11 +
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |    8 +
 8 files changed, 1708 insertions(+), 26 deletions(-)
----------------------------------------------------------------------
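
For orientation before the per-file diffs: the commit defines a protobuf VersionInfo message carrying the client build's version, url, revision, user, date, and src_checksum, and adds it as optional field 5 of ConnectionHeader, so a server can tell which client build is connecting while older clients interoperate unchanged. A minimal sketch of both ends of that handshake, using the generated RPCProtos types from this diff (the standalone demo class and the printed format are illustrative only, not part of the commit):

    import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;

    public class VersionHeaderDemo {
      public static void main(String[] args) {
        // Client side: attach build metadata to the connection header,
        // mirroring the RpcClient change below.
        RPCProtos.ConnectionHeader header = RPCProtos.ConnectionHeader.newBuilder()
            .setVersionInfo(ProtobufUtil.getVersionInfo())
            .build();

        // Server side: version_info is optional, so headers from older
        // clients omit it and hasVersionInfo() simply returns false.
        if (header.hasVersionInfo()) {
          RPCProtos.VersionInfo v = header.getVersionInfo();
          System.out.println("client " + v.getVersion() + " revision "
              + v.getRevision() + " checksum " + v.getSrcChecksum());
        }
      }
    }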


http://git-wip-us.apache.org/repos/asf/hbase/blob/0170e79e/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
index 27c70bc..7de0f97 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClient.java
@@ -443,6 +443,7 @@ public class RpcClient {
       if (this.compressor != null) {
         builder.setCellBlockCompressorClass(this.compressor.getClass().getCanonicalName());
       }
+      builder.setVersionInfo(ProtobufUtil.getVersionInfo());
       this.header = builder.build();
 
       this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " +

http://git-wip-us.apache.org/repos/asf/hbase/blob/0170e79e/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index e19c5b3..3d596ef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -108,6 +108,7 @@ import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
+import org.apache.hadoop.hbase.protobuf.generated.RPCProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
@@ -125,6 +126,7 @@ import org.apache.hadoop.hbase.util.DynamicClassLoader;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.hbase.util.Methods;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
@@ -2811,4 +2813,19 @@ public final class ProtobufUtil {
     return rlsList;
   }
 
+  /**
+   * Get a protocol buffer VersionInfo
+   *
+   * @return the converted protocol buffer VersionInfo
+   */
+  public static RPCProtos.VersionInfo getVersionInfo() {
+    RPCProtos.VersionInfo.Builder builder = RPCProtos.VersionInfo.newBuilder();
+    builder.setVersion(VersionInfo.getVersion());
+    builder.setUrl(VersionInfo.getUrl());
+    builder.setRevision(VersionInfo.getRevision());
+    builder.setUser(VersionInfo.getUser());
+    builder.setDate(VersionInfo.getDate());
+    builder.setSrcChecksum(VersionInfo.getSrcChecksum());
+    return builder.build();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0170e79e/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
index 081661c..f3137ae 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/VersionAnnotation.java
@@ -57,4 +57,10 @@ public @interface VersionAnnotation {
    * @return the revision number as a string (eg. "451451")
    */
   String revision();
+
+  /**
+   * Get a checksum of the source files from which HBase was compiled.
+   * @return a string that uniquely identifies the source
+   **/
+  String srcChecksum();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0170e79e/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
index 597c2f8..aadad2e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/VersionInfo.java
@@ -93,11 +93,20 @@ public class VersionInfo {
   static String[] versionReport() {
     return new String[] {
       "HBase " + getVersion(),
-      "Subversion " + getUrl() + " -r " + getRevision(),
-      "Compiled by " + getUser() + " on " + getDate()
+      "Source code repository " + getUrl() + " revision=" + getRevision(),
+      "Compiled by " + getUser() + " on " + getDate(),
+      "From source with checksum " + getSrcChecksum()
       };
   }
 
+  /**
 +   * Get the checksum of the source files from which HBase was compiled.
+   * @return a string that uniquely identifies the source
+   **/
+  public static String getSrcChecksum() {
+    return version != null ? version.srcChecksum() : "Unknown";
+  }
+
   public static void writeTo(PrintWriter out) {
     for (String line : versionReport()) {
       out.println(line);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0170e79e/hbase-common/src/saveVersion.sh
----------------------------------------------------------------------
diff --git a/hbase-common/src/saveVersion.sh b/hbase-common/src/saveVersion.sh
index 5edb8ea..890dc5a 100644
--- a/hbase-common/src/saveVersion.sh
+++ b/hbase-common/src/saveVersion.sh
@@ -41,6 +41,17 @@ else
   revision="Unknown"
   url="file://$cwd"
 fi
+which md5sum > /dev/null
+if [ "$?" != "0" ] ; then
+  which md5 > /dev/null
+  if [ "$?" != "0" ] ; then
+    srcChecksum="Unknown"
+  else
+    srcChecksum=`find hbase-*/src/main/ | grep -e "\.java" -e "\.proto" | LC_ALL=C sort | xargs md5 | md5 | cut -d ' ' -f 1`
+  fi
+else
+  srcChecksum=`find hbase-*/src/main/ | grep -e "\.java" -e "\.proto" | LC_ALL=C sort | xargs md5sum | md5sum | cut -d ' ' -f 1`
+fi
 popd
 
 mkdir -p "$outputDirectory/org/apache/hadoop/hbase"
@@ -49,7 +60,8 @@ cat >"$outputDirectory/org/apache/hadoop/hbase/package-info.java" <<EOF
  * Generated by src/saveVersion.sh
  */
 @VersionAnnotation(version="$version", revision="$revision",
-                         user="$user", date="$date", url="$url")
+                         user="$user", date="$date", url="$url",
+                         srcChecksum="$srcChecksum")
 package org.apache.hadoop.hbase;
 EOF
 

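The script's checksum is a digest of digests: it lists every .java and .proto file under hbase-*/src/main/, sorts with LC_ALL=C so the order is locale-independent, hashes each file, then hashes the concatenated per-file digest lines into one fingerprint (falling back from md5sum to BSD md5, or "Unknown" when neither is on the PATH). A rough Java equivalent of that fold, for readers tracing the logic without a shell at hand (a sketch only, not part of the commit, and not bit-identical to the script, which hashes md5sum's textual output including file names):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.security.MessageDigest;
    import java.security.NoSuchAlgorithmException;
    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    public class SrcChecksum {
      public static void main(String[] args)
          throws IOException, NoSuchAlgorithmException {
        // Stable, byte-wise ordering, like `... | LC_ALL=C sort`.
        List<Path> sources;
        try (Stream<Path> walk = Files.walk(Paths.get("."))) {
          sources = walk.filter(p -> p.toString().endsWith(".java")
                                  || p.toString().endsWith(".proto"))
                        .sorted()
                        .collect(Collectors.toList());
        }
        // Digest of per-file digests, like `xargs md5sum | md5sum`.
        MessageDigest outer = MessageDigest.getInstance("MD5");
        for (Path p : sources) {
          outer.update(MessageDigest.getInstance("MD5")
              .digest(Files.readAllBytes(p)));
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : outer.digest()) {
          hex.append(String.format("%02x", b));
        }
        System.out.println(hex);
      }
    }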
http://git-wip-us.apache.org/repos/asf/hbase/blob/0170e79e/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java
index 313d7b2..41a14f3 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RPCProtos.java
@@ -698,6 +698,1396 @@ public final class RPCProtos {
     // @@protoc_insertion_point(class_scope:UserInformation)
   }
 
+  public interface VersionInfoOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+
+    // required string version = 1;
+    /**
+     * <code>required string version = 1;</code>
+     */
+    boolean hasVersion();
+    /**
+     * <code>required string version = 1;</code>
+     */
+    java.lang.String getVersion();
+    /**
+     * <code>required string version = 1;</code>
+     */
+    com.google.protobuf.ByteString
+        getVersionBytes();
+
+    // required string url = 2;
+    /**
+     * <code>required string url = 2;</code>
+     */
+    boolean hasUrl();
+    /**
+     * <code>required string url = 2;</code>
+     */
+    java.lang.String getUrl();
+    /**
+     * <code>required string url = 2;</code>
+     */
+    com.google.protobuf.ByteString
+        getUrlBytes();
+
+    // required string revision = 3;
+    /**
+     * <code>required string revision = 3;</code>
+     */
+    boolean hasRevision();
+    /**
+     * <code>required string revision = 3;</code>
+     */
+    java.lang.String getRevision();
+    /**
+     * <code>required string revision = 3;</code>
+     */
+    com.google.protobuf.ByteString
+        getRevisionBytes();
+
+    // required string user = 4;
+    /**
+     * <code>required string user = 4;</code>
+     */
+    boolean hasUser();
+    /**
+     * <code>required string user = 4;</code>
+     */
+    java.lang.String getUser();
+    /**
+     * <code>required string user = 4;</code>
+     */
+    com.google.protobuf.ByteString
+        getUserBytes();
+
+    // required string date = 5;
+    /**
+     * <code>required string date = 5;</code>
+     */
+    boolean hasDate();
+    /**
+     * <code>required string date = 5;</code>
+     */
+    java.lang.String getDate();
+    /**
+     * <code>required string date = 5;</code>
+     */
+    com.google.protobuf.ByteString
+        getDateBytes();
+
+    // required string src_checksum = 6;
+    /**
+     * <code>required string src_checksum = 6;</code>
+     */
+    boolean hasSrcChecksum();
+    /**
+     * <code>required string src_checksum = 6;</code>
+     */
+    java.lang.String getSrcChecksum();
+    /**
+     * <code>required string src_checksum = 6;</code>
+     */
+    com.google.protobuf.ByteString
+        getSrcChecksumBytes();
+  }
+  /**
+   * Protobuf type {@code VersionInfo}
+   *
+   * <pre>
+   * Rpc client version info proto. Included in ConnectionHeader on connection setup
+   * </pre>
+   */
+  public static final class VersionInfo extends
+      com.google.protobuf.GeneratedMessage
+      implements VersionInfoOrBuilder {
+    // Use VersionInfo.newBuilder() to construct.
+    private VersionInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private VersionInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+    private static final VersionInfo defaultInstance;
+    public static VersionInfo getDefaultInstance() {
+      return defaultInstance;
+    }
+
+    public VersionInfo getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+
+    private final com.google.protobuf.UnknownFieldSet unknownFields;
+    @java.lang.Override
+    public final com.google.protobuf.UnknownFieldSet
+        getUnknownFields() {
+      return this.unknownFields;
+    }
+    private VersionInfo(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      initFields();
+      int mutable_bitField0_ = 0;
+      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              bitField0_ |= 0x00000001;
+              version_ = input.readBytes();
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              url_ = input.readBytes();
+              break;
+            }
+            case 26: {
+              bitField0_ |= 0x00000004;
+              revision_ = input.readBytes();
+              break;
+            }
+            case 34: {
+              bitField0_ |= 0x00000008;
+              user_ = input.readBytes();
+              break;
+            }
+            case 42: {
+              bitField0_ |= 0x00000010;
+              date_ = input.readBytes();
+              break;
+            }
+            case 50: {
+              bitField0_ |= 0x00000020;
+              srcChecksum_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new com.google.protobuf.InvalidProtocolBufferException(
+            e.getMessage()).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_VersionInfo_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_VersionInfo_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.class, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<VersionInfo> PARSER =
+        new com.google.protobuf.AbstractParser<VersionInfo>() {
+      public VersionInfo parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        return new VersionInfo(input, extensionRegistry);
+      }
+    };
+
+    @java.lang.Override
+    public com.google.protobuf.Parser<VersionInfo> getParserForType() {
+      return PARSER;
+    }
+
+    private int bitField0_;
+    // required string version = 1;
+    public static final int VERSION_FIELD_NUMBER = 1;
+    private java.lang.Object version_;
+    /**
+     * <code>required string version = 1;</code>
+     */
+    public boolean hasVersion() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required string version = 1;</code>
+     */
+    public java.lang.String getVersion() {
+      java.lang.Object ref = version_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          version_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string version = 1;</code>
+     */
+    public com.google.protobuf.ByteString
+        getVersionBytes() {
+      java.lang.Object ref = version_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        version_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required string url = 2;
+    public static final int URL_FIELD_NUMBER = 2;
+    private java.lang.Object url_;
+    /**
+     * <code>required string url = 2;</code>
+     */
+    public boolean hasUrl() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required string url = 2;</code>
+     */
+    public java.lang.String getUrl() {
+      java.lang.Object ref = url_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          url_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string url = 2;</code>
+     */
+    public com.google.protobuf.ByteString
+        getUrlBytes() {
+      java.lang.Object ref = url_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        url_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required string revision = 3;
+    public static final int REVISION_FIELD_NUMBER = 3;
+    private java.lang.Object revision_;
+    /**
+     * <code>required string revision = 3;</code>
+     */
+    public boolean hasRevision() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>required string revision = 3;</code>
+     */
+    public java.lang.String getRevision() {
+      java.lang.Object ref = revision_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          revision_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string revision = 3;</code>
+     */
+    public com.google.protobuf.ByteString
+        getRevisionBytes() {
+      java.lang.Object ref = revision_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        revision_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required string user = 4;
+    public static final int USER_FIELD_NUMBER = 4;
+    private java.lang.Object user_;
+    /**
+     * <code>required string user = 4;</code>
+     */
+    public boolean hasUser() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * <code>required string user = 4;</code>
+     */
+    public java.lang.String getUser() {
+      java.lang.Object ref = user_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          user_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string user = 4;</code>
+     */
+    public com.google.protobuf.ByteString
+        getUserBytes() {
+      java.lang.Object ref = user_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        user_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required string date = 5;
+    public static final int DATE_FIELD_NUMBER = 5;
+    private java.lang.Object date_;
+    /**
+     * <code>required string date = 5;</code>
+     */
+    public boolean hasDate() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    /**
+     * <code>required string date = 5;</code>
+     */
+    public java.lang.String getDate() {
+      java.lang.Object ref = date_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          date_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string date = 5;</code>
+     */
+    public com.google.protobuf.ByteString
+        getDateBytes() {
+      java.lang.Object ref = date_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        date_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    // required string src_checksum = 6;
+    public static final int SRC_CHECKSUM_FIELD_NUMBER = 6;
+    private java.lang.Object srcChecksum_;
+    /**
+     * <code>required string src_checksum = 6;</code>
+     */
+    public boolean hasSrcChecksum() {
+      return ((bitField0_ & 0x00000020) == 0x00000020);
+    }
+    /**
+     * <code>required string src_checksum = 6;</code>
+     */
+    public java.lang.String getSrcChecksum() {
+      java.lang.Object ref = srcChecksum_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        com.google.protobuf.ByteString bs = 
+            (com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          srcChecksum_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>required string src_checksum = 6;</code>
+     */
+    public com.google.protobuf.ByteString
+        getSrcChecksumBytes() {
+      java.lang.Object ref = srcChecksum_;
+      if (ref instanceof java.lang.String) {
+        com.google.protobuf.ByteString b = 
+            com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        srcChecksum_ = b;
+        return b;
+      } else {
+        return (com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    private void initFields() {
+      version_ = "";
+      url_ = "";
+      revision_ = "";
+      user_ = "";
+      date_ = "";
+      srcChecksum_ = "";
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+
+      if (!hasVersion()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasUrl()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasRevision()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasUser()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasDate()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasSrcChecksum()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeBytes(1, getVersionBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, getUrlBytes());
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeBytes(3, getRevisionBytes());
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeBytes(4, getUserBytes());
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeBytes(5, getDateBytes());
+      }
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        output.writeBytes(6, getSrcChecksumBytes());
+      }
+      getUnknownFields().writeTo(output);
+    }
+
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(1, getVersionBytes());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, getUrlBytes());
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(3, getRevisionBytes());
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(4, getUserBytes());
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(5, getDateBytes());
+      }
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(6, getSrcChecksumBytes());
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo other = (org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo) obj;
+
+      boolean result = true;
+      result = result && (hasVersion() == other.hasVersion());
+      if (hasVersion()) {
+        result = result && getVersion()
+            .equals(other.getVersion());
+      }
+      result = result && (hasUrl() == other.hasUrl());
+      if (hasUrl()) {
+        result = result && getUrl()
+            .equals(other.getUrl());
+      }
+      result = result && (hasRevision() == other.hasRevision());
+      if (hasRevision()) {
+        result = result && getRevision()
+            .equals(other.getRevision());
+      }
+      result = result && (hasUser() == other.hasUser());
+      if (hasUser()) {
+        result = result && getUser()
+            .equals(other.getUser());
+      }
+      result = result && (hasDate() == other.hasDate());
+      if (hasDate()) {
+        result = result && getDate()
+            .equals(other.getDate());
+      }
+      result = result && (hasSrcChecksum() == other.hasSrcChecksum());
+      if (hasSrcChecksum()) {
+        result = result && getSrcChecksum()
+            .equals(other.getSrcChecksum());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+
+    private int memoizedHashCode = 0;
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasVersion()) {
+        hash = (37 * hash) + VERSION_FIELD_NUMBER;
+        hash = (53 * hash) + getVersion().hashCode();
+      }
+      if (hasUrl()) {
+        hash = (37 * hash) + URL_FIELD_NUMBER;
+        hash = (53 * hash) + getUrl().hashCode();
+      }
+      if (hasRevision()) {
+        hash = (37 * hash) + REVISION_FIELD_NUMBER;
+        hash = (53 * hash) + getRevision().hashCode();
+      }
+      if (hasUser()) {
+        hash = (37 * hash) + USER_FIELD_NUMBER;
+        hash = (53 * hash) + getUser().hashCode();
+      }
+      if (hasDate()) {
+        hash = (37 * hash) + DATE_FIELD_NUMBER;
+        hash = (53 * hash) + getDate().hashCode();
+      }
+      if (hasSrcChecksum()) {
+        hash = (37 * hash) + SRC_CHECKSUM_FIELD_NUMBER;
+        hash = (53 * hash) + getSrcChecksum().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseDelimitedFrom(input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input);
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return PARSER.parseFrom(input, extensionRegistry);
+    }
+
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code VersionInfo}
+     *
+     * <pre>
+     * Rpc client version info proto. Included in ConnectionHeader on connection setup
+     * </pre>
+     */
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_VersionInfo_descriptor;
+      }
+
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_VersionInfo_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.class, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+
+      public Builder clear() {
+        super.clear();
+        version_ = "";
+        bitField0_ = (bitField0_ & ~0x00000001);
+        url_ = "";
+        bitField0_ = (bitField0_ & ~0x00000002);
+        revision_ = "";
+        bitField0_ = (bitField0_ & ~0x00000004);
+        user_ = "";
+        bitField0_ = (bitField0_ & ~0x00000008);
+        date_ = "";
+        bitField0_ = (bitField0_ & ~0x00000010);
+        srcChecksum_ = "";
+        bitField0_ = (bitField0_ & ~0x00000020);
+        return this;
+      }
+
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.internal_static_VersionInfo_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo build() {
+        org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo result = new org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.version_ = version_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.url_ = url_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.revision_ = revision_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.user_ = user_;
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        result.date_ = date_;
+        if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+          to_bitField0_ |= 0x00000020;
+        }
+        result.srcChecksum_ = srcChecksum_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance()) return this;
+        if (other.hasVersion()) {
+          bitField0_ |= 0x00000001;
+          version_ = other.version_;
+          onChanged();
+        }
+        if (other.hasUrl()) {
+          bitField0_ |= 0x00000002;
+          url_ = other.url_;
+          onChanged();
+        }
+        if (other.hasRevision()) {
+          bitField0_ |= 0x00000004;
+          revision_ = other.revision_;
+          onChanged();
+        }
+        if (other.hasUser()) {
+          bitField0_ |= 0x00000008;
+          user_ = other.user_;
+          onChanged();
+        }
+        if (other.hasDate()) {
+          bitField0_ |= 0x00000010;
+          date_ = other.date_;
+          onChanged();
+        }
+        if (other.hasSrcChecksum()) {
+          bitField0_ |= 0x00000020;
+          srcChecksum_ = other.srcChecksum_;
+          onChanged();
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasVersion()) {
+          
+          return false;
+        }
+        if (!hasUrl()) {
+          
+          return false;
+        }
+        if (!hasRevision()) {
+          
+          return false;
+        }
+        if (!hasUser()) {
+          
+          return false;
+        }
+        if (!hasDate()) {
+          
+          return false;
+        }
+        if (!hasSrcChecksum()) {
+          
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo) e.getUnfinishedMessage();
+          throw e;
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      // required string version = 1;
+      private java.lang.Object version_ = "";
+      /**
+       * <code>required string version = 1;</code>
+       */
+      public boolean hasVersion() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required string version = 1;</code>
+       */
+      public java.lang.String getVersion() {
+        java.lang.Object ref = version_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          version_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string version = 1;</code>
+       */
+      public com.google.protobuf.ByteString
+          getVersionBytes() {
+        java.lang.Object ref = version_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          version_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string version = 1;</code>
+       */
+      public Builder setVersion(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        version_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string version = 1;</code>
+       */
+      public Builder clearVersion() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        version_ = getDefaultInstance().getVersion();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string version = 1;</code>
+       */
+      public Builder setVersionBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000001;
+        version_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required string url = 2;
+      private java.lang.Object url_ = "";
+      /**
+       * <code>required string url = 2;</code>
+       */
+      public boolean hasUrl() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required string url = 2;</code>
+       */
+      public java.lang.String getUrl() {
+        java.lang.Object ref = url_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          url_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string url = 2;</code>
+       */
+      public com.google.protobuf.ByteString
+          getUrlBytes() {
+        java.lang.Object ref = url_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          url_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string url = 2;</code>
+       */
+      public Builder setUrl(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000002;
+        url_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string url = 2;</code>
+       */
+      public Builder clearUrl() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        url_ = getDefaultInstance().getUrl();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string url = 2;</code>
+       */
+      public Builder setUrlBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000002;
+        url_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required string revision = 3;
+      private java.lang.Object revision_ = "";
+      /**
+       * <code>required string revision = 3;</code>
+       */
+      public boolean hasRevision() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>required string revision = 3;</code>
+       */
+      public java.lang.String getRevision() {
+        java.lang.Object ref = revision_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          revision_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string revision = 3;</code>
+       */
+      public com.google.protobuf.ByteString
+          getRevisionBytes() {
+        java.lang.Object ref = revision_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          revision_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string revision = 3;</code>
+       */
+      public Builder setRevision(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000004;
+        revision_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string revision = 3;</code>
+       */
+      public Builder clearRevision() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        revision_ = getDefaultInstance().getRevision();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string revision = 3;</code>
+       */
+      public Builder setRevisionBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000004;
+        revision_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required string user = 4;
+      private java.lang.Object user_ = "";
+      /**
+       * <code>required string user = 4;</code>
+       */
+      public boolean hasUser() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * <code>required string user = 4;</code>
+       */
+      public java.lang.String getUser() {
+        java.lang.Object ref = user_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          user_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string user = 4;</code>
+       */
+      public com.google.protobuf.ByteString
+          getUserBytes() {
+        java.lang.Object ref = user_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          user_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string user = 4;</code>
+       */
+      public Builder setUser(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000008;
+        user_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string user = 4;</code>
+       */
+      public Builder clearUser() {
+        bitField0_ = (bitField0_ & ~0x00000008);
+        user_ = getDefaultInstance().getUser();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string user = 4;</code>
+       */
+      public Builder setUserBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000008;
+        user_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required string date = 5;
+      private java.lang.Object date_ = "";
+      /**
+       * <code>required string date = 5;</code>
+       */
+      public boolean hasDate() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * <code>required string date = 5;</code>
+       */
+      public java.lang.String getDate() {
+        java.lang.Object ref = date_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          date_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string date = 5;</code>
+       */
+      public com.google.protobuf.ByteString
+          getDateBytes() {
+        java.lang.Object ref = date_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          date_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string date = 5;</code>
+       */
+      public Builder setDate(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000010;
+        date_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string date = 5;</code>
+       */
+      public Builder clearDate() {
+        bitField0_ = (bitField0_ & ~0x00000010);
+        date_ = getDefaultInstance().getDate();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string date = 5;</code>
+       */
+      public Builder setDateBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000010;
+        date_ = value;
+        onChanged();
+        return this;
+      }
+
+      // required string src_checksum = 6;
+      private java.lang.Object srcChecksum_ = "";
+      /**
+       * <code>required string src_checksum = 6;</code>
+       */
+      public boolean hasSrcChecksum() {
+        return ((bitField0_ & 0x00000020) == 0x00000020);
+      }
+      /**
+       * <code>required string src_checksum = 6;</code>
+       */
+      public java.lang.String getSrcChecksum() {
+        java.lang.Object ref = srcChecksum_;
+        if (!(ref instanceof java.lang.String)) {
+          java.lang.String s = ((com.google.protobuf.ByteString) ref)
+              .toStringUtf8();
+          srcChecksum_ = s;
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>required string src_checksum = 6;</code>
+       */
+      public com.google.protobuf.ByteString
+          getSrcChecksumBytes() {
+        java.lang.Object ref = srcChecksum_;
+        if (ref instanceof String) {
+          com.google.protobuf.ByteString b = 
+              com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          srcChecksum_ = b;
+          return b;
+        } else {
+          return (com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>required string src_checksum = 6;</code>
+       */
+      public Builder setSrcChecksum(
+          java.lang.String value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000020;
+        srcChecksum_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string src_checksum = 6;</code>
+       */
+      public Builder clearSrcChecksum() {
+        bitField0_ = (bitField0_ & ~0x00000020);
+        srcChecksum_ = getDefaultInstance().getSrcChecksum();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required string src_checksum = 6;</code>
+       */
+      public Builder setSrcChecksumBytes(
+          com.google.protobuf.ByteString value) {
+        if (value == null) {
+    throw new NullPointerException();
+  }
+  bitField0_ |= 0x00000020;
+        srcChecksum_ = value;
+        onChanged();
+        return this;
+      }
+
+      // @@protoc_insertion_point(builder_scope:VersionInfo)
+    }
+
+    static {
+      defaultInstance = new VersionInfo(true);
+      defaultInstance.initFields();
+    }
+
+    // @@protoc_insertion_point(class_scope:VersionInfo)
+  }
+
   public interface ConnectionHeaderOrBuilder
       extends com.google.protobuf.MessageOrBuilder {
 
@@ -789,6 +2179,20 @@ public final class RPCProtos {
      */
     com.google.protobuf.ByteString
         getCellBlockCompressorClassBytes();
+
+    // optional .VersionInfo version_info = 5;
+    /**
+     * <code>optional .VersionInfo version_info = 5;</code>
+     */
+    boolean hasVersionInfo();
+    /**
+     * <code>optional .VersionInfo version_info = 5;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo getVersionInfo();
+    /**
+     * <code>optional .VersionInfo version_info = 5;</code>
+     */
+    org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder getVersionInfoOrBuilder();
   }
   /**
    * Protobuf type {@code ConnectionHeader}
@@ -873,6 +2277,19 @@ public final class RPCProtos {
               cellBlockCompressorClass_ = input.readBytes();
               break;
             }
+            case 42: {
+              org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000010) == 0x00000010)) {
+                subBuilder = versionInfo_.toBuilder();
+              }
+              versionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(versionInfo_);
+                versionInfo_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000010;
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -1094,11 +2511,34 @@ public final class RPCProtos {
       }
     }
 
+    // optional .VersionInfo version_info = 5;
+    public static final int VERSION_INFO_FIELD_NUMBER = 5;
+    private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo versionInfo_;
+    /**
+     * <code>optional .VersionInfo version_info = 5;</code>
+     */
+    public boolean hasVersionInfo() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    /**
+     * <code>optional .VersionInfo version_info = 5;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo getVersionInfo() {
+      return versionInfo_;
+    }
+    /**
+     * <code>optional .VersionInfo version_info = 5;</code>
+     */
+    public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder getVersionInfoOrBuilder() {
+      return versionInfo_;
+    }
+
     private void initFields() {
       userInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.UserInformation.getDefaultInstance();
       serviceName_ = "";
       cellBlockCodecClass_ = "";
       cellBlockCompressorClass_ = "";
+      versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance();
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -1111,6 +2551,12 @@ public final class RPCProtos {
           return false;
         }
       }
+      if (hasVersionInfo()) {
+        if (!getVersionInfo().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
       memoizedIsInitialized = 1;
       return true;
     }
@@ -1130,6 +2576,9 @@ public final class RPCProtos {
       if (((bitField0_ & 0x00000008) == 0x00000008)) {
         output.writeBytes(4, getCellBlockCompressorClassBytes());
       }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeMessage(5, versionInfo_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -1155,6 +2604,10 @@ public final class RPCProtos {
         size += com.google.protobuf.CodedOutputStream
           .computeBytesSize(4, getCellBlockCompressorClassBytes());
       }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(5, versionInfo_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -1198,6 +2651,11 @@ public final class RPCProtos {
         result = result && getCellBlockCompressorClass()
             .equals(other.getCellBlockCompressorClass());
       }
+      result = result && (hasVersionInfo() == other.hasVersionInfo());
+      if (hasVersionInfo()) {
+        result = result && getVersionInfo()
+            .equals(other.getVersionInfo());
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -1227,6 +2685,10 @@ public final class RPCProtos {
         hash = (37 * hash) + CELL_BLOCK_COMPRESSOR_CLASS_FIELD_NUMBER;
         hash = (53 * hash) + getCellBlockCompressorClass().hashCode();
       }
+      if (hasVersionInfo()) {
+        hash = (37 * hash) + VERSION_INFO_FIELD_NUMBER;
+        hash = (53 * hash) + getVersionInfo().hashCode();
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
@@ -1333,6 +2795,7 @@ public final class RPCProtos {
       private void maybeForceBuilderInitialization() {
         if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
           getUserInfoFieldBuilder();
+          getVersionInfoFieldBuilder();
         }
       }
       private static Builder create() {
@@ -1353,6 +2816,12 @@ public final class RPCProtos {
         bitField0_ = (bitField0_ & ~0x00000004);
         cellBlockCompressorClass_ = "";
         bitField0_ = (bitField0_ & ~0x00000008);
+        if (versionInfoBuilder_ == null) {
+          versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance();
+        } else {
+          versionInfoBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000010);
         return this;
       }
 
@@ -1401,6 +2870,14 @@ public final class RPCProtos {
           to_bitField0_ |= 0x00000008;
         }
         result.cellBlockCompressorClass_ = cellBlockCompressorClass_;
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        if (versionInfoBuilder_ == null) {
+          result.versionInfo_ = versionInfo_;
+        } else {
+          result.versionInfo_ = versionInfoBuilder_.build();
+        }
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -1435,6 +2912,9 @@ public final class RPCProtos {
           cellBlockCompressorClass_ = other.cellBlockCompressorClass_;
           onChanged();
         }
+        if (other.hasVersionInfo()) {
+          mergeVersionInfo(other.getVersionInfo());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -1446,6 +2926,12 @@ public final class RPCProtos {
             return false;
           }
         }
+        if (hasVersionInfo()) {
+          if (!getVersionInfo().isInitialized()) {
+            
+            return false;
+          }
+        }
         return true;
       }
 
@@ -1867,6 +3353,123 @@ public final class RPCProtos {
         return this;
       }
 
+      // optional .VersionInfo version_info = 5;
+      private org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder> versionInfoBuilder_;
+      /**
+       * <code>optional .VersionInfo version_info = 5;</code>
+       */
+      public boolean hasVersionInfo() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * <code>optional .VersionInfo version_info = 5;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo getVersionInfo() {
+        if (versionInfoBuilder_ == null) {
+          return versionInfo_;
+        } else {
+          return versionInfoBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .VersionInfo version_info = 5;</code>
+       */
+      public Builder setVersionInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo value) {
+        if (versionInfoBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          versionInfo_ = value;
+          onChanged();
+        } else {
+          versionInfoBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000010;
+        return this;
+      }
+      /**
+       * <code>optional .VersionInfo version_info = 5;</code>
+       */
+      public Builder setVersionInfo(
+          org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder builderForValue) {
+        if (versionInfoBuilder_ == null) {
+          versionInfo_ = builderForValue.build();
+          onChanged();
+        } else {
+          versionInfoBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000010;
+        return this;
+      }
+      /**
+       * <code>optional .VersionInfo version_info = 5;</code>
+       */
+      public Builder mergeVersionInfo(org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo value) {
+        if (versionInfoBuilder_ == null) {
+          if (((bitField0_ & 0x00000010) == 0x00000010) &&
+              versionInfo_ != org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance()) {
+            versionInfo_ =
+              org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.newBuilder(versionInfo_).mergeFrom(value).buildPartial();
+          } else {
+            versionInfo_ = value;
+          }
+          onChanged();
+        } else {
+          versionInfoBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000010;
+        return this;
+      }
+      /**
+       * <code>optional .VersionInfo version_info = 5;</code>
+       */
+      public Builder clearVersionInfo() {
+        if (versionInfoBuilder_ == null) {
+          versionInfo_ = org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.getDefaultInstance();
+          onChanged();
+        } else {
+          versionInfoBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000010);
+        return this;
+      }
+      /**
+       * <code>optional .VersionInfo version_info = 5;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder getVersionInfoBuilder() {
+        bitField0_ |= 0x00000010;
+        onChanged();
+        return getVersionInfoFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .VersionInfo version_info = 5;</code>
+       */
+      public org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder getVersionInfoOrBuilder() {
+        if (versionInfoBuilder_ != null) {
+          return versionInfoBuilder_.getMessageOrBuilder();
+        } else {
+          return versionInfo_;
+        }
+      }
+      /**
+       * <code>optional .VersionInfo version_info = 5;</code>
+       */
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder> 
+          getVersionInfoFieldBuilder() {
+        if (versionInfoBuilder_ == null) {
+          versionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfoOrBuilder>(
+                  versionInfo_,
+                  getParentForChildren(),
+                  isClean());
+          versionInfo_ = null;
+        }
+        return versionInfoBuilder_;
+      }
+
       // @@protoc_insertion_point(builder_scope:ConnectionHeader)
     }
 
@@ -5884,6 +7487,11 @@ public final class RPCProtos {
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_UserInformation_fieldAccessorTable;
   private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_VersionInfo_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_VersionInfo_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
     internal_static_ConnectionHeader_descriptor;
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -5919,23 +7527,27 @@ public final class RPCProtos {
     java.lang.String[] descriptorData = {
       "\n\tRPC.proto\032\rTracing.proto\032\013HBase.proto\"" +
       "<\n\017UserInformation\022\026\n\016effective_user\030\001 \002" +
-      "(\t\022\021\n\treal_user\030\002 \001(\t\"\222\001\n\020ConnectionHead" +
-      "er\022#\n\tuser_info\030\001 \001(\0132\020.UserInformation\022" +
-      "\024\n\014service_name\030\002 \001(\t\022\036\n\026cell_block_code" +
-      "c_class\030\003 \001(\t\022#\n\033cell_block_compressor_c" +
-      "lass\030\004 \001(\t\"\037\n\rCellBlockMeta\022\016\n\006length\030\001 " +
-      "\001(\r\"|\n\021ExceptionResponse\022\034\n\024exception_cl" +
-      "ass_name\030\001 \001(\t\022\023\n\013stack_trace\030\002 \001(\t\022\020\n\010h" +
-      "ostname\030\003 \001(\t\022\014\n\004port\030\004 \001(\005\022\024\n\014do_not_re",
-      "try\030\005 \001(\010\"\246\001\n\rRequestHeader\022\017\n\007call_id\030\001" +
-      " \001(\r\022\035\n\ntrace_info\030\002 \001(\0132\t.RPCTInfo\022\023\n\013m" +
-      "ethod_name\030\003 \001(\t\022\025\n\rrequest_param\030\004 \001(\010\022" +
-      "\'\n\017cell_block_meta\030\005 \001(\0132\016.CellBlockMeta" +
-      "\022\020\n\010priority\030\006 \001(\r\"q\n\016ResponseHeader\022\017\n\007" +
-      "call_id\030\001 \001(\r\022%\n\texception\030\002 \001(\0132\022.Excep" +
-      "tionResponse\022\'\n\017cell_block_meta\030\003 \001(\0132\016." +
-      "CellBlockMetaB<\n*org.apache.hadoop.hbase" +
-      ".protobuf.generatedB\tRPCProtosH\001\240\001\001"
+      "(\t\022\021\n\treal_user\030\002 \001(\t\"o\n\013VersionInfo\022\017\n\007" +
+      "version\030\001 \002(\t\022\013\n\003url\030\002 \002(\t\022\020\n\010revision\030\003" +
+      " \002(\t\022\014\n\004user\030\004 \002(\t\022\014\n\004date\030\005 \002(\t\022\024\n\014src_" +
+      "checksum\030\006 \002(\t\"\266\001\n\020ConnectionHeader\022#\n\tu" +
+      "ser_info\030\001 \001(\0132\020.UserInformation\022\024\n\014serv" +
+      "ice_name\030\002 \001(\t\022\036\n\026cell_block_codec_class" +
+      "\030\003 \001(\t\022#\n\033cell_block_compressor_class\030\004 " +
+      "\001(\t\022\"\n\014version_info\030\005 \001(\0132\014.VersionInfo\"",
+      "\037\n\rCellBlockMeta\022\016\n\006length\030\001 \001(\r\"|\n\021Exce" +
+      "ptionResponse\022\034\n\024exception_class_name\030\001 " +
+      "\001(\t\022\023\n\013stack_trace\030\002 \001(\t\022\020\n\010hostname\030\003 \001" +
+      "(\t\022\014\n\004port\030\004 \001(\005\022\024\n\014do_not_retry\030\005 \001(\010\"\246" +
+      "\001\n\rRequestHeader\022\017\n\007call_id\030\001 \001(\r\022\035\n\ntra" +
+      "ce_info\030\002 \001(\0132\t.RPCTInfo\022\023\n\013method_name\030" +
+      "\003 \001(\t\022\025\n\rrequest_param\030\004 \001(\010\022\'\n\017cell_blo" +
+      "ck_meta\030\005 \001(\0132\016.CellBlockMeta\022\020\n\010priorit" +
+      "y\030\006 \001(\r\"q\n\016ResponseHeader\022\017\n\007call_id\030\001 \001" +
+      "(\r\022%\n\texception\030\002 \001(\0132\022.ExceptionRespons",
+      "e\022\'\n\017cell_block_meta\030\003 \001(\0132\016.CellBlockMe" +
+      "taB<\n*org.apache.hadoop.hbase.protobuf.g" +
+      "eneratedB\tRPCProtosH\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -5948,32 +7560,38 @@ public final class RPCProtos {
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_UserInformation_descriptor,
               new java.lang.String[] { "EffectiveUser", "RealUser", });
-          internal_static_ConnectionHeader_descriptor =
+          internal_static_VersionInfo_descriptor =
             getDescriptor().getMessageTypes().get(1);
+          internal_static_VersionInfo_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_VersionInfo_descriptor,
+              new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", });
+          internal_static_ConnectionHeader_descriptor =
+            getDescriptor().getMessageTypes().get(2);
           internal_static_ConnectionHeader_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_ConnectionHeader_descriptor,
-              new java.lang.String[] { "UserInfo", "ServiceName", "CellBlockCodecClass", "CellBlockCompressorClass", });
+              new java.lang.String[] { "UserInfo", "ServiceName", "CellBlockCodecClass", "CellBlockCompressorClass", "VersionInfo", });
           internal_static_CellBlockMeta_descriptor =
-            getDescriptor().getMessageTypes().get(2);
+            getDescriptor().getMessageTypes().get(3);
           internal_static_CellBlockMeta_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_CellBlockMeta_descriptor,
               new java.lang.String[] { "Length", });
           internal_static_ExceptionResponse_descriptor =
-            getDescriptor().getMessageTypes().get(3);
+            getDescriptor().getMessageTypes().get(4);
           internal_static_ExceptionResponse_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_ExceptionResponse_descriptor,
               new java.lang.String[] { "ExceptionClassName", "StackTrace", "Hostname", "Port", "DoNotRetry", });
           internal_static_RequestHeader_descriptor =
-            getDescriptor().getMessageTypes().get(4);
+            getDescriptor().getMessageTypes().get(5);
           internal_static_RequestHeader_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_RequestHeader_descriptor,
               new java.lang.String[] { "CallId", "TraceInfo", "MethodName", "RequestParam", "CellBlockMeta", "Priority", });
           internal_static_ResponseHeader_descriptor =
-            getDescriptor().getMessageTypes().get(5);
+            getDescriptor().getMessageTypes().get(6);
           internal_static_ResponseHeader_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_ResponseHeader_descriptor,

http://git-wip-us.apache.org/repos/asf/hbase/blob/0170e79e/hbase-protocol/src/main/protobuf/RPC.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/RPC.proto b/hbase-protocol/src/main/protobuf/RPC.proto
index 9bf69a1..d862aec 100644
--- a/hbase-protocol/src/main/protobuf/RPC.proto
+++ b/hbase-protocol/src/main/protobuf/RPC.proto
@@ -76,6 +76,16 @@ message UserInformation {
   optional string real_user = 2;
 }
 
+// Rpc client version info proto. Included in ConnectionHeader on connection setup
+message VersionInfo {
+  required string version = 1;
+  required string url = 2;
+  required string revision = 3;
+  required string user = 4;
+  required string date = 5;
+  required string src_checksum = 6;
+}
+
 // This is sent on connection setup after the connection preamble is sent.
 message ConnectionHeader {
   optional UserInformation user_info = 1;
@@ -86,6 +96,7 @@ message ConnectionHeader {
   // Compressor we will use if cell block is compressed.  Server will throw exception if not supported.
   // Class must implement hadoop's CompressionCodec Interface.  Can't compress if no codec.
   optional string cell_block_compressor_class = 4;
+  optional VersionInfo version_info = 5;
 }
 
 // Optional Cell block Message.  Included in client RequestHeader
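
For reference, the six required fields above are set through the standard protoc-generated builder. A minimal sketch of constructing the message on the client side, with placeholder values (these are illustrative, not the strings HBase actually stamps in at build time):

  import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.VersionInfo;

  // Inside any method:
  VersionInfo info = VersionInfo.newBuilder()
      .setVersion("0.98.13")                    // placeholder release string
      .setUrl("git://example.org/repos/hbase")  // placeholder repository URL
      .setRevision("0170e79e")                  // placeholder commit hash
      .setUser("builder")                       // placeholder build user
      .setDate("Thu Mar 19 10:30:55 CST 2015")  // placeholder build date
      .setSrcChecksum("0123456789abcdef")       // placeholder source checksum
      .build();

Because every field is declared required, build() throws if any of them is left unset, so a client has to populate all six before attaching the message to its ConnectionHeader.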

http://git-wip-us.apache.org/repos/asf/hbase/blob/0170e79e/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 32d0fbf..5668f83 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -1641,6 +1641,14 @@ public class RpcServer implements RpcServerInterface {
           }
         }
       }
+      if (connectionHeader.hasVersionInfo()) {
+        AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort
+            + " with version info: "
+            + TextFormat.shortDebugString(connectionHeader.getVersionInfo()));
+      } else {
+        AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort
+            + " with unknown version info");
+      }
     }
 
     /**
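
TextFormat.shortDebugString renders the message as a single line of name/value pairs, so a successful handshake yields one audit entry per connection of roughly this shape (values hypothetical, matching the sketch above):

  version: "0.98.13" url: "git://example.org/repos/hbase" revision: "0170e79e" user: "builder" date: "Thu Mar 19 10:30:55 CST 2015" src_checksum: "0123456789abcdef"

appended after the client's address and port. Clients predating this change simply land in the "unknown version info" branch, since version_info is an optional field.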


[4/6] hbase git commit: HBASE-13420 RegionEnvironment.offerExecutionLatency Blocks Threads under Heavy Load

Posted by ap...@apache.org.
HBASE-13420 RegionEnvironment.offerExecutionLatency Blocks Threads under Heavy Load


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a17a3607
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a17a3607
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a17a3607

Branch: refs/heads/branch-1.0
Commit: a17a3607ed76b4d99ee24b6aece1354cfa071c4b
Parents: dac1dc7
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Apr 30 12:54:55 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Apr 30 13:59:49 2015 -0700

----------------------------------------------------------------------
 .../regionserver/RegionCoprocessorHost.java     |   8 +-
 .../util/BoundedConcurrentLinkedQueue.java      | 114 +++++++++++++++++++
 .../util/TestBoundedConcurrentLinkedQueue.java  |  85 ++++++++++++++
 3 files changed, 203 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/a17a3607/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 83aea3e..afb3ec1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -27,8 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.UUID;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.regex.Matcher;
@@ -78,6 +76,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.BoundedConcurrentLinkedQueue;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hbase.util.Pair;
@@ -102,6 +101,7 @@ public class RegionCoprocessorHost
       new ReferenceMap(AbstractReferenceMap.HARD, AbstractReferenceMap.WEAK);
 
   /**
+   * 
    * Encapsulation of the environment of each coprocessor
    */
   static class RegionEnvironment extends CoprocessorHost.Environment
@@ -111,8 +111,8 @@ public class RegionCoprocessorHost
     private RegionServerServices rsServices;
     ConcurrentMap<String, Object> sharedData;
     private static final int LATENCY_BUFFER_SIZE = 100;
-    private final BlockingQueue<Long> coprocessorTimeNanos = new ArrayBlockingQueue<Long>(
-        LATENCY_BUFFER_SIZE);
+    private final BoundedConcurrentLinkedQueue<Long> coprocessorTimeNanos =
+        new BoundedConcurrentLinkedQueue<Long>(LATENCY_BUFFER_SIZE);
     private final boolean useLegacyPre;
     private final boolean useLegacyPost;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/a17a3607/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java
new file mode 100644
index 0000000..9208238
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.util.Collection;
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * A ConcurrentLinkedQueue that enforces a maximum queue size.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class BoundedConcurrentLinkedQueue<T> extends ConcurrentLinkedQueue<T> {
+  private static final long serialVersionUID = 1L;
+  private volatile long size = 0;
+  private final long maxSize;
+
+  public BoundedConcurrentLinkedQueue() {
+    this(Long.MAX_VALUE);
+  }
+
+  public BoundedConcurrentLinkedQueue(long maxSize) {
+    super();
+    this.maxSize = maxSize;
+  }
+
+  @Override
+  public boolean add(T e) {
+    return offer(e);
+  }
+
+  @Override
+  public boolean addAll(Collection<? extends T> c) {
+    size += c.size();        // Between here and below we might reject offers,
+    if (size > maxSize) {    // if over maxSize, but that's ok
+      size -= c.size();      // We're over, just back out and return.
+      return false;
+    }
+    return super.addAll(c);  // Always true for ConcurrentLinkedQueue
+  }
+
+  @Override
+  public void clear() {
+    super.clear();
+    size = 0;
+  }
+
+  @Override
+  public boolean offer(T e) {
+    if (++size > maxSize) {
+      --size;                // We didn't take it after all
+      return false;
+    }
+    return super.offer(e);   // Always true for ConcurrentLinkedQueue
+  }
+
+  @Override
+  public T poll() {
+    T result = super.poll();
+    if (result != null) {
+      --size;
+    }
+    return result;
+  }
+
+  @Override
+  public boolean remove(Object o) {
+    boolean result = super.remove(o);
+    if (result) {
+      --size;
+    }
+    return result;
+  }
+
+  @Override
+  public int size() {
+    return (int) size;
+  }
+
+  public void drainTo(Collection<T> list) {
+    long removed = 0;
+    T l;
+    while ((l = super.poll()) != null) {
+      list.add(l);
+      removed++;
+    }
+    // Limit the number of operations on a volatile by only reporting size
+    // change after the drain is completed.
+    size -= removed;
+  }
+
+  public long remainingCapacity() {
+    long remaining = maxSize - size;
+    return remaining >= 0 ? remaining : 0;
+  }
+}
\ No newline at end of file
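
A minimal usage sketch for the class above, exercising the semantics it documents (the wrapper class and method names here are illustrative, not code from HBase):

  import java.util.ArrayList;
  import java.util.List;

  import org.apache.hadoop.hbase.util.BoundedConcurrentLinkedQueue;

  public class LatencySamples {
    private final BoundedConcurrentLinkedQueue<Long> samples =
        new BoundedConcurrentLinkedQueue<Long>(100);

    // Hot path: never blocks and never takes a lock; offer() returns false
    // and the sample is dropped once the bound is reached.
    public void record(long nanos) {
      samples.offer(nanos);
    }

    // Reporting path: drain everything accumulated since the last pass.
    public List<Long> snapshot() {
      List<Long> batch = new ArrayList<Long>();
      samples.drainTo(batch);
      return batch;
    }
  }

The design trade-off is visible in offer(): size is a plain volatile updated with non-atomic ++/--, so under contention the bound is best-effort rather than exact. For a fixed-size latency sample buffer that is acceptable, and it is what removes the single-lock contention that ArrayBlockingQueue.offer exhibits under heavy load. The unit test added below exercises offer, poll, and drainTo directly.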

http://git-wip-us.apache.org/repos/asf/hbase/blob/a17a3607/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java
new file mode 100644
index 0000000..5972a87
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestBoundedConcurrentLinkedQueue {
+  private final static int CAPACITY = 16;
+
+  private BoundedConcurrentLinkedQueue<Long> queue;
+
+  @Before
+  public void setUp() throws Exception {
+    this.queue = new BoundedConcurrentLinkedQueue<Long>(CAPACITY);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testOfferAndPoll() throws Exception {
+    // Offer
+    for (long i = 1; i <= CAPACITY; ++i) {
+      assertTrue(queue.offer(i));
+      assertEquals(i, queue.size());
+      assertEquals(CAPACITY - i, queue.remainingCapacity());
+    }
+    assertFalse(queue.offer(0L));
+
+    // Poll
+    for (int i = 1; i <= CAPACITY; ++i) {
+      long l = queue.poll();
+      assertEquals(i, l);
+      assertEquals(CAPACITY - i, queue.size());
+      assertEquals(i, queue.remainingCapacity());
+    }
+    assertEquals(null, queue.poll());
+  }
+
+  @Test
+  public void testDrain() throws Exception {
+    // Offer
+    for (long i = 1; i <= CAPACITY; ++i) {
+      assertTrue(queue.offer(i));
+      assertEquals(i, queue.size());
+      assertEquals(CAPACITY - i, queue.remainingCapacity());
+    }
+    assertFalse(queue.offer(0L));
+
+    // Drain
+    List<Long> list = new ArrayList<Long>();
+    queue.drainTo(list);
+    assertEquals(null, queue.poll());
+    assertEquals(0, queue.size());
+    assertEquals(CAPACITY, queue.remainingCapacity());
+  }
+}


[5/6] hbase git commit: HBASE-13420 RegionEnvironment.offerExecutionLatency Blocks Threads under Heavy Load

Posted by ap...@apache.org.
HBASE-13420 RegionEnvironment.offerExecutionLatency Blocks Threads under Heavy Load


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/786a413e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/786a413e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/786a413e

Branch: refs/heads/branch-1.1
Commit: 786a413ec9c7b29311a49d0afb972fad7b4d3340
Parents: 94a29e2
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Apr 30 12:54:55 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Apr 30 14:03:59 2015 -0700

----------------------------------------------------------------------
 .../regionserver/RegionCoprocessorHost.java     |   8 +-
 .../util/BoundedConcurrentLinkedQueue.java      | 114 +++++++++++++++++++
 .../util/TestBoundedConcurrentLinkedQueue.java  |  85 ++++++++++++++
 3 files changed, 203 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/786a413e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index badf944..92d10e8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -27,8 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.UUID;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.regex.Matcher;
@@ -78,6 +76,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.BoundedConcurrentLinkedQueue;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hbase.util.Pair;
@@ -102,6 +101,7 @@ public class RegionCoprocessorHost
       new ReferenceMap(AbstractReferenceMap.HARD, AbstractReferenceMap.WEAK);
 
   /**
+   * 
    * Encapsulation of the environment of each coprocessor
    */
   static class RegionEnvironment extends CoprocessorHost.Environment
@@ -111,8 +111,8 @@ public class RegionCoprocessorHost
     private RegionServerServices rsServices;
     ConcurrentMap<String, Object> sharedData;
     private static final int LATENCY_BUFFER_SIZE = 100;
-    private final BlockingQueue<Long> coprocessorTimeNanos = new ArrayBlockingQueue<Long>(
-        LATENCY_BUFFER_SIZE);
+    private final BoundedConcurrentLinkedQueue<Long> coprocessorTimeNanos =
+        new BoundedConcurrentLinkedQueue<Long>(LATENCY_BUFFER_SIZE);
     private final boolean useLegacyPre;
     private final boolean useLegacyPost;
 



[6/6] hbase git commit: HBASE-13420 RegionEnvironment.offerExecutionLatency Blocks Threads under Heavy Load

Posted by ap...@apache.org.
HBASE-13420 RegionEnvironment.offerExecutionLatency Blocks Threads under Heavy Load


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6d40b547
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6d40b547
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6d40b547

Branch: refs/heads/branch-1
Commit: 6d40b547aea3046d5d05760c1ced6f62f59e1448
Parents: e69e55b
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Apr 30 12:54:55 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Apr 30 14:06:39 2015 -0700

----------------------------------------------------------------------
 .../regionserver/RegionCoprocessorHost.java     |   8 +-
 .../util/BoundedConcurrentLinkedQueue.java      | 114 +++++++++++++++++++
 .../util/TestBoundedConcurrentLinkedQueue.java  |  85 ++++++++++++++
 3 files changed, 203 insertions(+), 4 deletions(-)
----------------------------------------------------------------------




[3/6] hbase git commit: HBASE-13420 RegionEnvironment.offerExecutionLatency Blocks Threads under Heavy Load

Posted by ap...@apache.org.
HBASE-13420 RegionEnvironment.offerExecutionLatency Blocks Threads under Heavy Load


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0f3de8a1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0f3de8a1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0f3de8a1

Branch: refs/heads/0.98
Commit: 0f3de8a17662b3305fc862c1c5b46f71206e3272
Parents: 0170e79
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Apr 30 12:56:20 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Apr 30 12:56:20 2015 -0700

----------------------------------------------------------------------
 .../regionserver/RegionCoprocessorHost.java     |   8 +-
 .../util/BoundedConcurrentLinkedQueue.java      | 114 +++++++++++++++++++
 .../util/TestBoundedConcurrentLinkedQueue.java  |  85 ++++++++++++++
 3 files changed, 203 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0f3de8a1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index f45e89c..08aa8b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -27,8 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.UUID;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.regex.Matcher;
@@ -77,6 +75,7 @@ import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.BoundedConcurrentLinkedQueue;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hbase.util.Pair;
@@ -101,6 +100,7 @@ public class RegionCoprocessorHost
       new ReferenceMap(AbstractReferenceMap.HARD, AbstractReferenceMap.WEAK);
 
   /**
+   * 
    * Encapsulation of the environment of each coprocessor
    */
   static class RegionEnvironment extends CoprocessorHost.Environment
@@ -110,8 +110,8 @@ public class RegionCoprocessorHost
     private RegionServerServices rsServices;
     ConcurrentMap<String, Object> sharedData;
     private static final int LATENCY_BUFFER_SIZE = 100;
-    private final BlockingQueue<Long> coprocessorTimeNanos = new ArrayBlockingQueue<Long>(
-        LATENCY_BUFFER_SIZE);
+    private final BoundedConcurrentLinkedQueue<Long> coprocessorTimeNanos =
+        new BoundedConcurrentLinkedQueue<Long>(LATENCY_BUFFER_SIZE);
 
     /**
      * Constructor

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f3de8a1/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java
new file mode 100644
index 0000000..9208238
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.util.Collection;
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * A ConcurrentLinkedQueue that enforces a maximum queue size.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class BoundedConcurrentLinkedQueue<T> extends ConcurrentLinkedQueue<T> {
+  private static final long serialVersionUID = 1L;
+  private volatile long size = 0;
+  private final long maxSize;
+
+  public BoundedConcurrentLinkedQueue() {
+    this(Long.MAX_VALUE);
+  }
+
+  public BoundedConcurrentLinkedQueue(long maxSize) {
+    super();
+    this.maxSize = maxSize;
+  }
+
+  @Override
+  public boolean add(T e) {
+    return offer(e);
+  }
+
+  @Override
+  public boolean addAll(Collection<? extends T> c) {
+    size += c.size();        // Between here and below we might reject offers,
+    if (size > maxSize) {    // if over maxSize, but that's ok
+      size -= c.size();      // We're over, just back out and return.
+      return false;
+    }
+    return super.addAll(c);  // Always true for ConcurrentLinkedQueue
+  }
+
+  @Override
+  public void clear() {
+    super.clear();
+    size = 0;
+  }
+
+  @Override
+  public boolean offer(T e) {
+    if (++size > maxSize) { // ++ on a volatile is not atomic, so the bound
+      --size;               // is approximate under races. We didn't take it.
+      return false;
+    }
+    return super.offer(e);   // Always true for ConcurrentLinkedQueue
+  }
+
+  @Override
+  public T poll() {
+    T result = super.poll();
+    if (result != null) {
+      --size;
+    }
+    return result;
+  }
+
+  @Override
+  public boolean remove(Object o) {
+    boolean result = super.remove(o);
+    if (result) {
+      --size;
+    }
+    return result;
+  }
+
+  @Override
+  public int size() {
+    return (int) size;
+  }
+
+  public void drainTo(Collection<T> list) {
+    long removed = 0;
+    T l;
+    while ((l = super.poll()) != null) {
+      list.add(l);
+      removed++;
+    }
+    // Limit the number of operations on a volatile by only reporting size
+    // change after the drain is completed.
+    size -= removed;
+  }
+
+  public long remainingCapacity() {
+    long remaining = maxSize - size;
+    return remaining >= 0 ? remaining : 0;
+  }
+}
\ No newline at end of file
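
A caveat on the class above: size is a plain volatile long, and its ++/--
updates compile to read-modify-write sequences that are not atomic, so
concurrent offers can lose counter updates. size() is advisory and the bound
can be transiently overshot or undershot; for a buffer of latency samples
that is an acceptable trade for never taking a lock, and the in-file comment
about limiting operations on a volatile points the same way. If an exact
bound were ever wanted, a hypothetical variant (not part of this commit)
could pay one CAS per operation instead:

  import java.util.concurrent.ConcurrentLinkedQueue;
  import java.util.concurrent.atomic.AtomicLong;

  // Hypothetical variant, not part of the commit: an AtomicLong counter
  // makes the bound exact, because incrementAndGet() cannot lose updates the
  // way ++size on a volatile can. Only offer() and poll() are shown; the
  // other overrides in the committed class would need the same treatment.
  class StrictlyBoundedQueue<T> extends ConcurrentLinkedQueue<T> {
    private static final long serialVersionUID = 1L;
    private final AtomicLong size = new AtomicLong(0);
    private final long maxSize;

    StrictlyBoundedQueue(long maxSize) {
      this.maxSize = maxSize;
    }

    @Override
    public boolean offer(T e) {
      if (size.incrementAndGet() > maxSize) {
        size.decrementAndGet();  // back out the failed reservation
        return false;
      }
      return super.offer(e);     // always true for ConcurrentLinkedQueue
    }

    @Override
    public T poll() {
      T result = super.poll();
      if (result != null) {
        size.decrementAndGet();
      }
      return result;
    }
  }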

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f3de8a1/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java
new file mode 100644
index 0000000..5972a87
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestBoundedConcurrentLinkedQueue {
+  private final static int CAPACITY = 16;
+
+  private BoundedConcurrentLinkedQueue<Long> queue;
+
+  @Before
+  public void setUp() throws Exception {
+    this.queue = new BoundedConcurrentLinkedQueue<Long>(CAPACITY);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testOfferAndPoll() throws Exception {
+    // Offer
+    for (long i = 1; i <= CAPACITY; ++i) {
+      assertTrue(queue.offer(i));
+      assertEquals(i, queue.size());
+      assertEquals(CAPACITY - i, queue.remainingCapacity());
+    }
+    assertFalse(queue.offer(0L));
+
+    // Poll
+    for (int i = 1; i <= CAPACITY; ++i) {
+      long l = queue.poll();
+      assertEquals(i, l);
+      assertEquals(CAPACITY - i, queue.size());
+      assertEquals(i, queue.remainingCapacity());
+    }
+    assertEquals(null, queue.poll());
+  }
+
+  @Test
+  public void testDrain() throws Exception {
+    // Offer
+    for (long i = 1; i <= CAPACITY; ++i) {
+      assertTrue(queue.offer(i));
+      assertEquals(i, queue.size());
+      assertEquals(CAPACITY - i, queue.remainingCapacity());
+    }
+    assertFalse(queue.offer(0L));
+
+    // Drain
+    List<Long> list = new ArrayList<Long>();
+    queue.drainTo(list);
+    assertEquals(null, queue.poll());
+    assertEquals(0, queue.size());
+    assertEquals(CAPACITY, queue.remainingCapacity());
+  }
+}
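
Both committed tests are single-threaded, and testDrain never asserts on the
drained list itself. A hypothetical concurrent smoke test, not part of this
commit, can stay deterministic despite the approximate internal counter by
trusting only offer()'s return value and the drained element count:

  import static org.junit.Assert.assertEquals;

  import java.util.ArrayList;
  import java.util.List;
  import java.util.concurrent.CountDownLatch;
  import java.util.concurrent.atomic.AtomicLong;

  import org.apache.hadoop.hbase.util.BoundedConcurrentLinkedQueue;
  import org.junit.Test;

  public class TestBoundedConcurrentLinkedQueueConcurrency {
    @Test
    public void testConcurrentOfferThenDrain() throws Exception {
      final int capacity = 1000;
      final int threads = 8;
      final int offersPerThread = 10000;
      final BoundedConcurrentLinkedQueue<Long> queue =
          new BoundedConcurrentLinkedQueue<Long>(capacity);
      final AtomicLong accepted = new AtomicLong(0);
      final CountDownLatch start = new CountDownLatch(1);
      Thread[] workers = new Thread[threads];
      for (int t = 0; t < threads; t++) {
        workers[t] = new Thread(new Runnable() {
          @Override
          public void run() {
            try {
              start.await();
            } catch (InterruptedException e) {
              return;
            }
            for (int i = 0; i < offersPerThread; i++) {
              if (queue.offer(1L)) {   // trust only the return value
                accepted.incrementAndGet();
              }
            }
          }
        });
        workers[t].start();
      }
      start.countDown();
      for (Thread worker : workers) {
        worker.join();
      }
      // Once all producers have joined, a full drain must see exactly the
      // elements whose offers returned true, whatever the internal
      // (approximate) counter ended up saying.
      List<Long> drained = new ArrayList<Long>();
      queue.drainTo(drained);
      assertEquals(accepted.get(), (long) drained.size());
    }
  }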


[2/6] hbase git commit: HBASE-13420 RegionEnvironment.offerExecutionLatency Blocks Threads under Heavy Load

Posted by ap...@apache.org.
HBASE-13420 RegionEnvironment.offerExecutionLatency Blocks Threads under Heavy Load


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/81e793e5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/81e793e5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/81e793e5

Branch: refs/heads/master
Commit: 81e793e582f93af6edf53620d49de6fa4bb21a6f
Parents: 51ce568
Author: Andrew Purtell <ap...@apache.org>
Authored: Thu Apr 30 12:54:28 2015 -0700
Committer: Andrew Purtell <ap...@apache.org>
Committed: Thu Apr 30 12:54:28 2015 -0700

----------------------------------------------------------------------
 .../regionserver/RegionCoprocessorHost.java     |   8 +-
 .../util/BoundedConcurrentLinkedQueue.java      | 114 +++++++++++++++++++
 .../util/TestBoundedConcurrentLinkedQueue.java  |  86 ++++++++++++++
 3 files changed, 204 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
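
Why the old code blocked: java.util.concurrent.ArrayBlockingQueue guards
every operation, including a failing offer() against a full buffer, with a
single ReentrantLock, so under heavy load every handler thread recording a
coprocessor latency sample serializes on that one lock. A
ConcurrentLinkedQueue offer() appends with a lock-free CAS and returns
immediately either way. Schematically (the wrapper class below is
illustrative, not part of the commit):

  import java.util.Queue;
  import java.util.concurrent.ArrayBlockingQueue;

  import org.apache.hadoop.hbase.util.BoundedConcurrentLinkedQueue;

  public class LatencyBufferChoice {
    // Before: offer() takes the queue's one ReentrantLock even when the
    // buffer is full and the offer fails, so hot threads pile up on the lock.
    static Queue<Long> before(int bound) {
      return new ArrayBlockingQueue<Long>(bound);
    }

    // After: offer() bumps a counter and does a lock-free append, or backs
    // the counter out and returns false immediately when the buffer is full.
    static Queue<Long> after(int bound) {
      return new BoundedConcurrentLinkedQueue<Long>(bound);
    }
  }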


http://git-wip-us.apache.org/repos/asf/hbase/blob/81e793e5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index 6e23952..8d7555c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -27,8 +27,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.NavigableSet;
 import java.util.UUID;
-import java.util.concurrent.ArrayBlockingQueue;
-import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.regex.Matcher;
@@ -82,6 +80,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
 import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.BoundedConcurrentLinkedQueue;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CoprocessorClassLoader;
 import org.apache.hadoop.hbase.util.Pair;
@@ -101,6 +100,7 @@ public class RegionCoprocessorHost
       new ReferenceMap(AbstractReferenceMap.HARD, AbstractReferenceMap.WEAK);
 
   /**
+   * 
    * Encapsulation of the environment of each coprocessor
    */
   static class RegionEnvironment extends CoprocessorHost.Environment
@@ -110,8 +110,8 @@ public class RegionCoprocessorHost
     private RegionServerServices rsServices;
     ConcurrentMap<String, Object> sharedData;
     private static final int LATENCY_BUFFER_SIZE = 100;
-    private final BlockingQueue<Long> coprocessorTimeNanos = new ArrayBlockingQueue<Long>(
-        LATENCY_BUFFER_SIZE);
+    private final BoundedConcurrentLinkedQueue<Long> coprocessorTimeNanos =
+        new BoundedConcurrentLinkedQueue<Long>(LATENCY_BUFFER_SIZE);
     private final boolean useLegacyPre;
     private final boolean useLegacyPost;
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/81e793e5/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java
new file mode 100644
index 0000000..9208238
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BoundedConcurrentLinkedQueue.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import java.util.Collection;
+import java.util.concurrent.ConcurrentLinkedQueue;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * A ConcurrentLinkedQueue that enforces a maximum queue size.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class BoundedConcurrentLinkedQueue<T> extends ConcurrentLinkedQueue<T> {
+  private static final long serialVersionUID = 1L;
+  private volatile long size = 0;
+  private final long maxSize;
+
+  public BoundedConcurrentLinkedQueue() {
+    this(Long.MAX_VALUE);
+  }
+
+  public BoundedConcurrentLinkedQueue(long maxSize) {
+    super();
+    this.maxSize = maxSize;
+  }
+
+  @Override
+  public boolean add(T e) {
+    return offer(e);
+  }
+
+  @Override
+  public boolean addAll(Collection<? extends T> c) {
+    size += c.size();        // Between here and below we might reject offers,
+    if (size > maxSize) {    // if over maxSize, but that's ok
+      size -= c.size();      // We're over, just back out and return.
+      return false;
+    }
+    return super.addAll(c);  // Always true for ConcurrentLinkedQueue
+  }
+
+  @Override
+  public void clear() {
+    super.clear();
+    size = 0;
+  }
+
+  @Override
+  public boolean offer(T e) {
+    if (++size > maxSize) { // ++ on a volatile is not atomic, so the bound
+      --size;               // is approximate under races. We didn't take it.
+      return false;
+    }
+    return super.offer(e);   // Always true for ConcurrentLinkedQueue
+  }
+
+  @Override
+  public T poll() {
+    T result = super.poll();
+    if (result != null) {
+      --size;
+    }
+    return result;
+  }
+
+  @Override
+  public boolean remove(Object o) {
+    boolean result = super.remove(o);
+    if (result) {
+      --size;
+    }
+    return result;
+  }
+
+  @Override
+  public int size() {
+    return (int) size;
+  }
+
+  public void drainTo(Collection<T> list) {
+    long removed = 0;
+    T l;
+    while ((l = super.poll()) != null) {
+      list.add(l);
+      removed++;
+    }
+    // Limit the number of operations on a volatile by only reporting size
+    // change after the drain is completed.
+    size -= removed;
+  }
+
+  public long remainingCapacity() {
+    long remaining = maxSize - size;
+    return remaining >= 0 ? remaining : 0;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/81e793e5/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java
new file mode 100644
index 0000000..3453f24
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestBoundedConcurrentLinkedQueue.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hadoop.hbase.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertFalse;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({MiscTests.class, SmallTests.class})
+public class TestBoundedConcurrentLinkedQueue {
+  private final static int CAPACITY = 16;
+
+  private BoundedConcurrentLinkedQueue<Long> queue;
+
+  @Before
+  public void setUp() throws Exception {
+    this.queue = new BoundedConcurrentLinkedQueue<Long>(CAPACITY);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+  }
+
+  @Test
+  public void testOfferAndPoll() throws Exception {
+    // Offer
+    for (long i = 1; i <= CAPACITY; ++i) {
+      assertTrue(queue.offer(i));
+      assertEquals(i, queue.size());
+      assertEquals(CAPACITY - i, queue.remainingCapacity());
+    }
+    assertFalse(queue.offer(0L));
+
+    // Poll
+    for (int i = 1; i <= CAPACITY; ++i) {
+      long l = queue.poll();
+      assertEquals(i, l);
+      assertEquals(CAPACITY - i, queue.size());
+      assertEquals(i, queue.remainingCapacity());
+    }
+    assertEquals(null, queue.poll());
+  }
+
+  @Test
+  public void testDrain() throws Exception {
+    // Offer
+    for (long i = 1; i <= CAPACITY; ++i) {
+      assertTrue(queue.offer(i));
+      assertEquals(i, queue.size());
+      assertEquals(CAPACITY - i, queue.remainingCapacity());
+    }
+    assertFalse(queue.offer(0L));
+
+    // Drain
+    List<Long> list = new ArrayList<Long>();
+    queue.drainTo(list);
+    assertEquals(null, queue.poll());
+    assertEquals(0, queue.size());
+    assertEquals(CAPACITY, queue.remainingCapacity());
+  }
+}