You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ap...@apache.org on 2017/08/02 00:28:06 UTC

[2/3] hbase git commit: HBASE-18395 Update clock on region open and close (revision 5)

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/HBaseProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/HBaseProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/HBaseProtos.java
index b3b0831..cb1a47c 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/HBaseProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/HBaseProtos.java
@@ -18997,6 +18997,485 @@ public final class HBaseProtos {
 
   }
 
+  public interface NodeTimeOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.NodeTime)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>optional uint64 time = 1;</code>
+     */
+    boolean hasTime();
+    /**
+     * <code>optional uint64 time = 1;</code>
+     */
+    long getTime();
+  }
+  /**
+   * <pre>
+   **
+   * Used to send timestamp of node. The timestamp can be interpreted as either a physical or hybrid
+   * timestamp using TimestampType.
+   * </pre>
+   *
+   * Protobuf type {@code hbase.pb.NodeTime}
+   */
+  public  static final class NodeTime extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.NodeTime)
+      NodeTimeOrBuilder {
+    // Use NodeTime.newBuilder() to construct.
+    private NodeTime(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private NodeTime() {
+      time_ = 0L;
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private NodeTime(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              time_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NodeTime_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NodeTime_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.class, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int TIME_FIELD_NUMBER = 1;
+    private long time_;
+    /**
+     * <code>optional uint64 time = 1;</code>
+     */
+    public boolean hasTime() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>optional uint64 time = 1;</code>
+     */
+    public long getTime() {
+      return time_;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(1, time_);
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(1, time_);
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime other = (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime) obj;
+
+      boolean result = true;
+      result = result && (hasTime() == other.hasTime());
+      if (hasTime()) {
+        result = result && (getTime()
+            == other.getTime());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptor().hashCode();
+      if (hasTime()) {
+        hash = (37 * hash) + TIME_FIELD_NUMBER;
+        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+            getTime());
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * <pre>
+     **
+     * Used to send timestamp of node. The timestamp can be interpreted as either a physical or hybrid
+     * timestamp using TimestampType.
+     * </pre>
+     *
+     * Protobuf type {@code hbase.pb.NodeTime}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.NodeTime)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTimeOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NodeTime_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NodeTime_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.class, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        time_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.internal_static_hbase_pb_NodeTime_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime result = new org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.time_ = time_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime.getDefaultInstance()) return this;
+        if (other.hasTime()) {
+          setTime(other.getTime());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private long time_ ;
+      /**
+       * <code>optional uint64 time = 1;</code>
+       */
+      public boolean hasTime() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>optional uint64 time = 1;</code>
+       */
+      public long getTime() {
+        return time_;
+      }
+      /**
+       * <code>optional uint64 time = 1;</code>
+       */
+      public Builder setTime(long value) {
+        bitField0_ |= 0x00000001;
+        time_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 time = 1;</code>
+       */
+      public Builder clearTime() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        time_ = 0L;
+        onChanged();
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.NodeTime)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.NodeTime)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<NodeTime>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<NodeTime>() {
+      public NodeTime parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new NodeTime(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<NodeTime> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<NodeTime> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NodeTime getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
   private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
     internal_static_hbase_pb_TableName_descriptor;
   private static final 
@@ -19117,6 +19596,11 @@ public final class HBaseProtos {
   private static final 
     org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
       internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable;
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_hbase_pb_NodeTime_descriptor;
+  private static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_hbase_pb_NodeTime_fieldAccessorTable;
 
   public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -19175,14 +19659,15 @@ public final class HBaseProtos {
       "\t\022\025\n\rversion_major\030\007 \001(\r\022\025\n\rversion_mino" +
       "r\030\010 \001(\r\"Q\n\020RegionServerInfo\022\020\n\010infoPort\030" +
       "\001 \001(\005\022+\n\014version_info\030\002 \001(\0132\025.hbase.pb.V" +
-      "ersionInfo*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rL",
-      "ESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003" +
-      "\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005N" +
-      "O_OP\020\006*n\n\010TimeUnit\022\017\n\013NANOSECONDS\020\001\022\020\n\014M" +
-      "ICROSECONDS\020\002\022\020\n\014MILLISECONDS\020\003\022\013\n\007SECON" +
-      "DS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007BE" +
-      "\n1org.apache.hadoop.hbase.shaded.protobu" +
-      "f.generatedB\013HBaseProtosH\001\240\001\001"
+      "ersionInfo\"\030\n\010NodeTime\022\014\n\004time\030\001 \001(\004*r\n\013",
+      "CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001" +
+      "\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR" +
+      "_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010Time" +
+      "Unit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022" +
+      "\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTE" +
+      "S\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007BE\n1org.apache.h" +
+      "adoop.hbase.shaded.protobuf.generatedB\013H" +
+      "BaseProtosH\001\240\001\001"
     };
     org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
         new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.    InternalDescriptorAssigner() {
@@ -19340,6 +19825,12 @@ public final class HBaseProtos {
       org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
         internal_static_hbase_pb_RegionServerInfo_descriptor,
         new java.lang.String[] { "InfoPort", "VersionInfo", });
+    internal_static_hbase_pb_NodeTime_descriptor =
+      getDescriptor().getMessageTypes().get(24);
+    internal_static_hbase_pb_NodeTime_fieldAccessorTable = new
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+        internal_static_hbase_pb_NodeTime_descriptor,
+        new java.lang.String[] { "Time", });
   }
 
   // @@protoc_insertion_point(outer_class_scope)

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol-shaded/src/main/protobuf/Admin.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index 6d67c89..7808618 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -81,6 +81,8 @@ message OpenRegionRequest {
   optional uint64 serverStartCode = 2;
   // wall clock time from master
   optional uint64 master_system_time = 5;
+  // physical or hybrid timestamp from master clock
+  optional NodeTime nodeTime = 6;
 
   message RegionOpenInfo {
     required RegionInfo region = 1;
@@ -93,6 +95,8 @@ message OpenRegionRequest {
 
 message OpenRegionResponse {
   repeated RegionOpeningState opening_state = 1;
+  // physical or hybrid timestamp from region server clock
+  optional NodeTime nodeTime = 2;
 
   enum RegionOpeningState {
     OPENED = 0;
@@ -120,10 +124,14 @@ message CloseRegionRequest {
   optional ServerName destination_server = 4;
   // the intended server for this RPC.
   optional uint64 serverStartCode = 5;
+  // physical or hybrid timestamp from master clock
+  optional NodeTime nodeTime = 6;
 }
 
 message CloseRegionResponse {
   required bool closed = 1;
+  // physical or hybrid timestamp from region server clock
+  optional NodeTime nodeTime = 2;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol-shaded/src/main/protobuf/HBase.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/HBase.proto b/hbase-protocol-shaded/src/main/protobuf/HBase.proto
index 10742ad..ebf9385 100644
--- a/hbase-protocol-shaded/src/main/protobuf/HBase.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/HBase.proto
@@ -235,3 +235,11 @@ message RegionServerInfo {
   optional int32 infoPort = 1;
   optional VersionInfo version_info = 2;
 }
+
+/**
+ * Used to send timestamp of node. The timestamp can be interpreted as either a physical or hybrid
+ * timestamp using TimestampType.
+ */
+message NodeTime {
+  optional uint64 time = 1;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index e25064f..76f6813 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -22636,7 +22636,7 @@ public final class ClientProtos {
        * </pre>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Cursor.Builder, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CursorOrBuilder> 
           getCursorFieldBuilder() {
         if (cursorBuilder_ == null) {
           cursorBuilder_ = new com.google.protobuf.SingleFieldBuilder<

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
index 717ec73..e3bb364 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/QuotaProtos.java
@@ -982,7 +982,7 @@ public final class QuotaProtos {
 
       public final boolean isInitialized() {
         if (!hasTimeUnit()) {
-
+          
           return false;
         }
         return true;
@@ -2009,37 +2009,37 @@ public final class QuotaProtos {
       public final boolean isInitialized() {
         if (hasReqNum()) {
           if (!getReqNum().isInitialized()) {
-
+            
             return false;
           }
         }
         if (hasReqSize()) {
           if (!getReqSize().isInitialized()) {
-
+            
             return false;
           }
         }
         if (hasWriteNum()) {
           if (!getWriteNum().isInitialized()) {
-
+            
             return false;
           }
         }
         if (hasWriteSize()) {
           if (!getWriteSize().isInitialized()) {
-
+            
             return false;
           }
         }
         if (hasReadNum()) {
           if (!getReadNum().isInitialized()) {
-
+            
             return false;
           }
         }
         if (hasReadSize()) {
           if (!getReadSize().isInitialized()) {
-
+            
             return false;
           }
         }
@@ -2169,7 +2169,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.TimedQuota req_num = 1;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
           getReqNumFieldBuilder() {
         if (reqNumBuilder_ == null) {
           reqNumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2286,7 +2286,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.TimedQuota req_size = 2;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
           getReqSizeFieldBuilder() {
         if (reqSizeBuilder_ == null) {
           reqSizeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2403,7 +2403,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.TimedQuota write_num = 3;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
           getWriteNumFieldBuilder() {
         if (writeNumBuilder_ == null) {
           writeNumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2520,7 +2520,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.TimedQuota write_size = 4;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
           getWriteSizeFieldBuilder() {
         if (writeSizeBuilder_ == null) {
           writeSizeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2637,7 +2637,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.TimedQuota read_num = 5;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
           getReadNumFieldBuilder() {
         if (readNumBuilder_ == null) {
           readNumBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -2754,7 +2754,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.TimedQuota read_size = 6;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
           getReadSizeFieldBuilder() {
         if (readSizeBuilder_ == null) {
           readSizeBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -3244,7 +3244,7 @@ public final class QuotaProtos {
       public final boolean isInitialized() {
         if (hasTimedQuota()) {
           if (!getTimedQuota().isInitialized()) {
-
+            
             return false;
           }
         }
@@ -3410,7 +3410,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.TimedQuota timed_quota = 2;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.TimedQuotaOrBuilder> 
           getTimedQuotaFieldBuilder() {
         if (timedQuotaBuilder_ == null) {
           timedQuotaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -3978,7 +3978,7 @@ public final class QuotaProtos {
       public final boolean isInitialized() {
         if (hasThrottle()) {
           if (!getThrottle().isInitialized()) {
-
+            
             return false;
           }
         }
@@ -4141,7 +4141,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.Throttle throttle = 2;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Throttle.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.ThrottleOrBuilder> 
           getThrottleFieldBuilder() {
         if (throttleBuilder_ == null) {
           throttleBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -4258,7 +4258,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.SpaceQuota space = 3;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
           getSpaceFieldBuilder() {
         if (spaceBuilder_ == null) {
           spaceBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -5876,7 +5876,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.SpaceQuota quota = 1;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuota.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaOrBuilder> 
           getQuotaFieldBuilder() {
         if (quotaBuilder_ == null) {
           quotaBuilder_ = new com.google.protobuf.SingleFieldBuilder<
@@ -7088,7 +7088,7 @@ public final class QuotaProtos {
        * <code>optional .hbase.pb.SpaceQuotaStatus quota_status = 1;</code>
        */
       private com.google.protobuf.SingleFieldBuilder<
-          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder>
+          org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatus.Builder, org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.SpaceQuotaStatusOrBuilder> 
           getQuotaStatusFieldBuilder() {
         if (quotaStatusBuilder_ == null) {
           quotaStatusBuilder_ = new com.google.protobuf.SingleFieldBuilder<

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 67fda75..0ded810 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -53,6 +53,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.ClusterStatus;
 import org.apache.hadoop.hbase.CoordinatedStateException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -576,6 +578,25 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
+  public Clock getClock(ClockType clockType) {
+    return super.getClock(clockType);
+  }
+
+  @Override
+  public long updateClock(long timestamp) {
+    return super.updateClock(timestamp);
+  }
+
+  /**
+   * Only for the purpose of testing
+   * @param clock
+   */
+  @VisibleForTesting
+  public void setClock(Clock clock) {
+    super.setClock(clock);
+  }
+
+  @Override
   protected TableDescriptors getFsTableDescriptors() throws IOException {
     return super.getFsTableDescriptors();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index c515435..2641b8d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hbase.master;
 import java.io.IOException;
 import java.util.List;
 
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -58,6 +60,18 @@ import com.google.protobuf.Service;
 @InterfaceAudience.Private
 public interface MasterServices extends Server {
   /**
+   * @param clockType The clock type
+   * @return Master's instance of {@link Clock}
+   */
+  Clock getClock(ClockType clockType);
+
+  /**
+   * @param timestamp The timestamp
+   * @return The current physical or hybrid time of the clock after it is updated
+   */
+  long updateClock(long timestamp);
+
+  /**
    * @return the underlying snapshot manager
    */
   SnapshotManager getSnapshotManager();

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 4dff6f4..bafe2ee 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -771,7 +771,7 @@ public class ServerManager {
         " failed because no RPC connection found to this server");
     }
     OpenRegionRequest request =
-        RequestConverter.buildOpenRegionRequest(server, region, favoredNodes, false);
+        RequestConverter.buildOpenRegionRequest(server, region, favoredNodes, false, null);
     try {
       OpenRegionResponse response = admin.openRegion(null, request);
       return ResponseConverter.getRegionOpeningState(response);
@@ -845,7 +845,7 @@ public class ServerManager {
    * @return a list of region opening states
    */
   public List<RegionOpeningState> sendRegionOpen(ServerName server,
-      List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos)
+      List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos, Long masterClockTime)
   throws IOException {
     AdminService.BlockingInterface admin = getRsAdmin(server);
     if (admin == null) {
@@ -854,7 +854,7 @@ public class ServerManager {
     }
 
     OpenRegionRequest request =
-        RequestConverter.buildOpenRegionRequest(server, regionOpenInfos, false);
+        RequestConverter.buildOpenRegionRequest(server, regionOpenInfos, false, masterClockTime);
     try {
       OpenRegionResponse response = admin.openRegion(null, request);
       return ResponseConverter.getRegionOpeningStateList(response);
@@ -879,7 +879,7 @@ public class ServerManager {
    * @throws IOException
    */
   public boolean sendRegionClose(ServerName server, HRegionInfo region,
-      ServerName dest) throws IOException {
+      ServerName dest, Long masterClockTime) throws IOException {
     if (server == null) throw new NullPointerException("Passed server is null");
     AdminService.BlockingInterface admin = getRsAdmin(server);
     if (admin == null) {
@@ -889,12 +889,12 @@ public class ServerManager {
         " failed because no RPC connection found to this server");
     }
     HBaseRpcController controller = newRpcController();
-    return ProtobufUtil.closeRegion(controller, admin, server, region.getRegionName(), dest);
+    return ProtobufUtil.closeRegion(controller, admin, server, region.getRegionName(), dest, masterClockTime);
   }
 
   public boolean sendRegionClose(ServerName server,
-      HRegionInfo region) throws IOException {
-    return sendRegionClose(server, region, null);
+      HRegionInfo region, Long masterClockTime) throws IOException {
+    return sendRegionClose(server, region, null, masterClockTime);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index eda1128..df05405 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -30,9 +30,12 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.hbase.Clock;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.ServerListener;
@@ -266,6 +269,16 @@ public class RSProcedureDispatcher
 
       try {
         final ExecuteProceduresResponse response = sendRequest(getServerName(), request.build());
+        for (OpenRegionResponse orr : response.getOpenRegionList()) {
+          if (orr.hasNodeTime()) {
+            env.getMasterServices().updateClock(orr.getNodeTime().getTime());
+          }
+        }
+        for (CloseRegionResponse crr : response.getCloseRegionList()) {
+          if (crr.hasNodeTime()) {
+            env.getMasterServices().updateClock(crr.getNodeTime().getTime());
+          }
+        }
         remoteCallCompleted(env, response);
       } catch (IOException e) {
         e = unwrapException(e);
@@ -286,7 +299,7 @@ public class RSProcedureDispatcher
     public void dispatchCloseRequests(final MasterProcedureEnv env,
         final List<RegionCloseOperation> operations) {
       for (RegionCloseOperation op: operations) {
-        request.addCloseRegion(op.buildCloseRegionRequest(getServerName()));
+        request.addCloseRegion(op.buildCloseRegionRequest(env, getServerName()));
       }
     }
 
@@ -325,6 +338,14 @@ public class RSProcedureDispatcher
     final OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder();
     builder.setServerStartCode(serverName.getStartcode());
     builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
+
+    // Set master clock time for send event
+    // TODO: For now we only sync meta's clock in order to verify HLC functionality on meta table,
+    // but in the future we would intend to sync both HLC and system monotonic clocks
+    Clock clock = env.getMasterServices()
+        .getClock(HTableDescriptor.DEFAULT_META_CLOCK_TYPE);
+    builder.setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(clock.now()));
+
     for (RegionOpenOperation op: operations) {
       builder.addOpenInfo(op.buildRegionOpenInfoRequest(env));
     }
@@ -347,6 +368,10 @@ public class RSProcedureDispatcher
 
       try {
         OpenRegionResponse response = sendRequest(getServerName(), request);
+        if (response.hasNodeTime()) {
+          // Update master clock upon receiving open region response from region server
+          env.getMasterServices().updateClock(response.getNodeTime().getTime());
+        }
         remoteCallCompleted(env, response);
       } catch (IOException e) {
         e = unwrapException(e);
@@ -397,9 +422,17 @@ public class RSProcedureDispatcher
     @Override
     public Void call() {
       final MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment();
-      final CloseRegionRequest request = operation.buildCloseRegionRequest(getServerName());
+      final CloseRegionRequest request = operation.buildCloseRegionRequest(env, getServerName());
       try {
         CloseRegionResponse response = sendRequest(getServerName(), request);
+        if (response.hasNodeTime()) {
+          // Update master clock upon receiving close region response from region server.
+          // Route through updateClock() (as the open-region and ExecuteProcedures paths do)
+          // so the timestamp type selects the HLC or system-monotonic clock consistently.
+          env.getMasterServices().updateClock(response.getNodeTime().getTime());
+        }
         remoteCallCompleted(env, response);
       } catch (IOException e) {
         e = unwrapException(e);
@@ -536,9 +569,14 @@ public class RSProcedureDispatcher
       return closed;
     }
 
-    public CloseRegionRequest buildCloseRegionRequest(final ServerName serverName) {
+    public CloseRegionRequest buildCloseRegionRequest(final MasterProcedureEnv env,
+        final ServerName serverName) {
+      // Set master clock time for send event
+      // TODO: For now we only sync meta's clock in order to verify HLC functionality on meta table,
+      // but in the future we would intend to sync both HLC and system monotonic clocks
+      Clock clock = env.getMasterServices().getClock(HTableDescriptor.DEFAULT_META_CLOCK_TYPE);
       return ProtobufUtil.buildCloseRegionRequest(serverName,
-        getRegionInfo().getRegionName(), getDestinationServer());
+        getRegionInfo().getRegionName(), getDestinationServer(), clock.now());
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index a55be97..2b82e4c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -92,7 +92,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
@@ -387,7 +386,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   @Override
   public Clock getClock() {
     if (this.clock == null) {
-      return this.getRegionServerServices().getRegionServerClock(
+      return this.getRegionServerServices().getClock(
           getTableDescriptor().getClockType());
     }
     return this.clock;
@@ -798,7 +797,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         ? DEFAULT_DURABILITY
         : htd.getDurability();
     if (rsServices != null) {
-      this.clock = rsServices.getRegionServerClock(htd.getClockType());
+      this.clock = rsServices.getClock(htd.getClockType());
       this.rsAccounting = this.rsServices.getRegionServerAccounting();
       // don't initialize coprocessors if not running within a regionserver
       // TODO: revisit if coprocessors should load in other cases
@@ -1012,6 +1011,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     long maxSeqId = -1;
     // initialized to -1 so that we pick up MemstoreTS from column families
     long maxMemstoreTS = -1;
+    // Largest timestamp found across all stores
+    long maxTimestamp = 0;
 
     if (htableDescriptor.getColumnFamilyCount() != 0) {
       // initialize the thread pool for opening stores in parallel.
@@ -1050,8 +1051,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           if (maxStoreMemstoreTS > maxMemstoreTS) {
             maxMemstoreTS = maxStoreMemstoreTS;
           }
+          maxTimestamp = Math.max(maxTimestamp, store.getMaxTimestamp());
         }
         allStoresOpened = true;
+        clock.update(maxTimestamp);
         if(hasSloppyStores) {
           htableDescriptor = TableDescriptorBuilder.newBuilder(htableDescriptor)
                   .setFlushPolicyClassName(FlushNonSloppyStoresFirstPolicy.class.getName())

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index f623f88..d8d87f8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -86,6 +86,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TimestampType;
 import org.apache.hadoop.hbase.YouAreDeadException;
 import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -336,9 +337,9 @@ public class HRegionServer extends HasThread implements
   // Region server contains instances of all three clock clocks. Regions have a set
   // clock type so depending on the clock type needed by a region, the appropriate
   // one can be accessed.
-  final protected Clock hybridLogicalClock;
-  final protected Clock systemMonotonicClock;
-  final protected Clock systemClock;
+  protected Clock hybridLogicalClock;
+  protected Clock systemMonotonicClock;
+  protected Clock systemClock;
 
   ConcurrentMap<String, Integer> rowlocks = new ConcurrentHashMap<>();
 
@@ -2087,14 +2088,46 @@ public class HRegionServer extends HasThread implements
   }
 
   @Override
-  public Clock getRegionServerClock(ClockType clockType) {
-    if (clockType.equals(ClockType.HLC)){
-      return this.hybridLogicalClock;
-    } else if (clockType.equals(ClockType.SYSTEM_MONOTONIC)) {
-      return this.systemMonotonicClock;
-    } else {
-      return this.systemClock;
+  public Clock getClock(ClockType clockType) {
+    switch (clockType) {
+      case HLC:
+        return this.hybridLogicalClock;
+      case SYSTEM_MONOTONIC:
+        return this.systemMonotonicClock;
+      case SYSTEM:
+        return this.systemClock;
+      default:
+        throw new IllegalArgumentException("Wrong clock type: " + clockType.toString());
+    }
+  }
+
+  /**
+   * Only for the purpose of testing
+   * @param clock
+   */
+  @VisibleForTesting
+  public void setClock(Clock clock) {
+    switch (clock.getClockType()) {
+      case HLC:
+        this.hybridLogicalClock = clock;
+        break;
+      case SYSTEM_MONOTONIC:
+        this.systemMonotonicClock = clock;
+        break;
+      case SYSTEM:
+        this.systemClock = clock;
+        break;
+      default:
+        throw new IllegalArgumentException("Wrong clock type: " + clock.getClockType().toString());
+    }
+  }
+
+  @Override
+  public long updateClock(long timestamp) {
+    if (TimestampType.HYBRID.isLikelyOfType(timestamp)) {
+      return this.hybridLogicalClock.update(timestamp);
     }
+    return this.systemMonotonicClock.update(timestamp);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 4b2b460..7245597 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -92,7 +92,6 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-import org.apache.hadoop.hbase.TimestampType;
 import org.apache.hadoop.hbase.Clock;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -481,6 +480,10 @@ public class HStore implements Store {
     return StoreUtils.getMaxMemstoreTSInList(this.getStorefiles());
   }
 
+  public long getMaxTimestamp() {
+    return StoreUtils.getMaxTimestampInList(this.getStorefiles());
+  }
+
   /**
    * @param tabledir {@link Path} to where the table is being stored
    * @param hri {@link HRegionInfo} for the region.

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 8bd1b5d..2fd00d9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.Clock;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseIOException;
@@ -200,6 +201,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanReques
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
@@ -226,8 +228,6 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.zookeeper.KeeperException;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * Implements the regionserver RPC services.
  */
@@ -1514,6 +1514,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
       }
       final String encodedRegionName = ProtobufUtil.getRegionEncodedName(request.getRegion());
 
+      if (request.hasNodeTime()) {
+        this.regionServer.updateClock(request.getNodeTime().getTime());
+      }
+
       requestCount.increment();
       if (sn == null) {
         LOG.info("Close " + encodedRegionName + " without moving");
@@ -1521,7 +1525,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         LOG.info("Close " + encodedRegionName + ", moving to " + sn);
       }
       boolean closed = regionServer.closeRegion(encodedRegionName, false, sn);
-      CloseRegionResponse.Builder builder = CloseRegionResponse.newBuilder().setClosed(closed);
+      // TODO: For now we only sync meta's clock in order to verify HLC functionality on meta table,
+      // but in the future we would intend to sync both HLC and system monotonic clocks
+      long regionServerClockTime = this.regionServer
+          .getClock(HTableDescriptor.DEFAULT_META_CLOCK_TYPE).now();
+
+      CloseRegionResponse.Builder builder = CloseRegionResponse.newBuilder()
+          .setClosed(closed)
+          .setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(regionServerClockTime));
       return builder.build();
     } catch (IOException ie) {
       throw new ServiceException(ie);
@@ -1896,6 +1907,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
     long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1;
 
+    // Update region server clock on receive event
+    if (request.hasNodeTime()) {
+      this.regionServer.updateClock(request.getNodeTime().getTime());
+    }
+
     for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
       final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion());
       HTableDescriptor htd;
@@ -1947,7 +1963,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             // Check if current region open is for distributedLogReplay. This check is to support
             // rolling restart/upgrade where we want to Master/RS see same configuration
             if (!regionOpenInfo.hasOpenForDistributedLogReplay()
-                  || regionOpenInfo.getOpenForDistributedLogReplay()) {
+                || regionOpenInfo.getOpenForDistributedLogReplay()) {
               regionServer.recoveringRegions.put(region.getEncodedName(), null);
             } else {
               // Remove stale recovery region from ZK when we open region not for recovering which
@@ -1978,10 +1994,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
             }
             if (htd.getPriority() >= HConstants.ADMIN_QOS || region.getTable().isSystemTable()) {
               regionServer.service.submit(new OpenPriorityRegionHandler(
-                regionServer, regionServer, region, htd, masterSystemTime));
+                  regionServer, regionServer, region, htd, masterSystemTime));
             } else {
               regionServer.service.submit(new OpenRegionHandler(
-                regionServer, regionServer, region, htd, masterSystemTime));
+                  regionServer, regionServer, region, htd, masterSystemTime));
             }
           }
         }
@@ -2000,6 +2016,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
         }
       }
     }
+
+    // Set clock for send event
+    // TODO: For now we only sync meta's clock in order to verify HLC functionality on meta table,
+    // but in the future we intend to sync both HLC and system monotonic clocks
+    Clock clock = this.regionServer.getClock(HTableDescriptor.DEFAULT_META_CLOCK_TYPE);
+    builder.setNodeTime(HBaseProtos.NodeTime.newBuilder().setTime(clock.now()));
+
     return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 5c37136..9b17e47 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -60,7 +60,17 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
    * default (common) WAL */
   WAL getWAL(HRegionInfo regionInfo) throws IOException;
 
-  Clock getRegionServerClock(ClockType clockType);
+  /**
+   * @param clockType The clock type
+   * @return Region server's instance of {@link Clock}
+   */
+  Clock getClock(ClockType clockType);
+
+  /**
+   * @param timestamp The timestamp
+   * @return The current physical or hybrid time of the clock after it is updated
+   */
+  long updateClock(long timestamp);
 
   /** @return the List of WALs that are used by this server
    *  Doesn't include the meta WAL

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
index 5623adb..a1903b5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreUtils.java
@@ -114,6 +114,14 @@ public class StoreUtils {
     return max;
   }
 
+  public static long getMaxTimestampInList(Collection<StoreFile> sfs) {
+    long max = 0;
+    for (StoreFile sf : sfs) {
+      max = Math.max(max, sf.getMaximumTimestamp().orElse(Long.MIN_VALUE));
+    }
+    return max;
+  }
+
   /**
    * Gets the approximate mid-point of the given file that is optimal for use in splitting it.
    * @param file the store file

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 9f568f2..0640457 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -253,11 +253,19 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public Clock getRegionServerClock(ClockType clockType) {
+  public Clock getClock(ClockType clockType) {
     return Clock.getDummyClockOfGivenClockType(clockType);
   }
 
   @Override
+  public long updateClock(long timestamp) {
+    if (TimestampType.HYBRID.isLikelyOfType(timestamp)) {
+      return new Clock.HLC().update(timestamp);
+    }
+    return new Clock.SystemMonotonic().update(timestamp);
+  }
+
+  @Override
   public ExecutorService getExecutorService() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
index 572a128..bc95f46 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestClockWithCluster.java
@@ -19,8 +19,13 @@
 package org.apache.hadoop.hbase;
 
 import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -28,37 +33,81 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.TimestampType;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.assignment.RegionStates;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.junit.After;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-
+import org.junit.rules.TestName;
 
 @Category({MediumTests.class})
 public class TestClockWithCluster {
   private static final Log LOG = LogFactory.getLog(TestClockWithCluster.class);
-  private static final  HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  @Rule
+  public TestName name = new TestName();
+  private static final HBaseTestingUtility HBTU = new HBaseTestingUtility();
   private static Connection connection;
-  private byte[] columnFamily = Bytes.toBytes("testCF");
+
+  private Admin admin;
+  private TableName tableName;
+  private Table table;
+
+  // Test names
+  private static final byte[] TEST_FAMILY = Bytes.toBytes("f1");
+
   @BeforeClass
   public static void setupClass() throws Exception {
-    UTIL.startMiniCluster(1);
-    connection = ConnectionFactory.createConnection(UTIL.getConfiguration());
+    final int NUM_MASTERS = 1;
+    final int NUM_RS = 1;
+    HBTU.startMiniCluster(NUM_MASTERS, NUM_RS);
+    connection = ConnectionFactory.createConnection(HBTU.getConfiguration());
   }
 
   @AfterClass
   public static void tearDownClass() throws Exception {
     connection.close();
-    UTIL.shutdownMiniCluster();
+    HBTU.shutdownMiniCluster();
+  }
+
+  @Before
+  public void setup() throws Exception {
+    admin = connection.getAdmin();
+    tableName = TableName.valueOf(name.getMethodName());
+    admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
+        .addColumnFamily(new HColumnDescriptor(TEST_FAMILY))
+        .build());
+    table = connection.getTable(tableName);
+  }
+
+  @After
+  public void teardown() throws Exception {
+    try {
+      if (table != null) {
+        table.close();
+      }
+    } finally {
+      try {
+        HBTU.deleteTable(tableName);
+      } catch (IOException ioe) {
+        LOG.error("Failed deleting table '" + tableName + "' during teardown. Exception:" + ioe);
+      }
+    }
   }
 
   private void verifyTimestamps(Table table, final byte[] f, int startRow, int endRow,
@@ -75,29 +124,21 @@ public class TestClockWithCluster {
 
   @Test
   public void testNewTablesAreCreatedWithSystemClock() throws IOException {
-    Admin admin = connection.getAdmin();
-    TableName tableName = TableName.valueOf("TestNewTablesAreSystemByDefault");
-    admin.createTable(new HTableDescriptor(tableName).addFamily(new
-      HColumnDescriptor(columnFamily)));
-
-    Table table = connection.getTable(tableName);
-
     ClockType clockType = admin.getTableDescriptor(tableName).getClockType();
     assertEquals(ClockType.SYSTEM, clockType);
     // write
-    UTIL.loadNumericRows(table, columnFamily, 0, 1000);
+    HBTU.loadNumericRows(table, TEST_FAMILY, 0, 1000);
     // read , check if the it is same.
-    UTIL.verifyNumericRows(table, Bytes.toBytes("testCF"), 0, 1000, 0);
+    HBTU.verifyNumericRows(table, TEST_FAMILY, 0, 1000, 0);
 
     // This check will be useful if Clock type were to be system monotonic or HLC.
-    verifyTimestamps(table, columnFamily, 0, 1000, TimestampType.PHYSICAL);
+    verifyTimestamps(table, TEST_FAMILY, 0, 1000, TimestampType.PHYSICAL);
   }
 
   @Test
   public void testMetaTableClockTypeIsHLC() throws IOException {
-    Admin admin = connection.getAdmin();
-    Table table = connection.getTable(TableName.META_TABLE_NAME);
-    ClockType clockType = admin.getTableDescriptor(TableName.META_TABLE_NAME).getClockType();
+    ClockType clockType = admin
+      .getTableDescriptor(TableName.META_TABLE_NAME).getClockType();
     assertEquals(ClockType.HLC, clockType);
   }
 
@@ -112,4 +153,214 @@ public class TestClockWithCluster {
       assertTrue(TimestampType.HYBRID.isLikelyOfType(cell.getTimestamp()));
     }
   }
+
+  private long getColumnLatestCellTimestamp(HRegionInfo hri) throws IOException {
+    Result result = MetaTableAccessor.getRegionResult(connection, hri.getRegionName());
+    Cell cell = result.getColumnLatestCell(HConstants.CATALOG_FAMILY, HConstants.STATE_QUALIFIER);
+    return cell.getTimestamp();
+  }
+
+  private void assertHLCTime(Clock.HLC clock, long expectedPhysicalTime, long expectedLogicalTime) {
+    assertEquals(expectedPhysicalTime, clock.getPhysicalTime());
+    assertEquals(expectedLogicalTime, clock.getLogicalTime());
+  }
+
+  @Test
+  public void testRegionStateTransitionTimestampsIncreaseMonotonically() throws Exception {
+    HRegionServer rs = HBTU.getRSForFirstRegionInTable(tableName);
+    List<Region> regions = rs.getOnlineRegions();
+
+    assert(!regions.isEmpty());
+
+    MiniHBaseCluster cluster = HBTU.getHBaseCluster();
+
+    assertTrue(cluster.waitForActiveAndReadyMaster());
+    HMaster master = cluster.getMaster();
+    assertTrue(master.isActiveMaster());
+    assertTrue(master.isInitialized());
+
+    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
+
+    assertEquals(3, cluster.countServedRegions());
+    HRegionInfo hriOnline;
+    try (RegionLocator locator =
+        HBTU.getConnection().getRegionLocator(tableName)) {
+      hriOnline = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
+    }
+
+    HRegion regionMeta = null;
+    for (Region r : master.getOnlineRegions()) {
+      if (r.getRegionInfo().isMetaRegion()) {
+        regionMeta = ((HRegion) r);
+      }
+    }
+
+    assertNotNull(regionMeta);
+
+    // Inject physical clock that always returns same physical time into hybrid logical clock
+    long systemTime = Clock.DEFAULT_JAVA_MILLIS_PHYSICAL_CLOCK.now();
+    Clock.PhysicalClock physicalClock = mock(Clock.PhysicalClock.class);
+    when(physicalClock.now()).thenReturn(systemTime);
+    when(physicalClock.getTimeUnit()).thenReturn(TimeUnit.MILLISECONDS);
+    Clock.HLC clock = new Clock.HLC(physicalClock, Clock.DEFAULT_MAX_CLOCK_SKEW_IN_MS);
+
+    // The region clock is used for setting timestamps for table mutations and the region server
+    // clock is used for updating the clock on region assign/unassign events.
+
+    // Set meta region clock so that region state transitions are timestamped with mocked clock
+    regionMeta.setClock(clock);
+    master.setClock(clock);
+
+    HRegion userRegion = null;
+    for (Region region : regions) {
+      if (region.getRegionInfo().getTable().equals(tableName)) {
+        userRegion = (HRegion) region;
+      }
+    }
+    assertNotNull(userRegion);
+
+    // Only mock the region server clock because the region clock does not get used during
+    // unassignment and assignment
+    rs.setClock(clock);
+
+    // Repeatedly unassign and assign region while tracking the timestamps of the region state
+    // transitions from the meta table
+    List<Long> timestamps = new ArrayList<>();
+    // Set expected logical time to 0 as initial clock.now() sets clock's logical time to 0
+    long expectedLogicalTime = TimestampType.HYBRID.getLogicalTime(clock.now());
+    for (int i = 0; i < 10; i++) {
+      admin.unassign(hriOnline.getRegionName(), false);
+      assertEquals(RegionState.State.CLOSED, regionStates.getRegionState(hriOnline).getState());
+      // clock.now() is called 8 times and clock.update() is called 2 times, each call increments
+      // the logical time by one.
+      // 0   [now]    Get region info from hbase:meta in HBaseAdmin#unassign
+      // 1   [now]    Get region info from hbase:meta in MasterRpcServices#unassignRegion
+      // 2,3 [now]    Update hbase:meta
+      // 4   [now]    Send unassign region request to region server
+      // 5   [update] Update region server clock upon receiving unassign region request
+      // 6   [now]    Send region server response back to master
+      // 7   [update] Update master clock upon close region response from region server
+      // 8,9 [now]    Update hbase:meta
+      expectedLogicalTime += 10;
+
+      assertEquals(expectedLogicalTime, clock.getLogicalTime());
+      timestamps.add(clock.getLogicalTime());
+
+      admin.assign(hriOnline.getRegionName());
+      // clock.now() is called 7 times and clock.update() is called 2 times, each call increments
+      // the logical time by one.
+      // 0   [now]    Get region info from hbase:meta in HBaseAdmin#assign
+      // 1,2 [now]    Update hbase:meta
+      // 3   [now]    Send assign region request to region server
+      // 4   [update] Update region server clock upon receiving assign region request
+      // 5   [now]    Send region server response back to master
+      // 6   [update] Update master clock upon open region response from region server
+      // 7,8 [now]    Update hbase:meta
+      // Assignment has one less call to clock.now() because MasterRpcServices#assignRegion instead
+      // gets the region info from assignment manager rather than meta table accessor
+      expectedLogicalTime += 9;
+      assertEquals(RegionState.State.OPEN, regionStates.getRegionState(hriOnline).getState());
+      assertEquals(expectedLogicalTime, clock.getLogicalTime());
+      timestamps.add(clock.getLogicalTime());
+    }
+
+    // Ensure that the hybrid timestamps are strictly increasing
+    for (int i = 0; i < timestamps.size() - 1; i++) {
+      if (timestamps.get(i) >= timestamps.get(i + 1)) {
+        Assert.fail("Current ts is " + timestamps.get(i)
+            + ", but the next ts is equal or smaller " + timestamps.get(i + 1));
+      }
+    }
+  }
+
+  @Test
+  public void testRegionOpenAndCloseClockUpdates() throws Exception {
+    HRegionServer rs = HBTU.getRSForFirstRegionInTable(tableName);
+    List<Region> regions = rs.getOnlineRegions();
+
+    assert(!regions.isEmpty());
+
+    MiniHBaseCluster cluster = HBTU.getHBaseCluster();
+
+    assertTrue(cluster.waitForActiveAndReadyMaster());
+    HMaster master = cluster.getMaster();
+    assertTrue(master.isActiveMaster());
+    assertTrue(master.isInitialized());
+
+    RegionStates regionStates = master.getAssignmentManager().getRegionStates();
+
+    HRegionInfo hriOnline;
+    try (RegionLocator locator =
+        HBTU.getConnection().getRegionLocator(tableName)) {
+      hriOnline = locator.getRegionLocation(HConstants.EMPTY_START_ROW).getRegionInfo();
+    }
+
+    HRegion regionMeta = null;
+    for (Region r : master.getOnlineRegions()) {
+      if (r.getRegionInfo().isMetaRegion()) {
+        regionMeta = ((HRegion) r);
+      }
+    }
+
+    assertNotNull(regionMeta);
+
+    // Instantiate two hybrid logical clocks with mocked physical clocks
+    long expectedPhysicalTime = Clock.DEFAULT_JAVA_MILLIS_PHYSICAL_CLOCK.now();
+    Clock.PhysicalClock masterPhysicalClock = mock(Clock.PhysicalClock.class);
+    when(masterPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+    when(masterPhysicalClock.getTimeUnit()).thenReturn(TimeUnit.MILLISECONDS);
+    Clock.HLC masterClock = new Clock.HLC(masterPhysicalClock, Clock.DEFAULT_MAX_CLOCK_SKEW_IN_MS);
+    master.setClock(masterClock);
+    regionMeta.setClock(masterClock);
+
+    Clock.PhysicalClock rsPhysicalClock = mock(Clock.PhysicalClock.class);
+    when(rsPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+    when(rsPhysicalClock.getTimeUnit()).thenReturn(TimeUnit.MILLISECONDS);
+    Clock.HLC rsClock = new Clock.HLC(rsPhysicalClock, Clock.DEFAULT_MAX_CLOCK_SKEW_IN_MS);
+    // We only mock the region server clock here because the region clock does not get used
+    // during unassignment and assignment
+    rs.setClock(rsClock);
+
+    // Increment master physical clock time
+    expectedPhysicalTime += 1000;
+    when(masterPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+
+    // Unassign region, region server should advance its clock upon receiving close region request
+    admin.unassign(hriOnline.getRegionName(), false);
+    assertEquals(RegionState.State.CLOSED, regionStates.getRegionState(hriOnline).getState());
+    // Verify that region server clock time increased
+    // Previous test has explanation for each event that increases logical time
+    assertHLCTime(masterClock, expectedPhysicalTime, 9);
+    assertHLCTime(rsClock, expectedPhysicalTime, 6);
+
+    // Increase region server physical clock time
+    expectedPhysicalTime += 1000;
+    when(rsPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+    // Assign region, master server should advance its clock upon receiving close region response
+    admin.assign(hriOnline.getRegionName());
+    assertEquals(RegionState.State.OPEN, regionStates.getRegionState(hriOnline).getState());
+    // Verify that master clock time increased
+    assertHLCTime(masterClock, expectedPhysicalTime, 4);
+    assertHLCTime(rsClock, expectedPhysicalTime, 1);
+
+    // Increment region server physical clock time
+    expectedPhysicalTime += 1000;
+    when(rsPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+    // Unassign region, region server should advance its clock upon receiving close region request
+    admin.unassign(hriOnline.getRegionName(), false);
+    assertEquals(RegionState.State.CLOSED, regionStates.getRegionState(hriOnline).getState());
+    // Verify that master server clock time increased
+    assertHLCTime(masterClock, expectedPhysicalTime, 4);
+    assertHLCTime(rsClock, expectedPhysicalTime, 1);
+
+    // Increase master server physical clock time
+    expectedPhysicalTime += 1000;
+    when(masterPhysicalClock.now()).thenReturn(expectedPhysicalTime);
+    // Assign region, master server should advance its clock upon receiving close region response
+    admin.assign(hriOnline.getRegionName());
+    assertEquals(RegionState.State.OPEN, regionStates.getRegionState(hriOnline).getState());
+    // Verify that region server clock time increased
+    assertHLCTime(masterClock, expectedPhysicalTime, 8);
+    assertHLCTime(rsClock, expectedPhysicalTime, 5);
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
index 7b4442b..fd51d0e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestReplicasClient.java
@@ -238,7 +238,7 @@ public class TestReplicasClient {
     } catch (Exception e){}
     // first version is '0'
     AdminProtos.OpenRegionRequest orr = RequestConverter.buildOpenRegionRequest(
-      getRS().getServerName(), hri, null, null);
+      getRS().getServerName(), hri, null, null, null);
     AdminProtos.OpenRegionResponse responseOpen = getRS().getRSRpcServices().openRegion(null, orr);
     Assert.assertEquals(responseOpen.getOpeningStateCount(), 1);
     Assert.assertEquals(responseOpen.getOpeningState(0),
@@ -248,7 +248,7 @@ public class TestReplicasClient {
 
   private void closeRegion(HRegionInfo hri) throws Exception {
     AdminProtos.CloseRegionRequest crr = ProtobufUtil.buildCloseRegionRequest(
-      getRS().getServerName(), hri.getEncodedName());
+      getRS().getServerName(), hri.getEncodedName(), null);
     AdminProtos.CloseRegionResponse responseClose = getRS()
         .getRSRpcServices().closeRegion(null, crr);
     Assert.assertTrue(responseClose.getClosed());

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
index c5a9efc..82e8510 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestIncrementTimeRange.java
@@ -110,6 +110,8 @@ public class TestIncrementTimeRange {
     // test that depends on an evironment edge that is manually moved forward.
     util.getConfiguration().setInt(RemoteProcedureDispatcher.DISPATCH_DELAY_CONF_KEY, 0);
     util.startMiniCluster();
+    // Ensure that current system time is set when clock updates during region open
+    mee.setValue(EnvironmentEdgeManager.currentTime());
     EnvironmentEdgeManager.injectEdge(mee);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 8f878b8..cfae7d0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -22,6 +22,8 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ChoreService;
+import org.apache.hadoop.hbase.Clock;
+import org.apache.hadoop.hbase.ClockType;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -86,6 +88,12 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
+  public Clock getClock(ClockType clockType) { return null; };
+
+  @Override
+  public long updateClock(long timestamp) { return 0; }
+
+  @Override
   public AssignmentManager getAssignmentManager() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/386b1df1/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index 089cf69..3edd445 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.CoordinatedStateManager;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TimestampType;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Result;
@@ -581,11 +582,19 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
-  public Clock getRegionServerClock(ClockType clockType) {
+  public Clock getClock(ClockType clockType) {
     return new Clock.System();
   }
 
   @Override
+  public long updateClock(long timestamp) {
+    if (TimestampType.HYBRID.isLikelyOfType(timestamp)) {
+      return new Clock.HLC().update(timestamp);
+    }
+    return new Clock.SystemMonotonic().update(timestamp);
+  }
+
+  @Override
   public ExecutorService getExecutorService() {
     return null;
   }