Posted to commits@hbase.apache.org by ap...@apache.org on 2017/07/17 17:58:06 UTC

[13/94] [abbrv] [partial] hbase git commit: Revert "HBASE-17056 Remove checked in PB generated files Selective add of dependency on" Revert for now. Build unstable and some interesting issues around CLASSPATH

http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseInfoTree.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseInfoTree.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseInfoTree.java
new file mode 100644
index 0000000..cd08eab
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseInfoTree.java
@@ -0,0 +1,226 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+
+/**
+ * Data structure which is populated with the locations of each field value parsed from the text.
+ *
+ * <p>The locations of primary field values are retrieved by {@code getLocation} or
+ * {@code getLocations}.  The locations of sub message values are within nested
+ * {@code TextFormatParseInfoTree}s and are retrieved by {@code getNestedTree} or
+ * {@code getNestedTrees}.
+ *
+ * <p>The {@code TextFormatParseInfoTree} is created by a Builder.
+ */
+public class TextFormatParseInfoTree {
+
+  // Maps each field's descriptor to the list of locations where
+  // its value(s) were encountered.
+  private Map<FieldDescriptor, List<TextFormatParseLocation>> locationsFromField;
+
+  // Maps a field's descriptor to a list of TextFormatParseInfoTrees holding
+  // sub message location information.
+  Map<FieldDescriptor, List<TextFormatParseInfoTree>> subtreesFromField;
+
+  /**
+   * Construct a {@code TextFormatParseInfoTree}.
+   *
+   * @param locationsFromField a map of fields to location in the source code
+   * @param subtreeBuildersFromField a map of fields to parse tree location information builders
+   */
+  private TextFormatParseInfoTree(
+      Map<FieldDescriptor, List<TextFormatParseLocation>> locationsFromField,
+      Map<FieldDescriptor, List<TextFormatParseInfoTree.Builder>> subtreeBuildersFromField) {
+
+    // The maps are unmodifiable.  The values in the maps are unmodifiable.
+    Map<FieldDescriptor, List<TextFormatParseLocation>> locs =
+        new HashMap<FieldDescriptor, List<TextFormatParseLocation>>();
+    for (Entry<FieldDescriptor, List<TextFormatParseLocation>> kv : locationsFromField.entrySet()) {
+      locs.put(kv.getKey(), Collections.unmodifiableList(kv.getValue()));
+    }
+    this.locationsFromField = Collections.unmodifiableMap(locs);
+
+    Map<FieldDescriptor, List<TextFormatParseInfoTree>> subs =
+        new HashMap<FieldDescriptor, List<TextFormatParseInfoTree>>();
+    for (Entry<FieldDescriptor, List<Builder>> kv : subtreeBuildersFromField.entrySet()) {
+      List<TextFormatParseInfoTree> submessagesOfField = new ArrayList<TextFormatParseInfoTree>();
+      for (Builder subBuilder : kv.getValue()) {
+        submessagesOfField.add(subBuilder.build());
+      }
+      subs.put(kv.getKey(), Collections.unmodifiableList(submessagesOfField));
+    }
+    this.subtreesFromField = Collections.unmodifiableMap(subs);
+  }
+
+  /**
+   * Retrieve all the locations of a field.
+   *
+   * @param fieldDescriptor the {@link FieldDescriptor} of the desired field
+   * @return a list of the locations of values of the field.  If there are no values
+   *         or the field doesn't exist, an empty list is returned.
+   */
+  public List<TextFormatParseLocation> getLocations(final FieldDescriptor fieldDescriptor) {
+    List<TextFormatParseLocation> result = locationsFromField.get(fieldDescriptor);
+    return (result == null) ? Collections.<TextFormatParseLocation>emptyList() : result;
+  }
+
+  /**
+   * Get the location in the source of a field's value.
+   *
+   * <p>Returns the {@link TextFormatParseLocation} for the index-th value of the field in the
+   * parsed text.
+   *
+   * @param fieldDescriptor the {@link FieldDescriptor} of the desired field
+   * @param index the index of the value
+   * @return the {@link TextFormatParseLocation} of the value
+   * @throws IllegalArgumentException if index is out of range
+   */
+  public TextFormatParseLocation getLocation(final FieldDescriptor fieldDescriptor, int index) {
+    return getFromList(getLocations(fieldDescriptor), index, fieldDescriptor);
+  }
+
+  /**
+   * Retrieve a list of all the location information trees for a sub message field.
+   *
+   * @param fieldDescriptor the {@link FieldDescriptor} of the desired field
+   * @return a list of {@link TextFormatParseInfoTree}s
+   */
+  public List<TextFormatParseInfoTree> getNestedTrees(final FieldDescriptor fieldDescriptor) {
+    List<TextFormatParseInfoTree> result = subtreesFromField.get(fieldDescriptor);
+    return result == null ? Collections.<TextFormatParseInfoTree>emptyList() : result;
+  }
+
+  /**
+   * Returns the parse info tree for the given field, which must be a message type.
+   *
+   * @param fieldDescriptor the {@link FieldDescriptor} of the desired sub message
+   * @param index the index of the message value
+   * @return the {@code ParseInfoTree} of the message value
+   * @throws IllegalArgumentException if the field doesn't exist or the index is out of range
+   */
+  public TextFormatParseInfoTree getNestedTree(final FieldDescriptor fieldDescriptor, int index) {
+    return getFromList(getNestedTrees(fieldDescriptor), index, fieldDescriptor);
+  }
+
+  /**
+   * Create a builder for a {@code ParseInfoTree}.
+   *
+   * @return the builder
+   */
+  public static Builder builder() {
+    return new Builder();
+  }
+
+  private static <T> T getFromList(List<T> list, int index, FieldDescriptor fieldDescriptor) {
+    if (index >= list.size() || index < 0)  {
+      throw new IllegalArgumentException(String.format("Illegal index field: %s, index %d",
+          fieldDescriptor == null ? "<null>" : fieldDescriptor.getName(), index));
+    }
+    return list.get(index);
+  }
+
+  /**
+   * Builder for a {@link TextFormatParseInfoTree}.
+   */
+  public static class Builder {
+
+    private Map<FieldDescriptor, List<TextFormatParseLocation>> locationsFromField;
+
+    // Defines a mapping between a field's descriptor to a list of ParseInfoTrees builders for
+    // sub message location information.
+    private Map<FieldDescriptor, List<Builder>> subtreeBuildersFromField;
+
+    /**
+     * Create a root-level {@code ParseInfoTree} builder.
+     */
+    private Builder() {
+      locationsFromField = new HashMap<FieldDescriptor, List<TextFormatParseLocation>>();
+      subtreeBuildersFromField = new HashMap<FieldDescriptor, List<Builder>>();
+    }
+
+    /**
+     * Record the starting location of a single value for a field.
+     *
+     * @param fieldDescriptor the field
+     * @param location source code location information
+     */
+    public Builder setLocation(
+        final FieldDescriptor fieldDescriptor, TextFormatParseLocation location) {
+      List<TextFormatParseLocation> fieldLocations = locationsFromField.get(fieldDescriptor);
+      if (fieldLocations == null) {
+        fieldLocations = new ArrayList<TextFormatParseLocation>();
+        locationsFromField.put(fieldDescriptor, fieldLocations);
+      }
+      fieldLocations.add(location);
+      return this;
+    }
+
+    /**
+     * Create a builder for a sub message field.
+     *
+     * <p>A new builder is created each time this method is called; the returned builder is
+     * <em>not</em> the builder on which {@code getBuilderForSubMessageField} was invoked.
+     *
+     * @param fieldDescriptor the field whose value is the submessage
+     * @return a new Builder for the sub message
+     */
+    public Builder getBuilderForSubMessageField(final FieldDescriptor fieldDescriptor) {
+      List<Builder> submessageBuilders = subtreeBuildersFromField.get(fieldDescriptor);
+      if (submessageBuilders == null) {
+        submessageBuilders = new ArrayList<Builder>();
+        subtreeBuildersFromField.put(fieldDescriptor, submessageBuilders);
+      }
+      Builder subtreeBuilder = new Builder();
+      submessageBuilders.add(subtreeBuilder);
+      return subtreeBuilder;
+    }
+
+    /**
+     * Build the {@code TextFormatParseInfoTree}.
+     *
+     * @return the {@code TextFormatParseInfoTree}
+     */
+    public TextFormatParseInfoTree build() {
+      return new TextFormatParseInfoTree(locationsFromField, subtreeBuildersFromField);
+    }
+  }
+}
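
A minimal usage sketch of the tree API above, not part of the patch. It assumes the
code sits inside the shaded package (org.apache.hadoop.hbase.shaded.com.google.protobuf),
since TextFormatParseLocation.create is package-private; in practice the tree is
populated by TextFormat's parser rather than by hand.

    Descriptors.FieldDescriptor seconds =
        Timestamp.getDescriptor().findFieldByName("seconds");

    TextFormatParseInfoTree.Builder builder = TextFormatParseInfoTree.builder();
    builder.setLocation(seconds, TextFormatParseLocation.create(0, 9));
    builder.setLocation(seconds, TextFormatParseLocation.create(1, 9));
    TextFormatParseInfoTree tree = builder.build();

    int count = tree.getLocations(seconds).size();      // 2
    int line = tree.getLocation(seconds, 1).getLine();  // 1
    // tree.getLocation(seconds, 5) would throw IllegalArgumentException.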

http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseLocation.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseLocation.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseLocation.java
new file mode 100644
index 0000000..89ca9d2
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TextFormatParseLocation.java
@@ -0,0 +1,104 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+import java.util.Arrays;
+
+/**
+ * A location in the source code.
+ *
+ * <p>A location is the starting line number and starting column number.
+ */
+public final class TextFormatParseLocation {
+
+  /**
+   * The empty location.
+   */
+  public static final TextFormatParseLocation EMPTY = new TextFormatParseLocation(-1, -1);
+
+  /**
+   * Create a location.
+   *
+   * @param line the starting line number
+   * @param column the starting column number
+   * @return a {@code ParseLocation}
+   */
+  static TextFormatParseLocation create(int line, int column) {
+    if (line == -1 && column == -1) {
+      return EMPTY;
+    }
+    if (line < 0 || column < 0) {
+      throw new IllegalArgumentException(
+          String.format("line and column values must be >= 0: line %d, column: %d", line, column));
+    }
+    return new TextFormatParseLocation(line, column);
+  }
+
+  private final int line;
+  private final int column;
+
+  private TextFormatParseLocation(int line, int column) {
+    this.line = line;
+    this.column = column;
+  }
+
+  public int getLine() {
+    return line;
+  }
+
+  public int getColumn() {
+    return column;
+  }
+
+  @Override
+  public String toString() {
+    return String.format("ParseLocation{line=%d, column=%d}", line, column);
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (o == this) {
+      return true;
+    }
+    if (!(o instanceof TextFormatParseLocation)) {
+      return false;
+    }
+    TextFormatParseLocation that = (TextFormatParseLocation) o;
+    return (this.line == that.getLine())
+         && (this.column == that.getColumn());
+  }
+
+  @Override
+  public int hashCode() {
+    int[] values = {line, column};
+    return Arrays.hashCode(values);
+  }
+}
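
A short sketch of the value semantics above, under the same caveat that the
package-private create factory is only callable from within the shaded package:

    TextFormatParseLocation loc = TextFormatParseLocation.create(2, 8);
    int line = loc.getLine();      // 2
    int column = loc.getColumn();  // 8

    // Equality is by line/column value, consistent with hashCode().
    boolean eq = loc.equals(TextFormatParseLocation.create(2, 8));  // true
    // (-1, -1) is normalized to the shared EMPTY sentinel.
    boolean empty =
        TextFormatParseLocation.create(-1, -1) == TextFormatParseLocation.EMPTY;  // true
    // Any other negative coordinate throws IllegalArgumentException:
    // TextFormatParseLocation.create(-1, 3);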

http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Timestamp.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Timestamp.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Timestamp.java
new file mode 100644
index 0000000..0023d50
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/Timestamp.java
@@ -0,0 +1,616 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/timestamp.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+/**
+ * <pre>
+ * A Timestamp represents a point in time independent of any time zone
+ * or calendar, represented as seconds and fractions of seconds at
+ * nanosecond resolution in UTC Epoch time. It is encoded using the
+ * Proleptic Gregorian Calendar which extends the Gregorian calendar
+ * backwards to year one. It is encoded assuming all minutes are 60
+ * seconds long, i.e. leap seconds are "smeared" so that no leap second
+ * table is needed for interpretation. Range is from
+ * 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+ * By restricting to that range, we ensure that we can convert to
+ * and from  RFC 3339 date strings.
+ * See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+ * Example 1: Compute Timestamp from POSIX `time()`.
+ *     Timestamp timestamp;
+ *     timestamp.set_seconds(time(NULL));
+ *     timestamp.set_nanos(0);
+ * Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+ *     struct timeval tv;
+ *     gettimeofday(&amp;tv, NULL);
+ *     Timestamp timestamp;
+ *     timestamp.set_seconds(tv.tv_sec);
+ *     timestamp.set_nanos(tv.tv_usec * 1000);
+ * Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+ *     FILETIME ft;
+ *     GetSystemTimeAsFileTime(&amp;ft);
+ *     UINT64 ticks = (((UINT64)ft.dwHighDateTime) &lt;&lt; 32) | ft.dwLowDateTime;
+ *     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+ *     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+ *     Timestamp timestamp;
+ *     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+ *     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+ * Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+ *     long millis = System.currentTimeMillis();
+ *     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+ *         .setNanos((int) ((millis % 1000) * 1000000)).build();
+ * Example 5: Compute Timestamp from current time in Python.
+ *     timestamp = Timestamp()
+ *     timestamp.GetCurrentTime()
+ * </pre>
+ *
+ * Protobuf type {@code google.protobuf.Timestamp}
+ */
+public  final class Timestamp extends
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+    // @@protoc_insertion_point(message_implements:google.protobuf.Timestamp)
+    TimestampOrBuilder {
+  // Use Timestamp.newBuilder() to construct.
+  private Timestamp(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+    super(builder);
+  }
+  private Timestamp() {
+    seconds_ = 0L;
+    nanos_ = 0;
+  }
+
+  @java.lang.Override
+  public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+  getUnknownFields() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.getDefaultInstance();
+  }
+  private Timestamp(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+    this();
+    int mutable_bitField0_ = 0;
+    try {
+      boolean done = false;
+      while (!done) {
+        int tag = input.readTag();
+        switch (tag) {
+          case 0:
+            done = true;
+            break;
+          default: {
+            if (!input.skipField(tag)) {
+              done = true;
+            }
+            break;
+          }
+          case 8: {
+
+            seconds_ = input.readInt64();
+            break;
+          }
+          case 16: {
+
+            nanos_ = input.readInt32();
+            break;
+          }
+        }
+      }
+    } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+      throw e.setUnfinishedMessage(this);
+    } catch (java.io.IOException e) {
+      throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+          e).setUnfinishedMessage(this);
+    } finally {
+      makeExtensionsImmutable();
+    }
+  }
+  public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+      getDescriptor() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.TimestampProto.internal_static_google_protobuf_Timestamp_descriptor;
+  }
+
+  protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internalGetFieldAccessorTable() {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.TimestampProto.internal_static_google_protobuf_Timestamp_fieldAccessorTable
+        .ensureFieldAccessorsInitialized(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp.Builder.class);
+  }
+
+  public static final int SECONDS_FIELD_NUMBER = 1;
+  private long seconds_;
+  /**
+   * <pre>
+   * Represents seconds of UTC time since Unix epoch
+   * 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+   * 9999-12-31T23:59:59Z inclusive.
+   * </pre>
+   *
+   * <code>int64 seconds = 1;</code>
+   */
+  public long getSeconds() {
+    return seconds_;
+  }
+
+  public static final int NANOS_FIELD_NUMBER = 2;
+  private int nanos_;
+  /**
+   * <pre>
+   * Non-negative fractions of a second at nanosecond resolution. Negative
+   * second values with fractions must still have non-negative nanos values
+   * that count forward in time. Must be from 0 to 999,999,999
+   * inclusive.
+   * </pre>
+   *
+   * <code>int32 nanos = 2;</code>
+   */
+  public int getNanos() {
+    return nanos_;
+  }
+
+  private byte memoizedIsInitialized = -1;
+  public final boolean isInitialized() {
+    byte isInitialized = memoizedIsInitialized;
+    if (isInitialized == 1) return true;
+    if (isInitialized == 0) return false;
+
+    memoizedIsInitialized = 1;
+    return true;
+  }
+
+  public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                      throws java.io.IOException {
+    if (seconds_ != 0L) {
+      output.writeInt64(1, seconds_);
+    }
+    if (nanos_ != 0) {
+      output.writeInt32(2, nanos_);
+    }
+  }
+
+  public int getSerializedSize() {
+    int size = memoizedSize;
+    if (size != -1) return size;
+
+    size = 0;
+    if (seconds_ != 0L) {
+      size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+        .computeInt64Size(1, seconds_);
+    }
+    if (nanos_ != 0) {
+      size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+        .computeInt32Size(2, nanos_);
+    }
+    memoizedSize = size;
+    return size;
+  }
+
+  private static final long serialVersionUID = 0L;
+  @java.lang.Override
+  public boolean equals(final java.lang.Object obj) {
+    if (obj == this) {
+     return true;
+    }
+    if (!(obj instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp)) {
+      return super.equals(obj);
+    }
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp other = (org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp) obj;
+
+    boolean result = true;
+    result = result && (getSeconds()
+        == other.getSeconds());
+    result = result && (getNanos()
+        == other.getNanos());
+    return result;
+  }
+
+  @java.lang.Override
+  public int hashCode() {
+    if (memoizedHashCode != 0) {
+      return memoizedHashCode;
+    }
+    int hash = 41;
+    hash = (19 * hash) + getDescriptor().hashCode();
+    hash = (37 * hash) + SECONDS_FIELD_NUMBER;
+    hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+        getSeconds());
+    hash = (37 * hash) + NANOS_FIELD_NUMBER;
+    hash = (53 * hash) + getNanos();
+    hash = (29 * hash) + unknownFields.hashCode();
+    memoizedHashCode = hash;
+    return hash;
+  }
+
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parseFrom(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+      throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parseFrom(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parseFrom(byte[] data)
+      throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data);
+  }
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parseFrom(
+      byte[] data,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+    return PARSER.parseFrom(data, extensionRegistry);
+  }
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parseFrom(java.io.InputStream input)
+      throws java.io.IOException {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input);
+  }
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parseFrom(
+      java.io.InputStream input,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input, extensionRegistry);
+  }
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parseDelimitedFrom(java.io.InputStream input)
+      throws java.io.IOException {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+        .parseDelimitedWithIOException(PARSER, input);
+  }
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parseDelimitedFrom(
+      java.io.InputStream input,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+        .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+  }
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parseFrom(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+      throws java.io.IOException {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input);
+  }
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parseFrom(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+      throws java.io.IOException {
+    return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+        .parseWithIOException(PARSER, input, extensionRegistry);
+  }
+
+  public Builder newBuilderForType() { return newBuilder(); }
+  public static Builder newBuilder() {
+    return DEFAULT_INSTANCE.toBuilder();
+  }
+  public static Builder newBuilder(org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp prototype) {
+    return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+  }
+  public Builder toBuilder() {
+    return this == DEFAULT_INSTANCE
+        ? new Builder() : new Builder().mergeFrom(this);
+  }
+
+  @java.lang.Override
+  protected Builder newBuilderForType(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+    Builder builder = new Builder(parent);
+    return builder;
+  }
+  /**
+   * <pre>
+   * A Timestamp represents a point in time independent of any time zone
+   * or calendar, represented as seconds and fractions of seconds at
+   * nanosecond resolution in UTC Epoch time. It is encoded using the
+   * Proleptic Gregorian Calendar which extends the Gregorian calendar
+   * backwards to year one. It is encoded assuming all minutes are 60
+   * seconds long, i.e. leap seconds are "smeared" so that no leap second
+   * table is needed for interpretation. Range is from
+   * 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+   * By restricting to that range, we ensure that we can convert to
+   * and from  RFC 3339 date strings.
+   * See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+   * Example 1: Compute Timestamp from POSIX `time()`.
+   *     Timestamp timestamp;
+   *     timestamp.set_seconds(time(NULL));
+   *     timestamp.set_nanos(0);
+   * Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+   *     struct timeval tv;
+   *     gettimeofday(&amp;tv, NULL);
+   *     Timestamp timestamp;
+   *     timestamp.set_seconds(tv.tv_sec);
+   *     timestamp.set_nanos(tv.tv_usec * 1000);
+   * Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+   *     FILETIME ft;
+   *     GetSystemTimeAsFileTime(&amp;ft);
+   *     UINT64 ticks = (((UINT64)ft.dwHighDateTime) &lt;&lt; 32) | ft.dwLowDateTime;
+   *     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+   *     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+   *     Timestamp timestamp;
+   *     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+   *     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+   * Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+   *     long millis = System.currentTimeMillis();
+   *     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+   *         .setNanos((int) ((millis % 1000) * 1000000)).build();
+   * Example 5: Compute Timestamp from current time in Python.
+   *     timestamp = Timestamp()
+   *     timestamp.GetCurrentTime()
+   * </pre>
+   *
+   * Protobuf type {@code google.protobuf.Timestamp}
+   */
+  public static final class Builder extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+      // @@protoc_insertion_point(builder_implements:google.protobuf.Timestamp)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.TimestampOrBuilder {
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.TimestampProto.internal_static_google_protobuf_Timestamp_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.TimestampProto.internal_static_google_protobuf_Timestamp_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp.class, org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp.Builder.class);
+    }
+
+    // Construct using org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp.newBuilder()
+    private Builder() {
+      maybeForceBuilderInitialization();
+    }
+
+    private Builder(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      super(parent);
+      maybeForceBuilderInitialization();
+    }
+    private void maybeForceBuilderInitialization() {
+      if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+              .alwaysUseFieldBuilders) {
+      }
+    }
+    public Builder clear() {
+      super.clear();
+      seconds_ = 0L;
+
+      nanos_ = 0;
+
+      return this;
+    }
+
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptorForType() {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.TimestampProto.internal_static_google_protobuf_Timestamp_descriptor;
+    }
+
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp getDefaultInstanceForType() {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp.getDefaultInstance();
+    }
+
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp build() {
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp result = buildPartial();
+      if (!result.isInitialized()) {
+        throw newUninitializedMessageException(result);
+      }
+      return result;
+    }
+
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp buildPartial() {
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp result = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp(this);
+      result.seconds_ = seconds_;
+      result.nanos_ = nanos_;
+      onBuilt();
+      return result;
+    }
+
+    public Builder clone() {
+      return (Builder) super.clone();
+    }
+    public Builder setField(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+        Object value) {
+      return (Builder) super.setField(field, value);
+    }
+    public Builder clearField(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+      return (Builder) super.clearField(field);
+    }
+    public Builder clearOneof(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+      return (Builder) super.clearOneof(oneof);
+    }
+    public Builder setRepeatedField(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+        int index, Object value) {
+      return (Builder) super.setRepeatedField(field, index, value);
+    }
+    public Builder addRepeatedField(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+        Object value) {
+      return (Builder) super.addRepeatedField(field, value);
+    }
+    public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+      if (other instanceof org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp) {
+        return mergeFrom((org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp)other);
+      } else {
+        super.mergeFrom(other);
+        return this;
+      }
+    }
+
+    public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp other) {
+      if (other == org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp.getDefaultInstance()) return this;
+      if (other.getSeconds() != 0L) {
+        setSeconds(other.getSeconds());
+      }
+      if (other.getNanos() != 0) {
+        setNanos(other.getNanos());
+      }
+      onChanged();
+      return this;
+    }
+
+    public final boolean isInitialized() {
+      return true;
+    }
+
+    public Builder mergeFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp parsedMessage = null;
+      try {
+        parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        parsedMessage = (org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp) e.getUnfinishedMessage();
+        throw e.unwrapIOException();
+      } finally {
+        if (parsedMessage != null) {
+          mergeFrom(parsedMessage);
+        }
+      }
+      return this;
+    }
+
+    private long seconds_ ;
+    /**
+     * <pre>
+     * Represents seconds of UTC time since Unix epoch
+     * 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+     * 9999-12-31T23:59:59Z inclusive.
+     * </pre>
+     *
+     * <code>int64 seconds = 1;</code>
+     */
+    public long getSeconds() {
+      return seconds_;
+    }
+    /**
+     * <pre>
+     * Represents seconds of UTC time since Unix epoch
+     * 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+     * 9999-12-31T23:59:59Z inclusive.
+     * </pre>
+     *
+     * <code>int64 seconds = 1;</code>
+     */
+    public Builder setSeconds(long value) {
+      
+      seconds_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Represents seconds of UTC time since Unix epoch
+     * 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+     * 9999-12-31T23:59:59Z inclusive.
+     * </pre>
+     *
+     * <code>int64 seconds = 1;</code>
+     */
+    public Builder clearSeconds() {
+      
+      seconds_ = 0L;
+      onChanged();
+      return this;
+    }
+
+    private int nanos_ ;
+    /**
+     * <pre>
+     * Non-negative fractions of a second at nanosecond resolution. Negative
+     * second values with fractions must still have non-negative nanos values
+     * that count forward in time. Must be from 0 to 999,999,999
+     * inclusive.
+     * </pre>
+     *
+     * <code>int32 nanos = 2;</code>
+     */
+    public int getNanos() {
+      return nanos_;
+    }
+    /**
+     * <pre>
+     * Non-negative fractions of a second at nanosecond resolution. Negative
+     * second values with fractions must still have non-negative nanos values
+     * that count forward in time. Must be from 0 to 999,999,999
+     * inclusive.
+     * </pre>
+     *
+     * <code>int32 nanos = 2;</code>
+     */
+    public Builder setNanos(int value) {
+      
+      nanos_ = value;
+      onChanged();
+      return this;
+    }
+    /**
+     * <pre>
+     * Non-negative fractions of a second at nanosecond resolution. Negative
+     * second values with fractions must still have non-negative nanos values
+     * that count forward in time. Must be from 0 to 999,999,999
+     * inclusive.
+     * </pre>
+     *
+     * <code>int32 nanos = 2;</code>
+     */
+    public Builder clearNanos() {
+      
+      nanos_ = 0;
+      onChanged();
+      return this;
+    }
+    public final Builder setUnknownFields(
+        final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+      return this;
+    }
+
+    public final Builder mergeUnknownFields(
+        final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+      return this;
+    }
+
+
+    // @@protoc_insertion_point(builder_scope:google.protobuf.Timestamp)
+  }
+
+  // @@protoc_insertion_point(class_scope:google.protobuf.Timestamp)
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp DEFAULT_INSTANCE;
+  static {
+    DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp();
+  }
+
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp getDefaultInstance() {
+    return DEFAULT_INSTANCE;
+  }
+
+  private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<Timestamp>
+      PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<Timestamp>() {
+    public Timestamp parsePartialFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+        return new Timestamp(input, extensionRegistry);
+    }
+  };
+
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<Timestamp> parser() {
+    return PARSER;
+  }
+
+  @java.lang.Override
+  public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<Timestamp> getParserForType() {
+    return PARSER;
+  }
+
+  public org.apache.hadoop.hbase.shaded.com.google.protobuf.Timestamp getDefaultInstanceForType() {
+    return DEFAULT_INSTANCE;
+  }
+
+}
+
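Example 4 from the Javadoc above, expanded into a round trip through the wire
format (a sketch; imports from the shaded package are assumed, and toByteArray
is inherited from the generated message's base class):

    long millis = System.currentTimeMillis();
    Timestamp ts = Timestamp.newBuilder()
        .setSeconds(millis / 1000)
        .setNanos((int) ((millis % 1000) * 1000000))
        .build();

    // Serialize and parse back; equals() compares seconds and nanos.
    Timestamp parsed = Timestamp.parseFrom(ts.toByteArray());
    boolean roundTrip = parsed.equals(ts);  // true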

http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TimestampOrBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TimestampOrBuilder.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TimestampOrBuilder.java
new file mode 100644
index 0000000..fca3c16
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TimestampOrBuilder.java
@@ -0,0 +1,32 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/timestamp.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+public interface TimestampOrBuilder extends
+    // @@protoc_insertion_point(interface_extends:google.protobuf.Timestamp)
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+  /**
+   * <pre>
+   * Represents seconds of UTC time since Unix epoch
+   * 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+   * 9999-12-31T23:59:59Z inclusive.
+   * </pre>
+   *
+   * <code>int64 seconds = 1;</code>
+   */
+  long getSeconds();
+
+  /**
+   * <pre>
+   * Non-negative fractions of a second at nanosecond resolution. Negative
+   * second values with fractions must still have non-negative nanos values
+   * that count forward in time. Must be from 0 to 999,999,999
+   * inclusive.
+   * </pre>
+   *
+   * <code>int32 nanos = 2;</code>
+   */
+  int getNanos();
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/6786b2b6/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TimestampProto.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TimestampProto.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TimestampProto.java
new file mode 100644
index 0000000..c62c4ce
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/com/google/protobuf/TimestampProto.java
@@ -0,0 +1,59 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: google/protobuf/timestamp.proto
+
+package org.apache.hadoop.hbase.shaded.com.google.protobuf;
+
+public final class TimestampProto {
+  private TimestampProto() {}
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
+  }
+
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
+    registerAllExtensions(
+        (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
+  }
+  static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+    internal_static_google_protobuf_Timestamp_descriptor;
+  static final 
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+      internal_static_google_protobuf_Timestamp_fieldAccessorTable;
+
+  public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+      getDescriptor() {
+    return descriptor;
+  }
+  private static  org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+      descriptor;
+  static {
+    java.lang.String[] descriptorData = {
+      "\n\037google/protobuf/timestamp.proto\022\017googl" +
+      "e.protobuf\"+\n\tTimestamp\022\017\n\007seconds\030\001 \001(\003" +
+      "\022\r\n\005nanos\030\002 \001(\005B~\n\023com.google.protobufB\016" +
+      "TimestampProtoP\001Z+github.com/golang/prot" +
+      "obuf/ptypes/timestamp\370\001\001\242\002\003GPB\252\002\036Google." +
+      "Protobuf.WellKnownTypesb\006proto3"
+    };
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
+        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor.    InternalDescriptorAssigner() {
+          public org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry assignDescriptors(
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor root) {
+            descriptor = root;
+            return null;
+          }
+        };
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor
+      .internalBuildGeneratedFileFrom(descriptorData,
+        new org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FileDescriptor[] {
+        }, assigner);
+    internal_static_google_protobuf_Timestamp_descriptor =
+      getDescriptor().getMessageTypes().get(0);
+    internal_static_google_protobuf_Timestamp_fieldAccessorTable = new
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+        internal_static_google_protobuf_Timestamp_descriptor,
+        new java.lang.String[] { "Seconds", "Nanos", });
+  }
+
+  // @@protoc_insertion_point(outer_class_scope)
+}
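
The static block above builds the file descriptor from the serialized proto and
wires Timestamp up as its first (and only) message type, so the descriptor is
reachable through the outer class as well; a small sketch, assuming the shaded
runtime:

    Descriptors.Descriptor d =
        TimestampProto.getDescriptor().getMessageTypes().get(0);
    System.out.println(d.getFullName());       // google.protobuf.Timestamp
    System.out.println(d.getFields().size());  // 2: "seconds" and "nanos"
    // Timestamp.getDescriptor() returns this same instance.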