You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by da...@apache.org on 2015/09/22 07:03:33 UTC
[02/50] [abbrv] hive git commit: HIVE-11294 Use HBase to cache
aggregated stats (gates)
http://git-wip-us.apache.org/repos/asf/hive/blob/c53c6f45/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
----------------------------------------------------------------------
diff --git a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
index 2d9e592..314fc7f 100644
--- a/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
+++ b/metastore/src/gen/protobuf/gen-java/org/apache/hadoop/hive/metastore/hbase/HbaseMetastoreProto.java
@@ -90,6 +90,3688 @@ public final class HbaseMetastoreProto {
// @@protoc_insertion_point(enum_scope:org.apache.hadoop.hive.metastore.hbase.PrincipalType)
}
+ public interface AggrStatsOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required int64 parts_found = 1;
+ /**
+ * <code>required int64 parts_found = 1;</code>
+ */
+ boolean hasPartsFound();
+ /**
+ * <code>required int64 parts_found = 1;</code>
+ */
+ long getPartsFound();
+
+ // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats>
+ getColStatsList();
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index);
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ int getColStatsCount();
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ java.util.List<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder>
+ getColStatsOrBuilderList();
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder(
+ int index);
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStats}
+ */
+ public static final class AggrStats extends
+ com.google.protobuf.GeneratedMessage
+ implements AggrStatsOrBuilder {
+ // Use AggrStats.newBuilder() to construct.
+ private AggrStats(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private AggrStats(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final AggrStats defaultInstance;
+ public static AggrStats getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public AggrStats getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private AggrStats(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ partsFound_ = input.readInt64();
+ break;
+ }
+ case 18: {
+ if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ colStats_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats>();
+ mutable_bitField0_ |= 0x00000002;
+ }
+ colStats_.add(input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.PARSER, extensionRegistry));
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
+ colStats_ = java.util.Collections.unmodifiableList(colStats_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<AggrStats> PARSER =
+ new com.google.protobuf.AbstractParser<AggrStats>() {
+ public AggrStats parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new AggrStats(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<AggrStats> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required int64 parts_found = 1;
+ public static final int PARTS_FOUND_FIELD_NUMBER = 1;
+ private long partsFound_;
+ /**
+ * <code>required int64 parts_found = 1;</code>
+ */
+ public boolean hasPartsFound() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required int64 parts_found = 1;</code>
+ */
+ public long getPartsFound() {
+ return partsFound_;
+ }
+
+ // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;
+ public static final int COL_STATS_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats> colStats_;
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats> getColStatsList() {
+ return colStats_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder>
+ getColStatsOrBuilderList() {
+ return colStats_;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public int getColStatsCount() {
+ return colStats_.size();
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index) {
+ return colStats_.get(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder(
+ int index) {
+ return colStats_.get(index);
+ }
+
+ private void initFields() {
+ partsFound_ = 0L;
+ colStats_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasPartsFound()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getColStatsCount(); i++) {
+ if (!getColStats(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt64(1, partsFound_);
+ }
+ for (int i = 0; i < colStats_.size(); i++) {
+ output.writeMessage(2, colStats_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(1, partsFound_);
+ }
+ for (int i = 0; i < colStats_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, colStats_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStats}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getColStatsFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ partsFound_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (colStatsBuilder_ == null) {
+ colStats_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ colStatsBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStats_descriptor;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats build() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats buildPartial() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.partsFound_ = partsFound_;
+ if (colStatsBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ colStats_ = java.util.Collections.unmodifiableList(colStats_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.colStats_ = colStats_;
+ } else {
+ result.colStats_ = colStatsBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats) {
+ return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats other) {
+ if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats.getDefaultInstance()) return this;
+ if (other.hasPartsFound()) {
+ setPartsFound(other.getPartsFound());
+ }
+ if (colStatsBuilder_ == null) {
+ if (!other.colStats_.isEmpty()) {
+ if (colStats_.isEmpty()) {
+ colStats_ = other.colStats_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureColStatsIsMutable();
+ colStats_.addAll(other.colStats_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.colStats_.isEmpty()) {
+ if (colStatsBuilder_.isEmpty()) {
+ colStatsBuilder_.dispose();
+ colStatsBuilder_ = null;
+ colStats_ = other.colStats_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ colStatsBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getColStatsFieldBuilder() : null;
+ } else {
+ colStatsBuilder_.addAllMessages(other.colStats_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasPartsFound()) {
+
+ return false;
+ }
+ for (int i = 0; i < getColStatsCount(); i++) {
+ if (!getColStats(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStats) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required int64 parts_found = 1;
+ private long partsFound_ ;
+ /**
+ * <code>required int64 parts_found = 1;</code>
+ */
+ public boolean hasPartsFound() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required int64 parts_found = 1;</code>
+ */
+ public long getPartsFound() {
+ return partsFound_;
+ }
+ /**
+ * <code>required int64 parts_found = 1;</code>
+ */
+ public Builder setPartsFound(long value) {
+ bitField0_ |= 0x00000001;
+ partsFound_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required int64 parts_found = 1;</code>
+ */
+ public Builder clearPartsFound() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ partsFound_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;
+ private java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats> colStats_ =
+ java.util.Collections.emptyList();
+ private void ensureColStatsIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ colStats_ = new java.util.ArrayList<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats>(colStats_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder> colStatsBuilder_;
+
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats> getColStatsList() {
+ if (colStatsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(colStats_);
+ } else {
+ return colStatsBuilder_.getMessageList();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public int getColStatsCount() {
+ if (colStatsBuilder_ == null) {
+ return colStats_.size();
+ } else {
+ return colStatsBuilder_.getCount();
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats getColStats(int index) {
+ if (colStatsBuilder_ == null) {
+ return colStats_.get(index);
+ } else {
+ return colStatsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public Builder setColStats(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) {
+ if (colStatsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColStatsIsMutable();
+ colStats_.set(index, value);
+ onChanged();
+ } else {
+ colStatsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public Builder setColStats(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) {
+ if (colStatsBuilder_ == null) {
+ ensureColStatsIsMutable();
+ colStats_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ colStatsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public Builder addColStats(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) {
+ if (colStatsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColStatsIsMutable();
+ colStats_.add(value);
+ onChanged();
+ } else {
+ colStatsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public Builder addColStats(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats value) {
+ if (colStatsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureColStatsIsMutable();
+ colStats_.add(index, value);
+ onChanged();
+ } else {
+ colStatsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public Builder addColStats(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) {
+ if (colStatsBuilder_ == null) {
+ ensureColStatsIsMutable();
+ colStats_.add(builderForValue.build());
+ onChanged();
+ } else {
+ colStatsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public Builder addColStats(
+ int index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder builderForValue) {
+ if (colStatsBuilder_ == null) {
+ ensureColStatsIsMutable();
+ colStats_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ colStatsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public Builder addAllColStats(
+ java.lang.Iterable<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats> values) {
+ if (colStatsBuilder_ == null) {
+ ensureColStatsIsMutable();
+ super.addAll(values, colStats_);
+ onChanged();
+ } else {
+ colStatsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public Builder clearColStats() {
+ if (colStatsBuilder_ == null) {
+ colStats_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ colStatsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public Builder removeColStats(int index) {
+ if (colStatsBuilder_ == null) {
+ ensureColStatsIsMutable();
+ colStats_.remove(index);
+ onChanged();
+ } else {
+ colStatsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder getColStatsBuilder(
+ int index) {
+ return getColStatsFieldBuilder().getBuilder(index);
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder getColStatsOrBuilder(
+ int index) {
+ if (colStatsBuilder_ == null) {
+ return colStats_.get(index); } else {
+ return colStatsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public java.util.List<? extends org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder>
+ getColStatsOrBuilderList() {
+ if (colStatsBuilder_ != null) {
+ return colStatsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(colStats_);
+ }
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder addColStatsBuilder() {
+ return getColStatsFieldBuilder().addBuilder(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder addColStatsBuilder(
+ int index) {
+ return getColStatsFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.getDefaultInstance());
+ }
+ /**
+ * <code>repeated .org.apache.hadoop.hive.metastore.hbase.ColumnStats col_stats = 2;</code>
+ */
+ public java.util.List<org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder>
+ getColStatsBuilderList() {
+ return getColStatsFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder>
+ getColStatsFieldBuilder() {
+ if (colStatsBuilder_ == null) {
+ colStatsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStats.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.ColumnStatsOrBuilder>(
+ colStats_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ colStats_ = null;
+ }
+ return colStatsBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStats)
+ }
+
+ static {
+ defaultInstance = new AggrStats(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStats)
+ }
+
+ public interface AggrStatsBloomFilterOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes db_name = 1;
+ /**
+ * <code>required bytes db_name = 1;</code>
+ */
+ boolean hasDbName();
+ /**
+ * <code>required bytes db_name = 1;</code>
+ */
+ com.google.protobuf.ByteString getDbName();
+
+ // required bytes table_name = 2;
+ /**
+ * <code>required bytes table_name = 2;</code>
+ */
+ boolean hasTableName();
+ /**
+ * <code>required bytes table_name = 2;</code>
+ */
+ com.google.protobuf.ByteString getTableName();
+
+ // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ */
+ boolean hasBloomFilter();
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter();
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ */
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder();
+
+ // required int64 aggregated_at = 4;
+ /**
+ * <code>required int64 aggregated_at = 4;</code>
+ */
+ boolean hasAggregatedAt();
+ /**
+ * <code>required int64 aggregated_at = 4;</code>
+ */
+ long getAggregatedAt();
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter}
+ */
+ public static final class AggrStatsBloomFilter extends
+ com.google.protobuf.GeneratedMessage
+ implements AggrStatsBloomFilterOrBuilder {
+ // Use AggrStatsBloomFilter.newBuilder() to construct.
+ private AggrStatsBloomFilter(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private AggrStatsBloomFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final AggrStatsBloomFilter defaultInstance;
+ public static AggrStatsBloomFilter getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public AggrStatsBloomFilter getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private AggrStatsBloomFilter(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ dbName_ = input.readBytes();
+ break;
+ }
+ case 18: {
+ bitField0_ |= 0x00000002;
+ tableName_ = input.readBytes();
+ break;
+ }
+ case 26: {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ subBuilder = bloomFilter_.toBuilder();
+ }
+ bloomFilter_ = input.readMessage(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(bloomFilter_);
+ bloomFilter_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000004;
+ break;
+ }
+ case 32: {
+ bitField0_ |= 0x00000008;
+ aggregatedAt_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<AggrStatsBloomFilter> PARSER =
+ new com.google.protobuf.AbstractParser<AggrStatsBloomFilter>() {
+ public AggrStatsBloomFilter parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new AggrStatsBloomFilter(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<AggrStatsBloomFilter> getParserForType() {
+ return PARSER;
+ }
+
+ public interface BloomFilterOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required int32 num_bits = 1;
+ /**
+ * <code>required int32 num_bits = 1;</code>
+ */
+ boolean hasNumBits();
+ /**
+ * <code>required int32 num_bits = 1;</code>
+ */
+ int getNumBits();
+
+ // required int32 num_funcs = 2;
+ /**
+ * <code>required int32 num_funcs = 2;</code>
+ */
+ boolean hasNumFuncs();
+ /**
+ * <code>required int32 num_funcs = 2;</code>
+ */
+ int getNumFuncs();
+
+ // repeated int64 bits = 3;
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ java.util.List<java.lang.Long> getBitsList();
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ int getBitsCount();
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ long getBits(int index);
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter}
+ */
+ public static final class BloomFilter extends
+ com.google.protobuf.GeneratedMessage
+ implements BloomFilterOrBuilder {
+ // Use BloomFilter.newBuilder() to construct.
+ private BloomFilter(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private BloomFilter(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final BloomFilter defaultInstance;
+ public static BloomFilter getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public BloomFilter getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private BloomFilter(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ numBits_ = input.readInt32();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ numFuncs_ = input.readInt32();
+ break;
+ }
+ case 24: {
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ bits_ = new java.util.ArrayList<java.lang.Long>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ bits_.add(input.readInt64());
+ break;
+ }
+ case 26: {
+ int length = input.readRawVarint32();
+ int limit = input.pushLimit(length);
+ if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && input.getBytesUntilLimit() > 0) {
+ bits_ = new java.util.ArrayList<java.lang.Long>();
+ mutable_bitField0_ |= 0x00000004;
+ }
+ while (input.getBytesUntilLimit() > 0) {
+ bits_.add(input.readInt64());
+ }
+ input.popLimit(limit);
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
+ bits_ = java.util.Collections.unmodifiableList(bits_);
+ }
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<BloomFilter> PARSER =
+ new com.google.protobuf.AbstractParser<BloomFilter>() {
+ public BloomFilter parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new BloomFilter(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<BloomFilter> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required int32 num_bits = 1;
+ public static final int NUM_BITS_FIELD_NUMBER = 1;
+ private int numBits_;
+ /**
+ * <code>required int32 num_bits = 1;</code>
+ */
+ public boolean hasNumBits() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required int32 num_bits = 1;</code>
+ */
+ public int getNumBits() {
+ return numBits_;
+ }
+
+ // required int32 num_funcs = 2;
+ public static final int NUM_FUNCS_FIELD_NUMBER = 2;
+ private int numFuncs_;
+ /**
+ * <code>required int32 num_funcs = 2;</code>
+ */
+ public boolean hasNumFuncs() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required int32 num_funcs = 2;</code>
+ */
+ public int getNumFuncs() {
+ return numFuncs_;
+ }
+
+ // repeated int64 bits = 3;
+ public static final int BITS_FIELD_NUMBER = 3;
+ private java.util.List<java.lang.Long> bits_;
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ public java.util.List<java.lang.Long>
+ getBitsList() {
+ return bits_;
+ }
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ public int getBitsCount() {
+ return bits_.size();
+ }
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ public long getBits(int index) {
+ return bits_.get(index);
+ }
+
+ private void initFields() {
+ numBits_ = 0;
+ numFuncs_ = 0;
+ bits_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasNumBits()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasNumFuncs()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt32(1, numBits_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeInt32(2, numFuncs_);
+ }
+ for (int i = 0; i < bits_.size(); i++) {
+ output.writeInt64(3, bits_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(1, numBits_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(2, numFuncs_);
+ }
+ {
+ int dataSize = 0;
+ for (int i = 0; i < bits_.size(); i++) {
+ dataSize += com.google.protobuf.CodedOutputStream
+ .computeInt64SizeNoTag(bits_.get(i));
+ }
+ size += dataSize;
+ size += 1 * getBitsList().size();
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ numBits_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ numFuncs_ = 0;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ bits_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_BloomFilter_descriptor;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter build() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter buildPartial() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.numBits_ = numBits_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.numFuncs_ = numFuncs_;
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ bits_ = java.util.Collections.unmodifiableList(bits_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.bits_ = bits_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter) {
+ return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter other) {
+ if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance()) return this;
+ if (other.hasNumBits()) {
+ setNumBits(other.getNumBits());
+ }
+ if (other.hasNumFuncs()) {
+ setNumFuncs(other.getNumFuncs());
+ }
+ if (!other.bits_.isEmpty()) {
+ if (bits_.isEmpty()) {
+ bits_ = other.bits_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureBitsIsMutable();
+ bits_.addAll(other.bits_);
+ }
+ onChanged();
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasNumBits()) {
+
+ return false;
+ }
+ if (!hasNumFuncs()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required int32 num_bits = 1;
+ private int numBits_ ;
+ /**
+ * <code>required int32 num_bits = 1;</code>
+ */
+ public boolean hasNumBits() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required int32 num_bits = 1;</code>
+ */
+ public int getNumBits() {
+ return numBits_;
+ }
+ /**
+ * <code>required int32 num_bits = 1;</code>
+ */
+ public Builder setNumBits(int value) {
+ bitField0_ |= 0x00000001;
+ numBits_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required int32 num_bits = 1;</code>
+ */
+ public Builder clearNumBits() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ numBits_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // required int32 num_funcs = 2;
+ private int numFuncs_ ;
+ /**
+ * <code>required int32 num_funcs = 2;</code>
+ */
+ public boolean hasNumFuncs() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required int32 num_funcs = 2;</code>
+ */
+ public int getNumFuncs() {
+ return numFuncs_;
+ }
+ /**
+ * <code>required int32 num_funcs = 2;</code>
+ */
+ public Builder setNumFuncs(int value) {
+ bitField0_ |= 0x00000002;
+ numFuncs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required int32 num_funcs = 2;</code>
+ */
+ public Builder clearNumFuncs() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ numFuncs_ = 0;
+ onChanged();
+ return this;
+ }
+
+ // repeated int64 bits = 3;
+ private java.util.List<java.lang.Long> bits_ = java.util.Collections.emptyList();
+ private void ensureBitsIsMutable() {
+ if (!((bitField0_ & 0x00000004) == 0x00000004)) {
+ bits_ = new java.util.ArrayList<java.lang.Long>(bits_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ public java.util.List<java.lang.Long>
+ getBitsList() {
+ return java.util.Collections.unmodifiableList(bits_);
+ }
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ public int getBitsCount() {
+ return bits_.size();
+ }
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ public long getBits(int index) {
+ return bits_.get(index);
+ }
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ public Builder setBits(
+ int index, long value) {
+ ensureBitsIsMutable();
+ bits_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ public Builder addBits(long value) {
+ ensureBitsIsMutable();
+ bits_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ public Builder addAllBits(
+ java.lang.Iterable<? extends java.lang.Long> values) {
+ ensureBitsIsMutable();
+ super.addAll(values, bits_);
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>repeated int64 bits = 3;</code>
+ */
+ public Builder clearBits() {
+ bits_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter)
+ }
+
+ static {
+ defaultInstance = new BloomFilter(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter)
+ }
+
+ private int bitField0_;
+ // required bytes db_name = 1;
+ public static final int DB_NAME_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString dbName_;
+ /**
+ * <code>required bytes db_name = 1;</code>
+ */
+ public boolean hasDbName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bytes db_name = 1;</code>
+ */
+ public com.google.protobuf.ByteString getDbName() {
+ return dbName_;
+ }
+
+ // required bytes table_name = 2;
+ public static final int TABLE_NAME_FIELD_NUMBER = 2;
+ private com.google.protobuf.ByteString tableName_;
+ /**
+ * <code>required bytes table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required bytes table_name = 2;</code>
+ */
+ public com.google.protobuf.ByteString getTableName() {
+ return tableName_;
+ }
+
+ // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;
+ public static final int BLOOM_FILTER_FIELD_NUMBER = 3;
+ private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter bloomFilter_;
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ */
+ public boolean hasBloomFilter() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter() {
+ return bloomFilter_;
+ }
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder() {
+ return bloomFilter_;
+ }
+
+ // required int64 aggregated_at = 4;
+ public static final int AGGREGATED_AT_FIELD_NUMBER = 4;
+ private long aggregatedAt_;
+ /**
+ * <code>required int64 aggregated_at = 4;</code>
+ */
+ public boolean hasAggregatedAt() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>required int64 aggregated_at = 4;</code>
+ */
+ public long getAggregatedAt() {
+ return aggregatedAt_;
+ }
+
+ private void initFields() {
+ dbName_ = com.google.protobuf.ByteString.EMPTY;
+ tableName_ = com.google.protobuf.ByteString.EMPTY;
+ bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance();
+ aggregatedAt_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasDbName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasBloomFilter()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasAggregatedAt()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getBloomFilter().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, dbName_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeBytes(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ output.writeMessage(3, bloomFilter_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ output.writeInt64(4, aggregatedAt_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, dbName_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(2, tableName_);
+ }
+ if (((bitField0_ & 0x00000004) == 0x00000004)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, bloomFilter_);
+ }
+ if (((bitField0_ & 0x00000008) == 0x00000008)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(4, aggregatedAt_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilterOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.class, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getBloomFilterFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ dbName_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ tableName_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ if (bloomFilterBuilder_ == null) {
+ bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance();
+ } else {
+ bloomFilterBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ aggregatedAt_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000008);
+ return this;
+ }
+
+ // Deep-copies this builder by snapshotting its current (possibly incomplete)
+ // state with buildPartial() and merging it into a fresh builder.
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ // Descriptor of the message type this builder produces (AggrStatsBloomFilter).
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.internal_static_org_apache_hadoop_hive_metastore_hbase_AggrStatsBloomFilter_descriptor;
+ }
+
+ // Singleton default instance of the message type this builder produces.
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter getDefaultInstanceForType() {
+ return org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.getDefaultInstance();
+ }
+
+ // Builds the message and enforces required fields: throws
+ // UninitializedMessageException if any required field is unset.
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter build() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ // Builds the message WITHOUT required-field validation: copies each field and
+ // its presence bit from the builder into a new immutable message instance.
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter buildPartial() {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter result = new org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.dbName_ = dbName_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.tableName_ = tableName_;
+ if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+ to_bitField0_ |= 0x00000004;
+ }
+ // bloom_filter is taken from the sub-builder when one exists, else the raw field.
+ if (bloomFilterBuilder_ == null) {
+ result.bloomFilter_ = bloomFilter_;
+ } else {
+ result.bloomFilter_ = bloomFilterBuilder_.build();
+ }
+ if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+ to_bitField0_ |= 0x00000008;
+ }
+ result.aggregatedAt_ = aggregatedAt_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ // Generic merge entry point: dispatches to the typed overload when possible,
+ // otherwise falls back to the reflective GeneratedMessage merge.
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter) {
+ return mergeFrom((org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ // Typed merge: copies each field from 'other' only when 'other' has it set,
+ // then merges the unknown-field set. Merging the default instance is a no-op.
+ public Builder mergeFrom(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter other) {
+ if (other == org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.getDefaultInstance()) return this;
+ if (other.hasDbName()) {
+ setDbName(other.getDbName());
+ }
+ if (other.hasTableName()) {
+ setTableName(other.getTableName());
+ }
+ if (other.hasBloomFilter()) {
+ // Sub-message fields are merged recursively, not replaced.
+ mergeBloomFilter(other.getBloomFilter());
+ }
+ if (other.hasAggregatedAt()) {
+ setAggregatedAt(other.getAggregatedAt());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ // True only when all four required fields are present AND the nested
+ // bloom_filter message is itself fully initialized.
+ public final boolean isInitialized() {
+ if (!hasDbName()) {
+
+ return false;
+ }
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!hasBloomFilter()) {
+
+ return false;
+ }
+ if (!hasAggregatedAt()) {
+
+ return false;
+ }
+ if (!getBloomFilter().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ // Parses a wire-format message from the stream and merges it into this
+ // builder. On InvalidProtocolBufferException the partially parsed message is
+ // still merged (via the finally block) before the exception is rethrown.
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ // Presence bitmap: bit 0x1 = db_name, 0x2 = table_name, 0x4 = bloom_filter,
+ // 0x8 = aggregated_at (see clear()/buildPartial() above).
+ private int bitField0_;
+
+ // required bytes db_name = 1;
+ private com.google.protobuf.ByteString dbName_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>required bytes db_name = 1;</code>
+ */
+ public boolean hasDbName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required bytes db_name = 1;</code>
+ */
+ public com.google.protobuf.ByteString getDbName() {
+ return dbName_;
+ }
+ /**
+ * <code>required bytes db_name = 1;</code>
+ *
+ * Sets the field and its presence bit; bytes fields are null-hostile.
+ */
+ public Builder setDbName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ dbName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bytes db_name = 1;</code>
+ *
+ * Clears the presence bit and restores the field's default (empty) value.
+ */
+ public Builder clearDbName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ dbName_ = getDefaultInstance().getDbName();
+ onChanged();
+ return this;
+ }
+
+ // required bytes table_name = 2;
+ // Accessors mirror the db_name field above, using presence bit 0x2.
+ private com.google.protobuf.ByteString tableName_ = com.google.protobuf.ByteString.EMPTY;
+ /**
+ * <code>required bytes table_name = 2;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ /**
+ * <code>required bytes table_name = 2;</code>
+ */
+ public com.google.protobuf.ByteString getTableName() {
+ return tableName_;
+ }
+ /**
+ * <code>required bytes table_name = 2;</code>
+ *
+ * Sets the field and its presence bit; rejects null.
+ */
+ public Builder setTableName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ tableName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required bytes table_name = 2;</code>
+ *
+ * Clears the presence bit and restores the default (empty) value.
+ */
+ public Builder clearTableName() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ tableName_ = getDefaultInstance().getTableName();
+ onChanged();
+ return this;
+ }
+
+ // required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;
+ // Sub-message field (presence bit 0x4). The value lives either in
+ // bloomFilter_ directly, or — once getBloomFilterBuilder() has been called —
+ // inside the lazily created SingleFieldBuilder, never both at once.
+ private org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder> bloomFilterBuilder_;
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ */
+ public boolean hasBloomFilter() {
+ return ((bitField0_ & 0x00000004) == 0x00000004);
+ }
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ *
+ * Reads from whichever holder is active (raw field or sub-builder).
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter getBloomFilter() {
+ if (bloomFilterBuilder_ == null) {
+ return bloomFilter_;
+ } else {
+ return bloomFilterBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ *
+ * Replaces the sub-message outright; rejects null.
+ */
+ public Builder setBloomFilter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter value) {
+ if (bloomFilterBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bloomFilter_ = value;
+ onChanged();
+ } else {
+ bloomFilterBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ *
+ * Convenience overload that builds the sub-message from a builder.
+ */
+ public Builder setBloomFilter(
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder builderForValue) {
+ if (bloomFilterBuilder_ == null) {
+ bloomFilter_ = builderForValue.build();
+ onChanged();
+ } else {
+ bloomFilterBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ *
+ * Field-merges 'value' into an existing non-default sub-message; otherwise
+ * simply adopts 'value'.
+ */
+ public Builder mergeBloomFilter(org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter value) {
+ if (bloomFilterBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) == 0x00000004) &&
+ bloomFilter_ != org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance()) {
+ bloomFilter_ =
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.newBuilder(bloomFilter_).mergeFrom(value).buildPartial();
+ } else {
+ bloomFilter_ = value;
+ }
+ onChanged();
+ } else {
+ bloomFilterBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000004;
+ return this;
+ }
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ *
+ * Resets the sub-message to its default and clears the presence bit.
+ */
+ public Builder clearBloomFilter() {
+ if (bloomFilterBuilder_ == null) {
+ bloomFilter_ = org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.getDefaultInstance();
+ onChanged();
+ } else {
+ bloomFilterBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
+ return this;
+ }
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ *
+ * Returns a mutable sub-builder, marking the field present.
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder getBloomFilterBuilder() {
+ bitField0_ |= 0x00000004;
+ onChanged();
+ return getBloomFilterFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ */
+ public org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder getBloomFilterOrBuilder() {
+ if (bloomFilterBuilder_ != null) {
+ return bloomFilterBuilder_.getMessageOrBuilder();
+ } else {
+ return bloomFilter_;
+ }
+ }
+ /**
+ * <code>required .org.apache.hadoop.hive.metastore.hbase.AggrStatsBloomFilter.BloomFilter bloom_filter = 3;</code>
+ *
+ * Lazily creates the SingleFieldBuilder; ownership of the current value
+ * transfers into it, so the raw field is nulled afterwards.
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder>
+ getBloomFilterFieldBuilder() {
+ if (bloomFilterBuilder_ == null) {
+ bloomFilterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilter.Builder, org.apache.hadoop.hive.metastore.hbase.HbaseMetastoreProto.AggrStatsBloomFilter.BloomFilterOrBuilder>(
+ bloomFilter_,
+ getParentForChildren(),
+ isClean());
+ bloomFilter_ = null;
+ }
+ return bloomFilterBuilder_;
+ }
+
+ // required int64 aggregated_at = 4;
+ // Presence bit 0x8. NOTE(review): presumably a timestamp of when the stats
+ // were aggregated — units (ms vs s) not visible here; confirm against writers.
+ private long aggregatedAt_ ;
+ /**
+ * <code>required int64 aggregated_at = 4;</code>
+ */
+ public boolean hasAggregatedAt() {
+ return ((bitField0_ & 0x00000008) == 0x00000008);
+ }
+ /**
+ * <code>required int64 aggregated_at = 4;</code>
+ */
+ public long getAggregatedAt() {
+ return aggregatedAt_;
+ }
+ /**
+ * <code>required int64 aggregated_at = 4;</code>
+ */
+ public Builder setAggregatedAt(long value) {
+ bitField0_ |= 0x00000008;
+
<TRUNCATED>