You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ap...@apache.org on 2013/08/14 02:34:30 UTC
svn commit: r1513703 [3/3] - in /hbase/trunk:
hbase-client/src/main/java/org/apache/hadoop/hbase/client/
hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/
hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/
hbase-protoco...
Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java?rev=1513703&r1=1513702&r2=1513703&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterMonitorProtos.java Wed Aug 14 00:34:30 2013
@@ -2104,6 +2104,899 @@ public final class MasterMonitorProtos {
// @@protoc_insertion_point(class_scope:GetTableDescriptorsResponse)
}
+ public interface GetTableNamesRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+ }
+ public static final class GetTableNamesRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements GetTableNamesRequestOrBuilder {
+ // Use GetTableNamesRequest.newBuilder() to construct.
+ private GetTableNamesRequest(Builder builder) {
+ super(builder);
+ }
+ private GetTableNamesRequest(boolean noInit) {}
+
+ private static final GetTableNamesRequest defaultInstance;
+ public static GetTableNamesRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GetTableNamesRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableNamesRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableNamesRequest_fieldAccessorTable;
+ }
+
+ private void initFields() {
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest) obj;
+
+ boolean result = true;
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableNamesRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableNamesRequest_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest(this);
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest.getDefaultInstance()) return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:GetTableNamesRequest)
+ }
+
+ static {
+ defaultInstance = new GetTableNamesRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:GetTableNamesRequest)
+ }
+
+ public interface GetTableNamesResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // repeated .TableName table_names = 1;
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>
+ getTableNamesList();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index);
+ int getTableNamesCount();
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNamesOrBuilderList();
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder(
+ int index);
+ }
+ public static final class GetTableNamesResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements GetTableNamesResponseOrBuilder {
+ // Use GetTableNamesResponse.newBuilder() to construct.
+ private GetTableNamesResponse(Builder builder) {
+ super(builder);
+ }
+ private GetTableNamesResponse(boolean noInit) {}
+
+ private static final GetTableNamesResponse defaultInstance;
+ public static GetTableNamesResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GetTableNamesResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableNamesResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableNamesResponse_fieldAccessorTable;
+ }
+
+ // repeated .TableName table_names = 1;
+ public static final int TABLE_NAMES_FIELD_NUMBER = 1;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableNames_;
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableNamesList() {
+ return tableNames_;
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNamesOrBuilderList() {
+ return tableNames_;
+ }
+ public int getTableNamesCount() {
+ return tableNames_.size();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index) {
+ return tableNames_.get(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder(
+ int index) {
+ return tableNames_.get(index);
+ }
+
+ private void initFields() {
+ tableNames_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ for (int i = 0; i < getTableNamesCount(); i++) {
+ if (!getTableNames(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ for (int i = 0; i < tableNames_.size(); i++) {
+ output.writeMessage(1, tableNames_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ for (int i = 0; i < tableNames_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, tableNames_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse) obj;
+
+ boolean result = true;
+ result = result && getTableNamesList()
+ .equals(other.getTableNamesList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (getTableNamesCount() > 0) {
+ hash = (37 * hash) + TABLE_NAMES_FIELD_NUMBER;
+ hash = (53 * hash) + getTableNamesList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableNamesResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.internal_static_GetTableNamesResponse_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableNamesFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (tableNamesBuilder_ == null) {
+ tableNames_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ tableNamesBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse(this);
+ int from_bitField0_ = bitField0_;
+ if (tableNamesBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ tableNames_ = java.util.Collections.unmodifiableList(tableNames_);
+ bitField0_ = (bitField0_ & ~0x00000001);
+ }
+ result.tableNames_ = tableNames_;
+ } else {
+ result.tableNames_ = tableNamesBuilder_.build();
+ }
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.getDefaultInstance()) return this;
+ if (tableNamesBuilder_ == null) {
+ if (!other.tableNames_.isEmpty()) {
+ if (tableNames_.isEmpty()) {
+ tableNames_ = other.tableNames_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ } else {
+ ensureTableNamesIsMutable();
+ tableNames_.addAll(other.tableNames_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.tableNames_.isEmpty()) {
+ if (tableNamesBuilder_.isEmpty()) {
+ tableNamesBuilder_.dispose();
+ tableNamesBuilder_ = null;
+ tableNames_ = other.tableNames_;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ tableNamesBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getTableNamesFieldBuilder() : null;
+ } else {
+ tableNamesBuilder_.addAllMessages(other.tableNames_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ for (int i = 0; i < getTableNamesCount(); i++) {
+ if (!getTableNames(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addTableNames(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // repeated .TableName table_names = 1;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> tableNames_ =
+ java.util.Collections.emptyList();
+ private void ensureTableNamesIsMutable() {
+ if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+ tableNames_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName>(tableNames_);
+ bitField0_ |= 0x00000001;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNamesBuilder_;
+
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> getTableNamesList() {
+ if (tableNamesBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(tableNames_);
+ } else {
+ return tableNamesBuilder_.getMessageList();
+ }
+ }
+ public int getTableNamesCount() {
+ if (tableNamesBuilder_ == null) {
+ return tableNames_.size();
+ } else {
+ return tableNamesBuilder_.getCount();
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableNames(int index) {
+ if (tableNamesBuilder_ == null) {
+ return tableNames_.get(index);
+ } else {
+ return tableNamesBuilder_.getMessage(index);
+ }
+ }
+ public Builder setTableNames(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNamesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableNamesIsMutable();
+ tableNames_.set(index, value);
+ onChanged();
+ } else {
+ tableNamesBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ public Builder setTableNames(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNamesBuilder_ == null) {
+ ensureTableNamesIsMutable();
+ tableNames_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ tableNamesBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addTableNames(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNamesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableNamesIsMutable();
+ tableNames_.add(value);
+ onChanged();
+ } else {
+ tableNamesBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ public Builder addTableNames(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNamesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureTableNamesIsMutable();
+ tableNames_.add(index, value);
+ onChanged();
+ } else {
+ tableNamesBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ public Builder addTableNames(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNamesBuilder_ == null) {
+ ensureTableNamesIsMutable();
+ tableNames_.add(builderForValue.build());
+ onChanged();
+ } else {
+ tableNamesBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addTableNames(
+ int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNamesBuilder_ == null) {
+ ensureTableNamesIsMutable();
+ tableNames_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ tableNamesBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addAllTableNames(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName> values) {
+ if (tableNamesBuilder_ == null) {
+ ensureTableNamesIsMutable();
+ super.addAll(values, tableNames_);
+ onChanged();
+ } else {
+ tableNamesBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ public Builder clearTableNames() {
+ if (tableNamesBuilder_ == null) {
+ tableNames_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ } else {
+ tableNamesBuilder_.clear();
+ }
+ return this;
+ }
+ public Builder removeTableNames(int index) {
+ if (tableNamesBuilder_ == null) {
+ ensureTableNamesIsMutable();
+ tableNames_.remove(index);
+ onChanged();
+ } else {
+ tableNamesBuilder_.remove(index);
+ }
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNamesBuilder(
+ int index) {
+ return getTableNamesFieldBuilder().getBuilder(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNamesOrBuilder(
+ int index) {
+ if (tableNamesBuilder_ == null) {
+ return tableNames_.get(index); } else {
+ return tableNamesBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNamesOrBuilderList() {
+ if (tableNamesBuilder_ != null) {
+ return tableNamesBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(tableNames_);
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNamesBuilder() {
+ return getTableNamesFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder addTableNamesBuilder(
+ int index) {
+ return getTableNamesFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance());
+ }
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder>
+ getTableNamesBuilderList() {
+ return getTableNamesFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNamesFieldBuilder() {
+ if (tableNamesBuilder_ == null) {
+ tableNamesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableNames_,
+ ((bitField0_ & 0x00000001) == 0x00000001),
+ getParentForChildren(),
+ isClean());
+ tableNames_ = null;
+ }
+ return tableNamesBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:GetTableNamesResponse)
+ }
+
+ static {
+ defaultInstance = new GetTableNamesResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:GetTableNamesResponse)
+ }
+
public interface GetClusterStatusRequestOrBuilder
extends com.google.protobuf.MessageOrBuilder {
}
@@ -2892,6 +3785,11 @@ public final class MasterMonitorProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse> done);
+ public abstract void getTableNames(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse> done);
+
public abstract void getClusterStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request,
@@ -2924,6 +3822,14 @@ public final class MasterMonitorProtos {
}
@java.lang.Override
+ public void getTableNames(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse> done) {
+ impl.getTableNames(controller, request, done);
+ }
+
+ @java.lang.Override
public void getClusterStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request,
@@ -2966,8 +3872,10 @@ public final class MasterMonitorProtos {
case 1:
return impl.getTableDescriptors(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest)request);
case 2:
- return impl.getClusterStatus(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest)request);
+ return impl.getTableNames(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest)request);
case 3:
+ return impl.getClusterStatus(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest)request);
+ case 4:
return impl.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -2988,8 +3896,10 @@ public final class MasterMonitorProtos {
case 1:
return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest.getDefaultInstance();
case 2:
- return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest.getDefaultInstance();
case 3:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest.getDefaultInstance();
+ case 4:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -3010,8 +3920,10 @@ public final class MasterMonitorProtos {
case 1:
return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse.getDefaultInstance();
case 2:
- return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.getDefaultInstance();
case 3:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance();
+ case 4:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -3031,6 +3943,11 @@ public final class MasterMonitorProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse> done);
+ public abstract void getTableNames(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse> done);
+
public abstract void getClusterStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request,
@@ -3074,11 +3991,16 @@ public final class MasterMonitorProtos {
done));
return;
case 2:
+ this.getTableNames(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse>specializeCallback(
+ done));
+ return;
+ case 3:
this.getClusterStatus(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse>specializeCallback(
done));
return;
- case 3:
+ case 4:
this.isMasterRunning(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest)request,
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse>specializeCallback(
done));
@@ -3102,8 +4024,10 @@ public final class MasterMonitorProtos {
case 1:
return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest.getDefaultInstance();
case 2:
- return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest.getDefaultInstance();
case 3:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest.getDefaultInstance();
+ case 4:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -3124,8 +4048,10 @@ public final class MasterMonitorProtos {
case 1:
return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse.getDefaultInstance();
case 2:
- return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance();
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.getDefaultInstance();
case 3:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance();
+ case 4:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
@@ -3178,12 +4104,27 @@ public final class MasterMonitorProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse.getDefaultInstance()));
}
+ public void getTableNames(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(2),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.getDefaultInstance()));
+ }
+
public void getClusterStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(2),
+ getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance(),
@@ -3198,7 +4139,7 @@ public final class MasterMonitorProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse> done) {
channel.callMethod(
- getDescriptor().getMethods().get(3),
+ getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance(),
@@ -3225,6 +4166,11 @@ public final class MasterMonitorProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest request)
throws com.google.protobuf.ServiceException;
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse getTableNames(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest request)
+ throws com.google.protobuf.ServiceException;
+
public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse getClusterStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request)
@@ -3267,12 +4213,24 @@ public final class MasterMonitorProtos {
}
+ public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse getTableNames(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(2),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.getDefaultInstance());
+ }
+
+
public org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse getClusterStatus(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(2),
+ getDescriptor().getMethods().get(3),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusResponse.getDefaultInstance());
@@ -3284,7 +4242,7 @@ public final class MasterMonitorProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest request)
throws com.google.protobuf.ServiceException {
return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse) channel.callBlockingMethod(
- getDescriptor().getMethods().get(3),
+ getDescriptor().getMethods().get(4),
controller,
request,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse.getDefaultInstance());
@@ -3314,6 +4272,16 @@ public final class MasterMonitorProtos {
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_GetTableDescriptorsResponse_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_GetTableNamesRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_GetTableNamesRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_GetTableNamesResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_GetTableNamesResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
internal_static_GetClusterStatusRequest_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
@@ -3340,20 +4308,23 @@ public final class MasterMonitorProtos {
"otal_regions\030\002 \001(\r\"=\n\032GetTableDescriptor" +
"sRequest\022\037\n\013table_names\030\001 \003(\0132\n.TableNam" +
"e\"A\n\033GetTableDescriptorsResponse\022\"\n\014tabl" +
- "e_schema\030\001 \003(\0132\014.TableSchema\"\031\n\027GetClust" +
- "erStatusRequest\"B\n\030GetClusterStatusRespo",
- "nse\022&\n\016cluster_status\030\001 \002(\0132\016.ClusterSta" +
- "tus2\314\002\n\024MasterMonitorService\022S\n\024GetSchem" +
- "aAlterStatus\022\034.GetSchemaAlterStatusReque" +
- "st\032\035.GetSchemaAlterStatusResponse\022P\n\023Get" +
- "TableDescriptors\022\033.GetTableDescriptorsRe" +
- "quest\032\034.GetTableDescriptorsResponse\022G\n\020G" +
- "etClusterStatus\022\030.GetClusterStatusReques" +
- "t\032\031.GetClusterStatusResponse\022D\n\017IsMaster" +
- "Running\022\027.IsMasterRunningRequest\032\030.IsMas" +
- "terRunningResponseBI\n*org.apache.hadoop.",
- "hbase.protobuf.generatedB\023MasterMonitorP" +
- "rotosH\001\210\001\001\240\001\001"
+ "e_schema\030\001 \003(\0132\014.TableSchema\"\026\n\024GetTable" +
+ "NamesRequest\"8\n\025GetTableNamesResponse\022\037\n",
+ "\013table_names\030\001 \003(\0132\n.TableName\"\031\n\027GetClu" +
+ "sterStatusRequest\"B\n\030GetClusterStatusRes" +
+ "ponse\022&\n\016cluster_status\030\001 \002(\0132\016.ClusterS" +
+ "tatus2\214\003\n\024MasterMonitorService\022S\n\024GetSch" +
+ "emaAlterStatus\022\034.GetSchemaAlterStatusReq" +
+ "uest\032\035.GetSchemaAlterStatusResponse\022P\n\023G" +
+ "etTableDescriptors\022\033.GetTableDescriptors" +
+ "Request\032\034.GetTableDescriptorsResponse\022>\n" +
+ "\rGetTableNames\022\025.GetTableNamesRequest\032\026." +
+ "GetTableNamesResponse\022G\n\020GetClusterStatu",
+ "s\022\030.GetClusterStatusRequest\032\031.GetCluster" +
+ "StatusResponse\022D\n\017IsMasterRunning\022\027.IsMa" +
+ "sterRunningRequest\032\030.IsMasterRunningResp" +
+ "onseBI\n*org.apache.hadoop.hbase.protobuf" +
+ ".generatedB\023MasterMonitorProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -3392,8 +4363,24 @@ public final class MasterMonitorProtos {
new java.lang.String[] { "TableSchema", },
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse.Builder.class);
- internal_static_GetClusterStatusRequest_descriptor =
+ internal_static_GetTableNamesRequest_descriptor =
getDescriptor().getMessageTypes().get(4);
+ internal_static_GetTableNamesRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_GetTableNamesRequest_descriptor,
+ new java.lang.String[] { },
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest.Builder.class);
+ internal_static_GetTableNamesResponse_descriptor =
+ getDescriptor().getMessageTypes().get(5);
+ internal_static_GetTableNamesResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_GetTableNamesResponse_descriptor,
+ new java.lang.String[] { "TableNames", },
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse.Builder.class);
+ internal_static_GetClusterStatusRequest_descriptor =
+ getDescriptor().getMessageTypes().get(6);
internal_static_GetClusterStatusRequest_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetClusterStatusRequest_descriptor,
@@ -3401,7 +4388,7 @@ public final class MasterMonitorProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest.class,
org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest.Builder.class);
internal_static_GetClusterStatusResponse_descriptor =
- getDescriptor().getMessageTypes().get(5);
+ getDescriptor().getMessageTypes().get(7);
internal_static_GetClusterStatusResponse_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_GetClusterStatusResponse_descriptor,
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto?rev=1513703&r1=1513702&r2=1513703&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/MasterAdmin.proto Wed Aug 14 00:34:30 2013
@@ -176,14 +176,21 @@ message ListNamespaceDescriptorsResponse
repeated NamespaceDescriptor namespaceDescriptor = 1;
}
-message GetTableDescriptorsByNamespaceRequest {
+message ListTableDescriptorsByNamespaceRequest {
required string namespaceName = 1;
}
-message GetTableDescriptorsByNamespaceResponse {
+message ListTableDescriptorsByNamespaceResponse {
repeated TableSchema tableSchema = 1;
}
+message ListTableNamesByNamespaceRequest {
+ required string namespaceName = 1;
+}
+
+message ListTableNamesByNamespaceResponse {
+ repeated TableName tableName = 1;
+}
/* Cluster-level protobufs */
@@ -454,6 +461,10 @@ service MasterAdminService {
returns(ListNamespaceDescriptorsResponse);
/** returns a list of tables for a given namespace*/
- rpc GetTableDescriptorsByNamespace(GetTableDescriptorsByNamespaceRequest)
- returns(GetTableDescriptorsByNamespaceResponse);
+ rpc ListTableDescriptorsByNamespace(ListTableDescriptorsByNamespaceRequest)
+ returns(ListTableDescriptorsByNamespaceResponse);
+
+  /** returns a list of table names for a given namespace */
+ rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest)
+ returns(ListTableNamesByNamespaceResponse);
}
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/MasterMonitor.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/MasterMonitor.proto?rev=1513703&r1=1513702&r2=1513703&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/MasterMonitor.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/MasterMonitor.proto Wed Aug 14 00:34:30 2013
@@ -45,6 +45,13 @@ message GetTableDescriptorsResponse {
repeated TableSchema table_schema = 1;
}
+message GetTableNamesRequest {
+}
+
+message GetTableNamesResponse {
+ repeated TableName table_names = 1;
+}
+
message GetClusterStatusRequest {
}
@@ -61,6 +68,10 @@ service MasterMonitorService {
rpc GetTableDescriptors(GetTableDescriptorsRequest)
returns(GetTableDescriptorsResponse);
+ /** Get the list of table names. */
+ rpc GetTableNames(GetTableNamesRequest)
+ returns(GetTableNamesResponse);
+
/** Return cluster status. */
rpc GetClusterStatus(GetClusterStatusRequest)
returns(GetClusterStatusResponse);
Modified: hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon?rev=1513703&r1=1513702&r2=1513703&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon (original)
+++ hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon Wed Aug 14 00:34:30 2013
@@ -298,7 +298,7 @@ AssignmentManager assignmentManager = ma
<%def catalogTables>
<%java>
- HTableDescriptor[] sysTables = admin.getTableDescriptorsByNamespace(NamespaceDescriptor
+ HTableDescriptor[] sysTables = admin.listTableDescriptorsByNamespace(NamespaceDescriptor
.SYSTEM_NAMESPACE_NAME_STR);
</%java>
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1513703&r1=1513702&r2=1513703&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Wed Aug 14 00:34:30 2013
@@ -25,6 +25,7 @@ import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
@@ -175,6 +176,8 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetSchemaAlterStatusResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableNamesResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
@@ -2630,6 +2633,27 @@ MasterServices, Server {
}
/**
+ * Get list of table names
+ * @param controller Unused (set to null).
+ * @param req GetTableNamesRequest
+ * @return GetTableNamesResponse
+ * @throws ServiceException
+ */
+ public GetTableNamesResponse getTableNames(
+ RpcController controller, GetTableNamesRequest req) throws ServiceException {
+ try {
+ Collection<HTableDescriptor> descriptors = this.tableDescriptors.getAll().values();
+ GetTableNamesResponse.Builder builder = GetTableNamesResponse.newBuilder();
+ for (HTableDescriptor descriptor: descriptors) {
+ builder.addTableNames(ProtobufUtil.toProtoTableName(descriptor.getTableName()));
+ }
+ return builder.build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ /**
* Compute the average load across all region servers.
* Currently, this uses a very naive computation - just uses the number of
* regions being served, ignoring stats about number of requests.
@@ -3014,13 +3038,13 @@ MasterServices, Server {
}
@Override
- public MasterAdminProtos.GetTableDescriptorsByNamespaceResponse getTableDescriptorsByNamespace(
- RpcController controller, MasterAdminProtos.GetTableDescriptorsByNamespaceRequest request)
+ public MasterAdminProtos.ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(
+ RpcController controller, MasterAdminProtos.ListTableDescriptorsByNamespaceRequest request)
throws ServiceException {
try {
- MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.Builder b =
- MasterAdminProtos.GetTableDescriptorsByNamespaceResponse.newBuilder();
- for(HTableDescriptor htd: getTableDescriptorsByNamespace(request.getNamespaceName())) {
+ MasterAdminProtos.ListTableDescriptorsByNamespaceResponse.Builder b =
+ MasterAdminProtos.ListTableDescriptorsByNamespaceResponse.newBuilder();
+ for(HTableDescriptor htd: listTableDescriptorsByNamespace(request.getNamespaceName())) {
b.addTableSchema(htd.convert());
}
return b.build();
@@ -3029,6 +3053,22 @@ MasterServices, Server {
}
}
+ @Override
+ public MasterAdminProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace(
+ RpcController controller, MasterAdminProtos.ListTableNamesByNamespaceRequest request)
+ throws ServiceException {
+ try {
+ MasterAdminProtos.ListTableNamesByNamespaceResponse.Builder b =
+ MasterAdminProtos.ListTableNamesByNamespaceResponse.newBuilder();
+ for (TableName tableName: listTableNamesByNamespace(request.getNamespaceName())) {
+ b.addTableName(ProtobufUtil.toProtoTableName(tableName));
+ }
+ return b.build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
private boolean isHealthCheckerConfigured() {
String healthScriptLocation = this.conf.get(HConstants.HEALTH_SCRIPT_LOC);
return org.apache.commons.lang.StringUtils.isNotBlank(healthScriptLocation);
@@ -3080,7 +3120,16 @@ MasterServices, Server {
return Lists.newArrayList(tableNamespaceManager.list());
}
- public List<HTableDescriptor> getTableDescriptorsByNamespace(String name) throws IOException {
+ public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
return Lists.newArrayList(tableDescriptors.getByNamespace(name).values());
}
+
+  public List<TableName> listTableNamesByNamespace(String name) throws IOException {
+    List<TableName> tableNames = Lists.newArrayList();
+    for (HTableDescriptor descriptor: tableDescriptors.getByNamespace(name).values()) {
+      // Use the descriptor's own TableName: getNameAsString() is the fully-qualified
+      // "namespace:qualifier" form and must not be passed as a qualifier.
+      tableNames.add(descriptor.getTableName());
+    }
+    return tableNames;
+  }
+
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java?rev=1513703&r1=1513702&r2=1513703&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java Wed Aug 14 00:34:30 2013
@@ -230,8 +230,16 @@ public interface MasterServices extends
/**
* Get list of table descriptors by namespace
* @param name namespace name
- * @return A descriptor
+ * @return descriptors
* @throws IOException
*/
- public List<HTableDescriptor> getTableDescriptorsByNamespace(String name) throws IOException;
+ public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException;
+
+ /**
+ * Get list of table names by namespace
+ * @param name namespace name
+ * @return table names
+ * @throws IOException
+ */
+ public List<TableName> listTableNamesByNamespace(String name) throws IOException;
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java?rev=1513703&r1=1513702&r2=1513703&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java Wed Aug 14 00:34:30 2013
@@ -155,7 +155,7 @@ public class TableNamespaceManager {
if (NamespaceDescriptor.RESERVED_NAMESPACES.contains(name)) {
throw new ConstraintException("Reserved namespace "+name+" cannot be removed.");
}
- int tableCount = masterServices.getTableDescriptorsByNamespace(name).size();
+ int tableCount = masterServices.listTableDescriptorsByNamespace(name).size();
if (tableCount > 0) {
throw new ConstraintException("Only empty namespaces can be removed. " +
"Namespace "+name+" has "+tableCount+" tables");
Modified: hbase/trunk/hbase-server/src/main/ruby/hbase/admin.rb
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/ruby/hbase/admin.rb?rev=1513703&r1=1513702&r2=1513703&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/ruby/hbase/admin.rb (original)
+++ hbase/trunk/hbase-server/src/main/ruby/hbase/admin.rb Wed Aug 14 00:34:30 2013
@@ -42,7 +42,7 @@ module Hbase
#----------------------------------------------------------------------------------------------
# Returns a list of tables in hbase
def list(regex = ".*")
- @admin.listTables(regex).map { |t| t.getTableName().getNameAsString }
+ @admin.getTableNames(regex)
end
#----------------------------------------------------------------------------------------------
@@ -745,7 +745,7 @@ module Hbase
# Returns a list of tables in namespace
def list_namespace_tables(namespace_name)
unless namespace_name.nil?
- return @admin.getTableDescriptorsByNamespace(namespace_name).map { |t| t.getTableName().getNameAsString }
+ return @admin.listTableNamesByNamespace(namespace_name).map { |t| t.getQualifierAsString() }
end
raise(ArgumentError, "Failed to find namespace named #{namespace_name}")
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java?rev=1513703&r1=1513702&r2=1513703&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestNamespace.java Wed Aug 14 00:34:30 2013
@@ -109,7 +109,7 @@ public class TestNamespace {
TableName.META_TABLE_NAME,
TableName.NAMESPACE_TABLE_NAME);
HTableDescriptor[] descs =
- admin.getTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
+ admin.listTableDescriptorsByNamespace(NamespaceDescriptor.SYSTEM_NAMESPACE.getName());
assertEquals(systemTables.size(), descs.length);
for (HTableDescriptor desc : descs) {
assertTrue(systemTables.contains(desc.getTableName()));
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java?rev=1513703&r1=1513702&r2=1513703&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java Wed Aug 14 00:34:30 2013
@@ -330,11 +330,16 @@ public class TestCatalogJanitor {
}
@Override
- public List<HTableDescriptor> getTableDescriptorsByNamespace(String name) throws IOException {
+ public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
return null; //To change body of implemented methods use File | Settings | File Templates.
}
@Override
+ public List<TableName> listTableNamesByNamespace(String name) throws IOException {
+ return null;
+ }
+
+ @Override
public void deleteTable(TableName tableName) throws IOException { }
@Override