You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by st...@apache.org on 2013/06/22 02:15:12 UTC
svn commit: r1495642 [6/6] - in /hbase/trunk:
hbase-client/src/main/java/org/apache/hadoop/hbase/
hbase-client/src/main/java/org/apache/hadoop/hbase/filter/
hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/
hbase-client/src/main/java/org/apa...
Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java Sat Jun 22 00:15:11 2013
@@ -1138,8 +1138,8 @@ public final class RegionServerStatusPro
// optional .ServerLoad load = 2;
boolean hasLoad();
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad();
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder();
+ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad();
+ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder();
}
public static final class RegionServerReportRequest extends
com.google.protobuf.GeneratedMessage
@@ -1185,20 +1185,20 @@ public final class RegionServerStatusPro
// optional .ServerLoad load = 2;
public static final int LOAD_FIELD_NUMBER = 2;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad load_;
+ private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad load_;
public boolean hasLoad() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad() {
+ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad() {
return load_;
}
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder() {
return load_;
}
private void initFields() {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
- load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
+ load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -1424,7 +1424,7 @@ public final class RegionServerStatusPro
}
bitField0_ = (bitField0_ & ~0x00000001);
if (loadBuilder_ == null) {
- load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
+ load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
} else {
loadBuilder_.clear();
}
@@ -1560,7 +1560,7 @@ public final class RegionServerStatusPro
break;
}
case 18: {
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.newBuilder();
+ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder();
if (hasLoad()) {
subBuilder.mergeFrom(getLoad());
}
@@ -1665,20 +1665,20 @@ public final class RegionServerStatusPro
}
// optional .ServerLoad load = 2;
- private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
+ private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder> loadBuilder_;
+ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder> loadBuilder_;
public boolean hasLoad() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad getLoad() {
+ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getLoad() {
if (loadBuilder_ == null) {
return load_;
} else {
return loadBuilder_.getMessage();
}
}
- public Builder setLoad(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad value) {
+ public Builder setLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
if (loadBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
@@ -1692,7 +1692,7 @@ public final class RegionServerStatusPro
return this;
}
public Builder setLoad(
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder builderForValue) {
+ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder builderForValue) {
if (loadBuilder_ == null) {
load_ = builderForValue.build();
onChanged();
@@ -1702,12 +1702,12 @@ public final class RegionServerStatusPro
bitField0_ |= 0x00000002;
return this;
}
- public Builder mergeLoad(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad value) {
+ public Builder mergeLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
if (loadBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
- load_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance()) {
+ load_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
load_ =
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial();
+ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(load_).mergeFrom(value).buildPartial();
} else {
load_ = value;
}
@@ -1720,7 +1720,7 @@ public final class RegionServerStatusPro
}
public Builder clearLoad() {
if (loadBuilder_ == null) {
- load_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.getDefaultInstance();
+ load_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
onChanged();
} else {
loadBuilder_.clear();
@@ -1728,12 +1728,12 @@ public final class RegionServerStatusPro
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder getLoadBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder getLoadBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getLoadFieldBuilder().getBuilder();
}
- public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder getLoadOrBuilder() {
+ public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getLoadOrBuilder() {
if (loadBuilder_ != null) {
return loadBuilder_.getMessageOrBuilder();
} else {
@@ -1741,11 +1741,11 @@ public final class RegionServerStatusPro
}
}
private com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder>
+ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder>
getLoadFieldBuilder() {
if (loadBuilder_ == null) {
loadBuilder_ = new com.google.protobuf.SingleFieldBuilder<
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerLoadOrBuilder>(
+ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder>(
load_,
getParentForChildren(),
isClean());
@@ -4175,31 +4175,32 @@ public final class RegionServerStatusPro
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\030RegionServerStatus.proto\032\013hbase.proto\"" +
- "^\n\032RegionServerStartupRequest\022\014\n\004port\030\001 " +
- "\002(\r\022\027\n\017serverStartCode\030\002 \002(\004\022\031\n\021serverCu" +
- "rrentTime\030\003 \002(\004\"B\n\033RegionServerStartupRe" +
- "sponse\022#\n\nmapEntries\030\001 \003(\0132\017.NameStringP" +
- "air\"S\n\031RegionServerReportRequest\022\033\n\006serv" +
- "er\030\001 \002(\0132\013.ServerName\022\031\n\004load\030\002 \001(\0132\013.Se" +
- "rverLoad\"\034\n\032RegionServerReportResponse\"N" +
- "\n\031ReportRSFatalErrorRequest\022\033\n\006server\030\001 " +
- "\002(\0132\013.ServerName\022\024\n\014errorMessage\030\002 \002(\t\"\034",
- "\n\032ReportRSFatalErrorResponse\"5\n\037GetLastF" +
- "lushedSequenceIdRequest\022\022\n\nregionName\030\001 " +
- "\002(\014\"A\n GetLastFlushedSequenceIdResponse\022" +
- "\035\n\025lastFlushedSequenceId\030\001 \002(\0042\354\002\n\031Regio" +
- "nServerStatusService\022P\n\023regionServerStar" +
- "tup\022\033.RegionServerStartupRequest\032\034.Regio" +
- "nServerStartupResponse\022M\n\022regionServerRe" +
- "port\022\032.RegionServerReportRequest\032\033.Regio" +
- "nServerReportResponse\022M\n\022reportRSFatalEr" +
- "ror\022\032.ReportRSFatalErrorRequest\032\033.Report",
- "RSFatalErrorResponse\022_\n\030getLastFlushedSe" +
- "quenceId\022 .GetLastFlushedSequenceIdReque" +
- "st\032!.GetLastFlushedSequenceIdResponseBN\n" +
- "*org.apache.hadoop.hbase.protobuf.genera" +
- "tedB\030RegionServerStatusProtosH\001\210\001\001\240\001\001"
+ "\n\030RegionServerStatus.proto\032\013hbase.proto\032" +
+ "\023ClusterStatus.proto\"^\n\032RegionServerStar" +
+ "tupRequest\022\014\n\004port\030\001 \002(\r\022\027\n\017serverStartC" +
+ "ode\030\002 \002(\004\022\031\n\021serverCurrentTime\030\003 \002(\004\"B\n\033" +
+ "RegionServerStartupResponse\022#\n\nmapEntrie" +
+ "s\030\001 \003(\0132\017.NameStringPair\"S\n\031RegionServer" +
+ "ReportRequest\022\033\n\006server\030\001 \002(\0132\013.ServerNa" +
+ "me\022\031\n\004load\030\002 \001(\0132\013.ServerLoad\"\034\n\032RegionS" +
+ "erverReportResponse\"N\n\031ReportRSFatalErro" +
+ "rRequest\022\033\n\006server\030\001 \002(\0132\013.ServerName\022\024\n",
+ "\014errorMessage\030\002 \002(\t\"\034\n\032ReportRSFatalErro" +
+ "rResponse\"5\n\037GetLastFlushedSequenceIdReq" +
+ "uest\022\022\n\nregionName\030\001 \002(\014\"A\n GetLastFlush" +
+ "edSequenceIdResponse\022\035\n\025lastFlushedSeque" +
+ "nceId\030\001 \002(\0042\354\002\n\031RegionServerStatusServic" +
+ "e\022P\n\023regionServerStartup\022\033.RegionServerS" +
+ "tartupRequest\032\034.RegionServerStartupRespo" +
+ "nse\022M\n\022regionServerReport\022\032.RegionServer" +
+ "ReportRequest\032\033.RegionServerReportRespon" +
+ "se\022M\n\022reportRSFatalError\022\032.ReportRSFatal",
+ "ErrorRequest\032\033.ReportRSFatalErrorRespons" +
+ "e\022_\n\030getLastFlushedSequenceId\022 .GetLastF" +
+ "lushedSequenceIdRequest\032!.GetLastFlushed" +
+ "SequenceIdResponseBN\n*org.apache.hadoop." +
+ "hbase.protobuf.generatedB\030RegionServerSt" +
+ "atusProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -4277,6 +4278,7 @@ public final class RegionServerStatusPro
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
+ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.getDescriptor(),
}, assigner);
}
Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java Sat Jun 22 00:15:11 2013
@@ -8,7 +8,7 @@ public final class ZooKeeperProtos {
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
- public interface RootRegionServerOrBuilder
+ public interface MetaRegionServerOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .ServerName server = 1;
@@ -16,32 +16,32 @@ public final class ZooKeeperProtos {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
}
- public static final class RootRegionServer extends
+ public static final class MetaRegionServer extends
com.google.protobuf.GeneratedMessage
- implements RootRegionServerOrBuilder {
- // Use RootRegionServer.newBuilder() to construct.
- private RootRegionServer(Builder builder) {
+ implements MetaRegionServerOrBuilder {
+ // Use MetaRegionServer.newBuilder() to construct.
+ private MetaRegionServer(Builder builder) {
super(builder);
}
- private RootRegionServer(boolean noInit) {}
+ private MetaRegionServer(boolean noInit) {}
- private static final RootRegionServer defaultInstance;
- public static RootRegionServer getDefaultInstance() {
+ private static final MetaRegionServer defaultInstance;
+ public static MetaRegionServer getDefaultInstance() {
return defaultInstance;
}
- public RootRegionServer getDefaultInstanceForType() {
+ public MetaRegionServer getDefaultInstanceForType() {
return defaultInstance;
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable;
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_fieldAccessorTable;
}
private int bitField0_;
@@ -114,10 +114,10 @@ public final class ZooKeeperProtos {
if (obj == this) {
return true;
}
- if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)) {
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)) {
return super.equals(obj);
}
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) obj;
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) obj;
boolean result = true;
result = result && (hasServer() == other.hasServer());
@@ -142,41 +142,41 @@ public final class ZooKeeperProtos {
return hash;
}
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(byte[] data)
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data).buildParsed();
}
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return newBuilder().mergeFrom(data, extensionRegistry)
.buildParsed();
}
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(java.io.InputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return newBuilder().mergeFrom(input, extensionRegistry)
.buildParsed();
}
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom(java.io.InputStream input)
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
Builder builder = newBuilder();
if (builder.mergeDelimitedFrom(input)) {
@@ -185,7 +185,7 @@ public final class ZooKeeperProtos {
return null;
}
}
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseDelimitedFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -196,12 +196,12 @@ public final class ZooKeeperProtos {
return null;
}
}
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return newBuilder().mergeFrom(input).buildParsed();
}
- public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer parseFrom(
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
@@ -211,7 +211,7 @@ public final class ZooKeeperProtos {
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
- public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer prototype) {
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@@ -224,18 +224,18 @@ public final class ZooKeeperProtos {
}
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
- implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServerOrBuilder {
+ implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServerOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_descriptor;
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RootRegionServer_fieldAccessorTable;
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_MetaRegionServer_fieldAccessorTable;
}
- // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.newBuilder()
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
@@ -270,24 +270,24 @@ public final class ZooKeeperProtos {
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDescriptor();
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDescriptor();
}
- public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer getDefaultInstanceForType() {
- return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance();
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance();
}
- public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer build() {
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial();
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer build() {
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
- private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildParsed()
+ private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer buildParsed()
throws com.google.protobuf.InvalidProtocolBufferException {
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = buildPartial();
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(
result).asInvalidProtocolBufferException();
@@ -295,8 +295,8 @@ public final class ZooKeeperProtos {
return result;
}
- public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer buildPartial() {
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer(this);
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
@@ -313,16 +313,16 @@ public final class ZooKeeperProtos {
}
public Builder mergeFrom(com.google.protobuf.Message other) {
- if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer) {
- return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer)other);
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer)other);
} else {
super.mergeFrom(other);
return this;
}
}
- public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer other) {
- if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.getDefaultInstance()) return this;
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.getDefaultInstance()) return this;
if (other.hasServer()) {
mergeServer(other.getServer());
}
@@ -470,15 +470,15 @@ public final class ZooKeeperProtos {
return serverBuilder_;
}
- // @@protoc_insertion_point(builder_scope:RootRegionServer)
+ // @@protoc_insertion_point(builder_scope:MetaRegionServer)
}
static {
- defaultInstance = new RootRegionServer(true);
+ defaultInstance = new MetaRegionServer(true);
defaultInstance.initFields();
}
- // @@protoc_insertion_point(class_scope:RootRegionServer)
+ // @@protoc_insertion_point(class_scope:MetaRegionServer)
}
public interface MasterOrBuilder
@@ -6868,10 +6868,10 @@ public final class ZooKeeperProtos {
}
private static com.google.protobuf.Descriptors.Descriptor
- internal_static_RootRegionServer_descriptor;
+ internal_static_MetaRegionServer_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
- internal_static_RootRegionServer_fieldAccessorTable;
+ internal_static_MetaRegionServer_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_Master_descriptor;
private static
@@ -6941,7 +6941,7 @@ public final class ZooKeeperProtos {
descriptor;
static {
java.lang.String[] descriptorData = {
- "\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020RootRe" +
+ "\n\017ZooKeeper.proto\032\013hbase.proto\"/\n\020MetaRe" +
"gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\"" +
"%\n\006Master\022\033\n\006master\030\001 \002(\0132\013.ServerName\"\036" +
"\n\tClusterUp\022\021\n\tstartDate\030\001 \002(\t\"\203\001\n\020Regio" +
@@ -6976,14 +6976,14 @@ public final class ZooKeeperProtos {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
- internal_static_RootRegionServer_descriptor =
+ internal_static_MetaRegionServer_descriptor =
getDescriptor().getMessageTypes().get(0);
- internal_static_RootRegionServer_fieldAccessorTable = new
+ internal_static_MetaRegionServer_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
- internal_static_RootRegionServer_descriptor,
+ internal_static_MetaRegionServer_descriptor,
new java.lang.String[] { "Server", },
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.class,
- org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RootRegionServer.Builder.class);
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.class,
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer.Builder.class);
internal_static_Master_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_Master_fieldAccessorTable = new
Added: hbase/trunk/hbase-protocol/src/main/protobuf/Cell.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Cell.proto?rev=1495642&view=auto
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Cell.proto (added)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Cell.proto Sat Jun 22 00:15:11 2013
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Cell and KeyValue protos
+
+option java_package = "org.apache.hadoop.hbase.protobuf.generated";
+option java_outer_classname = "CellProtos";
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+/**
+ * The type of the key in a Cell
+ */
+enum CellType {
+ MINIMUM = 0;
+ PUT = 4;
+
+ DELETE = 8;
+ DELETE_COLUMN = 12;
+ DELETE_FAMILY = 14;
+
+ // MAXIMUM is used when searching; you look from maximum on down.
+ MAXIMUM = 255;
+}
+
+/**
+ * Protocol buffer version of Cell.
+ */
+message Cell {
+ optional bytes row = 1;
+ optional bytes family = 2;
+ optional bytes qualifier = 3;
+ optional uint64 timestamp = 4;
+ optional CellType cellType = 5;
+ optional bytes value = 6;
+}
+
+/**
+ * Protocol buffer version of KeyValue.
+ * It doesn't have those transient parameters
+ */
+message KeyValue {
+ required bytes row = 1;
+ required bytes family = 2;
+ required bytes qualifier = 3;
+ optional uint64 timestamp = 4;
+ optional CellType keyType = 5;
+ optional bytes value = 6;
+}
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/Client.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Client.proto?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Client.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Client.proto Sat Jun 22 00:15:11 2013
@@ -25,6 +25,8 @@ option java_generate_equals_and_hash = t
option optimize_for = SPEED;
import "hbase.proto";
+import "Filter.proto";
+import "Cell.proto";
import "Comparator.proto";
/**
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/ClusterStatus.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/ClusterStatus.proto?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/ClusterStatus.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/ClusterStatus.proto Sat Jun 22 00:15:11 2013
@@ -51,11 +51,110 @@ message RegionInTransition {
required RegionState regionState = 2;
}
+message RegionLoad {
+ /** the region specifier */
+ required RegionSpecifier regionSpecifier = 1;
+
+ /** the number of stores for the region */
+ optional uint32 stores = 2;
+
+ /** the number of storefiles for the region */
+ optional uint32 storefiles = 3;
+
+ /** the total size of the store files for the region, uncompressed, in MB */
+ optional uint32 storeUncompressedSizeMB = 4;
+
+ /** the current total size of the store files for the region, in MB */
+ optional uint32 storefileSizeMB = 5;
+
+ /** the current size of the memstore for the region, in MB */
+ optional uint32 memstoreSizeMB = 6;
+
+ /**
+ * The current total size of root-level store file indexes for the region,
+ * in MB. The same as {@link #rootIndexSizeKB} but in MB.
+ */
+ optional uint32 storefileIndexSizeMB = 7;
+
+ /** the current total read requests made to region */
+ optional uint64 readRequestsCount = 8;
+
+ /** the current total write requests made to region */
+ optional uint64 writeRequestsCount = 9;
+
+ /** the total compacting key values in currently running compaction */
+ optional uint64 totalCompactingKVs = 10;
+
+ /** the completed count of key values in currently running compaction */
+ optional uint64 currentCompactedKVs = 11;
+
+ /** The current total size of root-level indexes for the region, in KB. */
+ optional uint32 rootIndexSizeKB = 12;
+
+ /** The total size of all index blocks, not just the root level, in KB. */
+ optional uint32 totalStaticIndexSizeKB = 13;
+
+ /**
+ * The total size of all Bloom filter blocks, not just loaded into the
+ * block cache, in KB.
+ */
+ optional uint32 totalStaticBloomSizeKB = 14;
+
+ /** the most recent sequence Id from cache flush */
+ optional uint64 completeSequenceId = 15;
+}
+
+/* Server-level protobufs */
+
+message ServerLoad {
+ /** Number of requests since last report. */
+ optional uint32 numberOfRequests = 1;
+
+ /** Total Number of requests from the start of the region server. */
+ optional uint32 totalNumberOfRequests = 2;
+
+ /** the amount of used heap, in MB. */
+ optional uint32 usedHeapMB = 3;
+
+ /** the maximum allowable size of the heap, in MB. */
+ optional uint32 maxHeapMB = 4;
+
+ /** Information on the load of individual regions. */
+ repeated RegionLoad regionLoads = 5;
+
+ /**
+ * Regionserver-level coprocessors, e.g., WALObserver implementations.
+ * Region-level coprocessors, on the other hand, are stored inside RegionLoad
+ * objects.
+ */
+ repeated Coprocessor coprocessors = 6;
+
+ /**
+ * Time when incremental (non-total) counts began being calculated (e.g. numberOfRequests)
+ * time is measured as the difference, measured in milliseconds, between the current time
+ * and midnight, January 1, 1970 UTC.
+ */
+ optional uint64 reportStartTime = 7;
+
+ /**
+ * Time when report was generated.
+ * time is measured as the difference, measured in milliseconds, between the current time
+ * and midnight, January 1, 1970 UTC.
+ */
+ optional uint64 reportEndTime = 8;
+
+ /**
+ * The port number that this region server is hosting an info server on.
+ */
+ optional uint32 infoServerPort = 9;
+}
+
message LiveServerInfo {
required ServerName server = 1;
required ServerLoad serverLoad = 2;
}
+
message ClusterStatus {
optional HBaseVersionFileContent hbaseVersion = 1;
repeated LiveServerInfo liveServers = 2;
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/Filter.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/Filter.proto?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/Filter.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/Filter.proto Sat Jun 22 00:15:11 2013
@@ -27,6 +27,11 @@ option optimize_for = SPEED;
import "hbase.proto";
import "Comparator.proto";
+message Filter {
+ required string name = 1;
+ optional bytes serializedFilter = 2;
+}
+
message ColumnCountGetFilter {
required int32 limit = 1;
}
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/RegionServerStatus.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/RegionServerStatus.proto?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/RegionServerStatus.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/RegionServerStatus.proto Sat Jun 22 00:15:11 2013
@@ -25,6 +25,7 @@ option java_generate_equals_and_hash = t
option optimize_for = SPEED;
import "hbase.proto";
+import "ClusterStatus.proto";
message RegionServerStartupRequest {
/** Port number this regionserver is up on */
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/ZooKeeper.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/ZooKeeper.proto?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/ZooKeeper.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/ZooKeeper.proto Sat Jun 22 00:15:11 2013
@@ -28,10 +28,10 @@ option optimize_for = SPEED;
import "hbase.proto";
/**
- * Content of the root-region-server znode.
+ * Content of the meta-region-server znode.
*/
-message RootRegionServer {
- // The ServerName hosting the root region currently.
+message MetaRegionServer {
+ // The ServerName hosting the meta region currently.
required ServerName server = 1;
}
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/hbase.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/hbase.proto?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/hbase.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/hbase.proto Sat Jun 22 00:15:11 2013
@@ -23,32 +23,7 @@ option java_outer_classname = "HBaseProt
option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
-/**
- * The type of the key in a Cell
- */
-enum CellType {
- MINIMUM = 0;
- PUT = 4;
-
- DELETE = 8;
- DELETE_COLUMN = 12;
- DELETE_FAMILY = 14;
-
- // MAXIMUM is used when searching; you look from maximum on down.
- MAXIMUM = 255;
-}
-
-/**
- * Protocol buffer version of Cell.
- */
-message Cell {
- optional bytes row = 1;
- optional bytes family = 2;
- optional bytes qualifier = 3;
- optional uint64 timestamp = 4;
- optional CellType cellType = 5;
- optional bytes value = 6;
-}
+import "Cell.proto";
/**
* Table Schema
@@ -110,104 +85,6 @@ message RegionSpecifier {
}
}
-message RegionLoad {
- /** the region specifier */
- required RegionSpecifier regionSpecifier = 1;
-
- /** the number of stores for the region */
- optional uint32 stores = 2;
-
- /** the number of storefiles for the region */
- optional uint32 storefiles = 3;
-
- /** the total size of the store files for the region, uncompressed, in MB */
- optional uint32 storeUncompressedSizeMB = 4;
-
- /** the current total size of the store files for the region, in MB */
- optional uint32 storefileSizeMB = 5;
-
- /** the current size of the memstore for the region, in MB */
- optional uint32 memstoreSizeMB = 6;
-
- /**
- * The current total size of root-level store file indexes for the region,
- * in MB. The same as {@link #rootIndexSizeKB} but in MB.
- */
- optional uint32 storefileIndexSizeMB = 7;
-
- /** the current total read requests made to region */
- optional uint64 readRequestsCount = 8;
-
- /** the current total write requests made to region */
- optional uint64 writeRequestsCount = 9;
-
- /** the total compacting key values in currently running compaction */
- optional uint64 totalCompactingKVs = 10;
-
- /** the completed count of key values in currently running compaction */
- optional uint64 currentCompactedKVs = 11;
-
- /** The current total size of root-level indexes for the region, in KB. */
- optional uint32 rootIndexSizeKB = 12;
-
- /** The total size of all index blocks, not just the root level, in KB. */
- optional uint32 totalStaticIndexSizeKB = 13;
-
- /**
- * The total size of all Bloom filter blocks, not just loaded into the
- * block cache, in KB.
- */
- optional uint32 totalStaticBloomSizeKB = 14;
-
- /** the most recent sequence Id from cache flush */
- optional uint64 completeSequenceId = 15;
-}
-
-/* Server-level protobufs */
-
-message ServerLoad {
- /** Number of requests since last report. */
- optional uint32 numberOfRequests = 1;
-
- /** Total Number of requests from the start of the region server. */
- optional uint32 totalNumberOfRequests = 2;
-
- /** the amount of used heap, in MB. */
- optional uint32 usedHeapMB = 3;
-
- /** the maximum allowable size of the heap, in MB. */
- optional uint32 maxHeapMB = 4;
-
- /** Information on the load of individual regions. */
- repeated RegionLoad regionLoads = 5;
-
- /**
- * Regionserver-level coprocessors, e.g., WALObserver implementations.
- * Region-level coprocessors, on the other hand, are stored inside RegionLoad
- * objects.
- */
- repeated Coprocessor coprocessors = 6;
-
- /**
- * Time when incremental (non-total) counts began being calculated (e.g. numberOfRequests)
- * time is measured as the difference, measured in milliseconds, between the current time
- * and midnight, January 1, 1970 UTC.
- */
- optional uint64 reportStartTime = 7;
-
- /**
- * Time when report was generated.
- * time is measured as the difference, measured in milliseconds, between the current time
- * and midnight, January 1, 1970 UTC.
- */
- optional uint64 reportEndTime = 8;
-
- /**
- * The port number that this region server is hosing an info server on.
- */
- optional uint32 infoServerPort = 9;
-}
-
/**
* A range of time. Both from and to are Java time
* stamp in milliseconds. If you don't specify a time
@@ -219,11 +96,6 @@ message TimeRange {
optional uint64 to = 2;
}
-message Filter {
- required string name = 1;
- optional bytes serializedFilter = 2;
-}
-
/* Comparison operators */
enum CompareType {
LESS = 0;
@@ -236,19 +108,6 @@ enum CompareType {
}
/**
- * Protocol buffer version of KeyValue.
- * It doesn't have those transient parameters
- */
-message KeyValue {
- required bytes row = 1;
- required bytes family = 2;
- required bytes qualifier = 3;
- optional uint64 timestamp = 4;
- optional CellType keyType = 5;
- optional bytes value = 6;
-}
-
-/**
* Protocol buffer version of ServerName
*/
message ServerName {
Modified: hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon (original)
+++ hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.jamon Sat Jun 22 00:15:11 2013
@@ -30,7 +30,7 @@ org.apache.hadoop.hbase.ServerName;
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
</%import>
<%if format.equals("json") %>
<& ../common/TaskMonitorTmpl; filter = filter; format = "json" &>
Modified: hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon (original)
+++ hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon Sat Jun 22 00:15:11 2013
@@ -29,7 +29,7 @@
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
- org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+ org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
</%import>
<%if (onlineRegions != null && onlineRegions.size() > 0) %>
Modified: hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon (original)
+++ hbase/trunk/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon Sat Jun 22 00:15:11 2013
@@ -29,7 +29,7 @@ org.apache.hadoop.hbase.ServerName;
org.apache.hadoop.hbase.HBaseConfiguration;
org.apache.hadoop.hbase.protobuf.ProtobufUtil;
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ServerInfo;
-org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
org.apache.hadoop.hbase.metrics.histogram.MetricsHistogram;
org.apache.hadoop.util.StringUtils;
com.yammer.metrics.stats.Snapshot;
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/codec/MessageCodec.java Sat Jun 22 00:15:11 2013
@@ -27,7 +27,7 @@ import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.codec.BaseDecoder;
import org.apache.hadoop.hbase.codec.BaseEncoder;
import org.apache.hadoop.hbase.codec.Codec;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.CellProtos;
import com.google.protobuf.ByteString;
import org.apache.hadoop.classification.InterfaceStability;
@@ -47,7 +47,7 @@ public class MessageCodec implements Cod
@Override
public void write(Cell cell) throws IOException {
checkFlushed();
- HBaseProtos.Cell.Builder builder = HBaseProtos.Cell.newBuilder();
+ CellProtos.Cell.Builder builder = CellProtos.Cell.newBuilder();
// This copies bytes from Cell to ByteString. I don't see anyway around the copy.
// ByteString is final.
builder.setRow(ByteString.copyFrom(cell.getRowArray(), cell.getRowOffset(),
@@ -57,10 +57,10 @@ public class MessageCodec implements Cod
builder.setQualifier(ByteString.copyFrom(cell.getQualifierArray(), cell.getQualifierOffset(),
cell.getQualifierLength()));
builder.setTimestamp(cell.getTimestamp());
- builder.setCellType(HBaseProtos.CellType.valueOf(cell.getTypeByte()));
+ builder.setCellType(CellProtos.CellType.valueOf(cell.getTypeByte()));
builder.setValue(ByteString.copyFrom(cell.getValueArray(), cell.getValueOffset(),
cell.getValueLength()));
- HBaseProtos.Cell pbcell = builder.build();
+ CellProtos.Cell pbcell = builder.build();
pbcell.writeDelimitedTo(this.out);
}
}
@@ -71,7 +71,7 @@ public class MessageCodec implements Cod
}
protected Cell parseCell() throws IOException {
- HBaseProtos.Cell pbcell = HBaseProtos.Cell.parseDelimitedFrom(this.in);
+ CellProtos.Cell pbcell = CellProtos.Cell.parseDelimitedFrom(this.in);
return CellUtil.createCell(pbcell.getRow().toByteArray(),
pbcell.getFamily().toByteArray(), pbcell.getQualifier().toByteArray(),
pbcell.getTimestamp(), (byte)pbcell.getCellType().getNumber(),
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Sat Jun 22 00:15:11 2013
@@ -102,7 +102,7 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -1244,7 +1244,7 @@ MasterServices, Server {
public RegionServerReportResponse regionServerReport(
RpcController controller, RegionServerReportRequest request) throws ServiceException {
try {
- HBaseProtos.ServerLoad sl = request.getLoad();
+ ClusterStatusProtos.ServerLoad sl = request.getLoad();
this.serverManager.regionServerReport(ProtobufUtil.toServerName(request.getServer()), new ServerLoad(sl));
if (sl != null && this.metricsMaster != null) {
// Up our metrics.
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java Sat Jun 22 00:15:11 2013
@@ -46,7 +46,6 @@ import org.apache.hadoop.hbase.client.HC
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.RetriesExhaustedException;
import org.apache.hadoop.hbase.exceptions.ClockOutOfSyncException;
-import org.apache.hadoop.hbase.exceptions.PleaseHoldException;
import org.apache.hadoop.hbase.exceptions.YouAreDeadException;
import org.apache.hadoop.hbase.exceptions.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Sat Jun 22 00:15:11 2013
@@ -170,10 +170,10 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultCellMeta;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
@@ -978,7 +978,7 @@ public class HRegionServer implements Cl
void tryRegionServerReport(long reportStartTime, long reportEndTime)
throws IOException {
- HBaseProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
+ ClusterStatusProtos.ServerLoad sl = buildServerLoad(reportStartTime, reportEndTime);
try {
RegionServerReportRequest.Builder request = RegionServerReportRequest.newBuilder();
ServerName sn = ServerName.parseVersionedServerName(
@@ -1000,7 +1000,7 @@ public class HRegionServer implements Cl
}
}
- HBaseProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
+ ClusterStatusProtos.ServerLoad buildServerLoad(long reportStartTime, long reportEndTime) {
// We're getting the MetricsRegionServerWrapper here because the wrapper computes requests
// per second, and other metrics As long as metrics are part of ServerLoad it's best to use
// the wrapper to compute those numbers in one place.
@@ -1013,7 +1013,8 @@ public class HRegionServer implements Cl
MemoryUsage memory =
ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
- HBaseProtos.ServerLoad.Builder serverLoad = HBaseProtos.ServerLoad.newBuilder();
+ ClusterStatusProtos.ServerLoad.Builder serverLoad =
+ ClusterStatusProtos.ServerLoad.newBuilder();
serverLoad.setNumberOfRequests((int) regionServerWrapper.getRequestsPerSecond());
serverLoad.setTotalNumberOfRequests((int) regionServerWrapper.getTotalRequestCount());
serverLoad.setUsedHeapMB((int)(memory.getUsed() / 1024 / 1024));
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java Sat Jun 22 00:15:11 2013
@@ -22,6 +22,7 @@ package org.apache.hadoop.hbase;
import static org.junit.Assert.*;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -56,7 +57,7 @@ public class TestServerLoad {
assertTrue(slToString.contains("coprocessors=[]"));
}
- private HBaseProtos.ServerLoad createServerLoadProto() {
+ private ClusterStatusProtos.ServerLoad createServerLoadProto() {
HBaseProtos.RegionSpecifier rSpecOne =
HBaseProtos.RegionSpecifier.newBuilder()
.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
@@ -66,17 +67,18 @@ public class TestServerLoad {
.setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
.setValue(ByteString.copyFromUtf8("QWERTYUIOP")).build();
- HBaseProtos.RegionLoad rlOne =
- HBaseProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
+ ClusterStatusProtos.RegionLoad rlOne =
+ ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
.setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
.setStorefileIndexSizeMB(42).setRootIndexSizeKB(201).build();
- HBaseProtos.RegionLoad rlTwo =
- HBaseProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
+ ClusterStatusProtos.RegionLoad rlTwo =
+ ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
.setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
.setStorefileIndexSizeMB(40).setRootIndexSizeKB(303).build();
- HBaseProtos.ServerLoad sl =
- HBaseProtos.ServerLoad.newBuilder().addRegionLoads(rlOne).addRegionLoads(rlTwo).build();
+ ClusterStatusProtos.ServerLoad sl =
+ ClusterStatusProtos.ServerLoad.newBuilder().addRegionLoads(rlOne).
+ addRegionLoads(rlTwo).build();
return sl;
}
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestColumnPaginationFilter.java Sat Jun 22 00:15:11 2013
@@ -18,23 +18,17 @@
*/
package org.apache.hadoop.hbase.filter;
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
+import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.SmallTests;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.FilterProtos;
import org.apache.hadoop.hbase.util.Bytes;
-
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import static org.junit.Assert.assertTrue;
-
/**
* Test for the ColumnPaginationFilter, used mainly to test the successful serialization of the filter.
* More test functionality can be found within {@link org.apache.hadoop.hbase.filter.TestFilter#testColumnPaginationFilter()}
@@ -65,7 +59,7 @@ public class TestColumnPaginationFilter
}
private Filter serializationTest(Filter filter) throws Exception {
- HBaseProtos.Filter filterProto = ProtobufUtil.toFilter(filter);
+ FilterProtos.Filter filterProto = ProtobufUtil.toFilter(filter);
Filter newFilter = ProtobufUtil.toFilter(filterProto);
return newFilter;
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java?rev=1495642&r1=1495641&r2=1495642&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestMasterMetrics.java Sat Jun 22 00:15:11 2013
@@ -24,7 +24,7 @@ import org.apache.hadoop.hbase.HBaseTest
import org.apache.hadoop.hbase.MediumTests;
import org.apache.hadoop.hbase.MiniHBaseCluster;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
import org.apache.hadoop.hbase.test.MetricsAssertHelper;
@@ -72,7 +72,7 @@ public class TestMasterMetrics {
HRegionServer rs = cluster.getRegionServer(0);
request.setServer(ProtobufUtil.toServerName(rs.getServerName()));
- HBaseProtos.ServerLoad sl = HBaseProtos.ServerLoad.newBuilder()
+ ClusterStatusProtos.ServerLoad sl = ClusterStatusProtos.ServerLoad.newBuilder()
.setTotalNumberOfRequests(10000)
.build();
master.getMetrics().getMetricsSource().init();