Posted to commits@hbase.apache.org by la...@apache.org on 2015/01/29 22:56:16 UTC
[1/3] hbase git commit: HBASE-12859 New master API to track major compaction completion.
Repository: hbase
Updated Branches:
refs/heads/branch-1 8ddc90c20 -> 0b3502f2b
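For orientation: the patch surfaces the new calls through the client Admin interface (they are exercised by the TestAdmin1 changes below). A minimal usage sketch, assuming the usual branch-1 ConnectionFactory setup and a hypothetical table name:

  import java.io.IOException;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class MajorCompactionTsExample {
    public static void main(String[] args) throws IOException {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
          Admin admin = conn.getAdmin()) {
        TableName tn = TableName.valueOf("mytable"); // hypothetical table
        // 0 means no HFile resulting from a major compaction exists yet
        long ts = admin.getLastMajorCompactionTimestamp(tn);
        System.out.println("last major compaction ts for " + tn + ": " + ts);
      }
    }
  }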
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-protocol/src/main/protobuf/ClusterStatus.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
index 7e78395..2b2d9eb 100644
--- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
@@ -113,6 +113,8 @@ message RegionLoad {
/** The current data locality for region in the regionserver */
optional float data_locality = 16;
+
+ optional uint64 last_major_compaction_ts = 17 [default = 0];
}
/* Server-level protobufs */
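The new RegionLoad field is what each region server reports and what the master aggregates. Reading it back on the client is a one-liner per region; a sketch, assuming the client-side RegionLoad wrapper (updated in another part of this three-part commit) mirrors the proto field with a getLastMajorCompactionTs() getter:

  // Continuing the Admin sketch above:
  ClusterStatus status = admin.getClusterStatus();
  for (ServerName sn : status.getServers()) {
    for (RegionLoad rl : status.getLoad(sn).getRegionsLoad().values()) {
      // assumed wrapper getter mirroring last_major_compaction_ts
      System.out.println(rl.getNameAsString() + " -> " + rl.getLastMajorCompactionTs());
    }
  }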
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-protocol/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/protobuf/Master.proto b/hbase-protocol/src/main/protobuf/Master.proto
index 34f68e9..170a326 100644
--- a/hbase-protocol/src/main/protobuf/Master.proto
+++ b/hbase-protocol/src/main/protobuf/Master.proto
@@ -364,6 +364,18 @@ message IsProcedureDoneResponse {
optional ProcedureDescription snapshot = 2;
}
+message MajorCompactionTimestampRequest {
+ required TableName table_name = 1;
+}
+
+message MajorCompactionTimestampForRegionRequest {
+ required RegionSpecifier region = 1;
+}
+
+message MajorCompactionTimestampResponse {
+ required int64 compaction_timestamp = 1;
+}
+
service MasterService {
/** Used by the client to get the number of regions that have received the updated schema */
rpc GetSchemaAlterStatus(GetSchemaAlterStatusRequest)
@@ -571,4 +583,12 @@ service MasterService {
/** returns a list of tables for a given namespace*/
rpc ListTableNamesByNamespace(ListTableNamesByNamespaceRequest)
returns(ListTableNamesByNamespaceResponse);
+
+ /** Returns the timestamp of the last major compaction for the given table */
+ rpc getLastMajorCompactionTimestamp(MajorCompactionTimestampRequest)
+ returns(MajorCompactionTimestampResponse);
+
+ /** Returns the timestamp of the last major compaction for the given region */
+ rpc getLastMajorCompactionTimestampForRegion(MajorCompactionTimestampForRegionRequest)
+ returns(MajorCompactionTimestampResponse);
}
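The request/response plumbing is plain protobuf; the generated builders appear in part 2/3 below. Building a table-level request by hand would look like this (ProtobufUtil.toProtoTableName converts the client TableName to its proto form):

  MajorCompactionTimestampRequest req = MajorCompactionTimestampRequest.newBuilder()
      .setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf("mytable")))
      .build();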
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
index 2bef680..52491e6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/AbstractHFileWriter.java
@@ -148,6 +148,9 @@ public abstract class AbstractHFileWriter implements HFile.Writer {
int avgValueLen =
entryCount == 0 ? 0 : (int) (totalValueLength / entryCount);
fileInfo.append(FileInfo.AVG_VALUE_LEN, Bytes.toBytes(avgValueLen), false);
+
+ fileInfo.append(FileInfo.CREATE_TIME_TS, Bytes.toBytes(hFileContext.getFileCreateTime()),
+ false);
}
/**
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
index f938020..f168420 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFile.java
@@ -540,6 +540,7 @@ public class HFile {
static final byte [] LASTKEY = Bytes.toBytes(RESERVED_PREFIX + "LASTKEY");
static final byte [] AVG_KEY_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_KEY_LEN");
static final byte [] AVG_VALUE_LEN = Bytes.toBytes(RESERVED_PREFIX + "AVG_VALUE_LEN");
+ static final byte [] CREATE_TIME_TS = Bytes.toBytes(RESERVED_PREFIX + "CREATE_TIME_TS");
static final byte [] COMPARATOR = Bytes.toBytes(RESERVED_PREFIX + "COMPARATOR");
static final byte [] TAGS_COMPRESSED = Bytes.toBytes(RESERVED_PREFIX + "TAGS_COMPRESSED");
public static final byte [] MAX_TAGS_LEN = Bytes.toBytes(RESERVED_PREFIX + "MAX_TAGS_LEN");
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
index e466041..26cb6c9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderV2.java
@@ -157,6 +157,7 @@ public class HFileReaderV2 extends AbstractHFileReader {
// File info
fileInfo = new FileInfo();
fileInfo.read(blockIter.nextBlockWithBlockType(BlockType.FILE_INFO).getByteStream());
+ this.hfileContext.setFileCreateTime(fileInfo.get(FileInfo.CREATE_TIME_TS) == null ? 0 : Bytes.toLong(fileInfo.get(FileInfo.CREATE_TIME_TS))); // files written before this change lack CREATE_TIME_TS
lastKey = fileInfo.get(FileInfo.LASTKEY);
avgKeyLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_KEY_LEN));
avgValueLen = Bytes.toInt(fileInfo.get(FileInfo.AVG_VALUE_LEN));
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 775f018..2617c85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2305,4 +2305,14 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
}
}
}
+
+ @Override
+ public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
+ return getClusterStatus().getLastMajorCompactionTsForTable(table);
+ }
+
+ @Override
+ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
+ return getClusterStatus().getLastMajorCompactionTsForRegion(regionName);
+ }
}
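getLastMajorCompactionTsForTable/ForRegion live on ClusterStatus and ship in another part of this commit. Plausibly they reduce the RegionLoads reported by the region servers to a minimum, along these lines (a sketch, not the committed code):

  // Hypothetical per-table reduction over the reported RegionLoads:
  long result = Long.MAX_VALUE;
  for (ServerName server : getServers()) {
    for (RegionLoad rl : getLoad(server).getRegionsLoad().values()) {
      if (table.equals(HRegionInfo.getTable(rl.getName()))) {
        result = Math.min(result, rl.getLastMajorCompactionTs());
      }
    }
  }
  // Long.MAX_VALUE means no matching region reported; returning 0 keeps the
  // "no major-compacted HFile" convention used by HRegion.getOldestHfileTs below.
  return result == Long.MAX_VALUE ? 0 : result;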
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index d8a71ff..81424bf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -106,6 +106,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
@@ -1243,4 +1246,35 @@ public class MasterRpcServices extends RSRpcServices
throw new ServiceException(ioe);
}
}
+
+ @Override
+ public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(RpcController controller,
+ MajorCompactionTimestampRequest request) throws ServiceException {
+ MajorCompactionTimestampResponse.Builder response =
+ MajorCompactionTimestampResponse.newBuilder();
+ try {
+ master.checkInitialized();
+ response.setCompactionTimestamp(master.getLastMajorCompactionTimestamp(ProtobufUtil
+ .toTableName(request.getTableName())));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return response.build();
+ }
+
+ @Override
+ public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+ RpcController controller, MajorCompactionTimestampForRegionRequest request)
+ throws ServiceException {
+ MajorCompactionTimestampResponse.Builder response =
+ MajorCompactionTimestampResponse.newBuilder();
+ try {
+ master.checkInitialized();
+ response.setCompactionTimestamp(master.getLastMajorCompactionTimestampForRegion(request
+ .getRegion().getValue().toByteArray()));
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ return response.build();
+ }
}
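For completeness, the raw call a client makes against this endpoint via the generated blocking stub (HBaseAdmin wraps this in another part of the commit); a sketch, with masterStub assumed to be a MasterService.BlockingInterface obtained from the cluster connection:

  MajorCompactionTimestampResponse resp =
      masterStub.getLastMajorCompactionTimestamp(null, // no RpcController
          MajorCompactionTimestampRequest.newBuilder()
              .setTableName(ProtobufUtil.toProtoTableName(tableName))
              .build());
  long ts = resp.getCompactionTimestamp();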
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 627b3c5..458e53c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -250,4 +250,20 @@ public interface MasterServices extends Server {
* @throws IOException
*/
public List<TableName> listTableNamesByNamespace(String name) throws IOException;
+
+ /**
+ * @param table the table for which to fetch the last major compaction timestamp
+ * @return the timestamp of the last successful major compaction for the passed table,
+ * or 0 if no HFile resulting from a major compaction exists
+ * @throws IOException
+ */
+ public long getLastMajorCompactionTimestamp(TableName table) throws IOException;
+
+ /**
+ * @param regionName the name of the region for which to fetch the last major compaction timestamp
+ * @return the timestamp of the last successful major compaction for the passed region,
+ * or 0 if no HFile resulting from a major compaction exists
+ * @throws IOException
+ */
+ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException;
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 758af86..15270b3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -118,6 +118,7 @@ import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.io.hfile.BlockCache;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.ipc.RpcServer;
@@ -1492,6 +1493,28 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
return Collections.min(lastStoreFlushTimeMap.values());
}
+ /**
+ * This can be used to determine the last time all files of this region were major compacted.
+ * @param majorCompactionOnly Only consider HFiles that are the result of a major compaction
+ * @return the timestamp of the oldest HFile for all stores of this region
+ */
+ public long getOldestHfileTs(boolean majorCompactionOnly) throws IOException {
+ long result = Long.MAX_VALUE;
+ for (Store store : getStores().values()) {
+ for (StoreFile file : store.getStorefiles()) {
+ HFile.Reader reader = file.getReader().getHFileReader();
+ if (majorCompactionOnly) {
+ byte[] val = reader.loadFileInfo().get(StoreFile.MAJOR_COMPACTION_KEY);
+ if (val == null || !Bytes.toBoolean(val)) {
+ continue;
+ }
+ }
+ result = Math.min(result, reader.getFileContext().getFileCreateTime());
+ }
+ }
+ return result == Long.MAX_VALUE ? 0 : result;
+ }
+
//////////////////////////////////////////////////////////////////////////////
// HRegion maintenance.
//
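The caller added in this patch is HRegionServer.createRegionLoad (next hunk); in-process usage is simply:

  long ts = region.getOldestHfileTs(true); // 0 => no major-compacted HFile exists yet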
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 484d437..a76c9c6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1328,7 +1328,7 @@ public class HRegionServer extends HasThread implements
* @throws IOException
*/
private RegionLoad createRegionLoad(final HRegion r, RegionLoad.Builder regionLoadBldr,
- RegionSpecifier.Builder regionSpecifier) {
+ RegionSpecifier.Builder regionSpecifier) throws IOException {
byte[] name = r.getRegionName();
int stores = 0;
int storefiles = 0;
@@ -1390,8 +1390,8 @@ public class HRegionServer extends HasThread implements
.setTotalCompactingKVs(totalCompactingKVs)
.setCurrentCompactedKVs(currentCompactedKVs)
.setCompleteSequenceId(r.maxFlushedSeqId)
- .setDataLocality(dataLocality);
-
+ .setDataLocality(dataLocality)
+ .setLastMajorCompactionTs(r.getOldestHfileTs(true));
return regionLoadBldr.build();
}
@@ -1399,7 +1399,7 @@ public class HRegionServer extends HasThread implements
* @param encodedRegionName
* @return An instance of RegionLoad.
*/
- public RegionLoad createRegionLoad(final String encodedRegionName) {
+ public RegionLoad createRegionLoad(final String encodedRegionName) throws IOException {
HRegion r = null;
r = this.onlineRegions.get(encodedRegionName);
return r != null ? createRegionLoad(r, null, null) : null;
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 6a65038..94cf9d5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -991,6 +991,7 @@ public class HStore implements Store {
.withHBaseCheckSum(true)
.withDataBlockEncoding(family.getDataBlockEncoding())
.withEncryptionContext(cryptoContext)
+ .withCreateTime(EnvironmentEdgeManager.currentTime())
.build();
return hFileContext;
}
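Taking the create time from EnvironmentEdgeManager rather than System.currentTimeMillis() keeps it controllable in tests; a sketch using the existing ManualEnvironmentEdge utility:

  // Pin the clock so newly written HFiles carry a deterministic CREATE_TIME_TS:
  ManualEnvironmentEdge edge = new ManualEnvironmentEdge();
  edge.setValue(12345L);
  EnvironmentEdgeManager.injectEdge(edge);
  // ... flush or compact; HFiles written now record create time 12345 ...
  EnvironmentEdgeManager.reset();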
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
index e7d8da2..400d929 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAdmin1.java
@@ -412,6 +412,58 @@ public class TestAdmin1 {
}
@Test (timeout=300000)
+ public void testCompactionTimestamps() throws Exception {
+ HColumnDescriptor fam1 = new HColumnDescriptor("fam1");
+ TableName tableName = TableName.valueOf("testCompactionTimestampsTable");
+ HTableDescriptor htd = new HTableDescriptor(tableName);
+ htd.addFamily(fam1);
+ this.admin.createTable(htd);
+ HTable table = (HTable)TEST_UTIL.getConnection().getTable(htd.getTableName());
+ long ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ assertEquals(0, ts);
+ Put p = new Put(Bytes.toBytes("row1"));
+ p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
+ table.put(p);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // no files written -> no data
+ assertEquals(0, ts);
+
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // still 0, we flushed a file, but no major compaction happened
+ assertEquals(0, ts);
+
+ byte[] regionName =
+ table.getRegionLocator().getAllRegionLocations().get(0).getRegionInfo().getRegionName();
+ long ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
+ assertEquals(ts, ts1);
+ p = new Put(Bytes.toBytes("row2"));
+ p.add(Bytes.toBytes("fam1"), Bytes.toBytes("fam1"), Bytes.toBytes("fam1"));
+ table.put(p);
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // make sure the region API returns the same value, as the old file is still around
+ assertEquals(ts1, ts);
+
+ TEST_UTIL.compact(tableName, true);
+ table.put(p);
+ // forces a wait for the compaction
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ // after a compaction our earliest timestamp will have progressed forward
+ assertTrue(ts > ts1);
+
+ // region api still the same
+ ts1 = this.admin.getLastMajorCompactionTimestampForRegion(regionName);
+ assertEquals(ts, ts1);
+ table.put(p);
+ this.admin.flush(tableName);
+ ts = this.admin.getLastMajorCompactionTimestamp(tableName);
+ assertEquals(ts, ts1);
+ table.close();
+ }
+
+ @Test (timeout=300000)
public void testHColumnValidName() {
boolean exceptionThrown;
try {
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
index 912c600..e4c1dbb 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestCatalogJanitor.java
@@ -427,6 +427,18 @@ public class TestCatalogJanitor {
// Auto-generated method stub
return false;
}
+
+ @Override
+ public long getLastMajorCompactionTimestamp(TableName table) throws IOException {
+ // Auto-generated method stub
+ return 0;
+ }
+
+ @Override
+ public long getLastMajorCompactionTimestampForRegion(byte[] regionName) throws IOException {
+ // Auto-generated method stub
+ return 0;
+ }
}
@Test
[2/3] hbase git commit: HBASE-12859 New master API to track major compaction completion.
Posted by la...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 6821a81..9150e43 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -41475,6 +41475,1570 @@ public final class MasterProtos {
// @@protoc_insertion_point(class_scope:IsProcedureDoneResponse)
}
+ public interface MajorCompactionTimestampRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .TableName table_name = 1;
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ boolean hasTableName();
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName();
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+ }
+ /**
+ * Protobuf type {@code MajorCompactionTimestampRequest}
+ */
+ public static final class MajorCompactionTimestampRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements MajorCompactionTimestampRequestOrBuilder {
+ // Use MajorCompactionTimestampRequest.newBuilder() to construct.
+ private MajorCompactionTimestampRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private MajorCompactionTimestampRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final MajorCompactionTimestampRequest defaultInstance;
+ public static MajorCompactionTimestampRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public MajorCompactionTimestampRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private MajorCompactionTimestampRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = tableName_.toBuilder();
+ }
+ tableName_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(tableName_);
+ tableName_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<MajorCompactionTimestampRequest> PARSER =
+ new com.google.protobuf.AbstractParser<MajorCompactionTimestampRequest>() {
+ public MajorCompactionTimestampRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new MajorCompactionTimestampRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<MajorCompactionTimestampRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .TableName table_name = 1;
+ public static final int TABLE_NAME_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_;
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ return tableName_;
+ }
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ return tableName_;
+ }
+
+ private void initFields() {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasTableName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, tableName_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, tableName_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) obj;
+
+ boolean result = true;
+ result = result && (hasTableName() == other.hasTableName());
+ if (hasTableName()) {
+ result = result && getTableName()
+ .equals(other.getTableName());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasTableName()) {
+ hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+ hash = (53 * hash) + getTableName().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code MajorCompactionTimestampRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getTableNameFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (tableNameBuilder_ == null) {
+ result.tableName_ = tableName_;
+ } else {
+ result.tableName_ = tableNameBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance()) return this;
+ if (other.hasTableName()) {
+ mergeTableName(other.getTableName());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasTableName()) {
+
+ return false;
+ }
+ if (!getTableName().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .TableName table_name = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public boolean hasTableName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName getTableName() {
+ if (tableNameBuilder_ == null) {
+ return tableName_;
+ } else {
+ return tableNameBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public Builder setTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ tableName_ = value;
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public Builder setTableName(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+ if (tableNameBuilder_ == null) {
+ tableName_ = builderForValue.build();
+ onChanged();
+ } else {
+ tableNameBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public Builder mergeTableName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName value) {
+ if (tableNameBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ tableName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+ tableName_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+ } else {
+ tableName_ = value;
+ }
+ onChanged();
+ } else {
+ tableNameBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public Builder clearTableName() {
+ if (tableNameBuilder_ == null) {
+ tableName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.getDefaultInstance();
+ onChanged();
+ } else {
+ tableNameBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getTableNameFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+ if (tableNameBuilder_ != null) {
+ return tableNameBuilder_.getMessageOrBuilder();
+ } else {
+ return tableName_;
+ }
+ }
+ /**
+ * <code>required .TableName table_name = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>
+ getTableNameFieldBuilder() {
+ if (tableNameBuilder_ == null) {
+ tableNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+ tableName_,
+ getParentForChildren(),
+ isClean());
+ tableName_ = null;
+ }
+ return tableNameBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:MajorCompactionTimestampRequest)
+ }
+
+ static {
+ defaultInstance = new MajorCompactionTimestampRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:MajorCompactionTimestampRequest)
+ }
+
+ public interface MajorCompactionTimestampForRegionRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required .RegionSpecifier region = 1;
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ boolean hasRegion();
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion();
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder();
+ }
+ /**
+ * Protobuf type {@code MajorCompactionTimestampForRegionRequest}
+ */
+ public static final class MajorCompactionTimestampForRegionRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements MajorCompactionTimestampForRegionRequestOrBuilder {
+ // Use MajorCompactionTimestampForRegionRequest.newBuilder() to construct.
+ private MajorCompactionTimestampForRegionRequest(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private MajorCompactionTimestampForRegionRequest(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final MajorCompactionTimestampForRegionRequest defaultInstance;
+ public static MajorCompactionTimestampForRegionRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private MajorCompactionTimestampForRegionRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 10: {
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ subBuilder = region_.toBuilder();
+ }
+ region_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
+ if (subBuilder != null) {
+ subBuilder.mergeFrom(region_);
+ region_ = subBuilder.buildPartial();
+ }
+ bitField0_ |= 0x00000001;
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampForRegionRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<MajorCompactionTimestampForRegionRequest> PARSER =
+ new com.google.protobuf.AbstractParser<MajorCompactionTimestampForRegionRequest>() {
+ public MajorCompactionTimestampForRegionRequest parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new MajorCompactionTimestampForRegionRequest(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<MajorCompactionTimestampForRegionRequest> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required .RegionSpecifier region = 1;
+ public static final int REGION_FIELD_NUMBER = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_;
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public boolean hasRegion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() {
+ return region_;
+ }
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() {
+ return region_;
+ }
+
+ private void initFields() {
+ region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasRegion()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!getRegion().isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeMessage(1, region_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(1, region_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) obj;
+
+ boolean result = true;
+ result = result && (hasRegion() == other.hasRegion());
+ if (hasRegion()) {
+ result = result && getRegion()
+ .equals(other.getRegion());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasRegion()) {
+ hash = (37 * hash) + REGION_FIELD_NUMBER;
+ hash = (53 * hash) + getRegion().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code MajorCompactionTimestampForRegionRequest}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampForRegionRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getRegionFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ if (regionBuilder_ == null) {
+ region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ } else {
+ regionBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampForRegionRequest_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ if (regionBuilder_ == null) {
+ result.region_ = region_;
+ } else {
+ result.region_ = regionBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance()) return this;
+ if (other.hasRegion()) {
+ mergeRegion(other.getRegion());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasRegion()) {
+
+ return false;
+ }
+ if (!getRegion().isInitialized()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required .RegionSpecifier region = 1;
+ private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionBuilder_;
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public boolean hasRegion() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegion() {
+ if (regionBuilder_ == null) {
+ return region_;
+ } else {
+ return regionBuilder_.getMessage();
+ }
+ }
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public Builder setRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+ if (regionBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ region_ = value;
+ onChanged();
+ } else {
+ regionBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public Builder setRegion(
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
+ if (regionBuilder_ == null) {
+ region_ = builderForValue.build();
+ onChanged();
+ } else {
+ regionBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public Builder mergeRegion(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
+ if (regionBuilder_ == null) {
+ if (((bitField0_ & 0x00000001) == 0x00000001) &&
+ region_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
+ region_ =
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(region_).mergeFrom(value).buildPartial();
+ } else {
+ region_ = value;
+ }
+ onChanged();
+ } else {
+ regionBuilder_.mergeFrom(value);
+ }
+ bitField0_ |= 0x00000001;
+ return this;
+ }
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public Builder clearRegion() {
+ if (regionBuilder_ == null) {
+ region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
+ onChanged();
+ } else {
+ regionBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionBuilder() {
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return getRegionFieldBuilder().getBuilder();
+ }
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionOrBuilder() {
+ if (regionBuilder_ != null) {
+ return regionBuilder_.getMessageOrBuilder();
+ } else {
+ return region_;
+ }
+ }
+ /**
+ * <code>required .RegionSpecifier region = 1;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
+ getRegionFieldBuilder() {
+ if (regionBuilder_ == null) {
+ regionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
+ region_,
+ getParentForChildren(),
+ isClean());
+ region_ = null;
+ }
+ return regionBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:MajorCompactionTimestampForRegionRequest)
+ }
+
+ static {
+ defaultInstance = new MajorCompactionTimestampForRegionRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:MajorCompactionTimestampForRegionRequest)
+ }
+
+ public interface MajorCompactionTimestampResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required int64 compaction_timestamp = 1;
+ /**
+ * <code>required int64 compaction_timestamp = 1;</code>
+ */
+ boolean hasCompactionTimestamp();
+ /**
+ * <code>required int64 compaction_timestamp = 1;</code>
+ */
+ long getCompactionTimestamp();
+ }
+ /**
+ * Protobuf type {@code MajorCompactionTimestampResponse}
+ */
+ public static final class MajorCompactionTimestampResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements MajorCompactionTimestampResponseOrBuilder {
+ // Use MajorCompactionTimestampResponse.newBuilder() to construct.
+ private MajorCompactionTimestampResponse(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+ super(builder);
+ this.unknownFields = builder.getUnknownFields();
+ }
+ private MajorCompactionTimestampResponse(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+ private static final MajorCompactionTimestampResponse defaultInstance;
+ public static MajorCompactionTimestampResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public MajorCompactionTimestampResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ private final com.google.protobuf.UnknownFieldSet unknownFields;
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private MajorCompactionTimestampResponse(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ initFields();
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ compactionTimestamp_ = input.readInt64();
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e.getMessage()).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class);
+ }
+
+ public static com.google.protobuf.Parser<MajorCompactionTimestampResponse> PARSER =
+ new com.google.protobuf.AbstractParser<MajorCompactionTimestampResponse>() {
+ public MajorCompactionTimestampResponse parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return new MajorCompactionTimestampResponse(input, extensionRegistry);
+ }
+ };
+
+ @java.lang.Override
+ public com.google.protobuf.Parser<MajorCompactionTimestampResponse> getParserForType() {
+ return PARSER;
+ }
+
+ private int bitField0_;
+ // required int64 compaction_timestamp = 1;
+ public static final int COMPACTION_TIMESTAMP_FIELD_NUMBER = 1;
+ private long compactionTimestamp_;
+ /**
+ * <code>required int64 compaction_timestamp = 1;</code>
+ */
+ public boolean hasCompactionTimestamp() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required int64 compaction_timestamp = 1;</code>
+ */
+ public long getCompactionTimestamp() {
+ return compactionTimestamp_;
+ }
+
+ private void initFields() {
+ compactionTimestamp_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasCompactionTimestamp()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeInt64(1, compactionTimestamp_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt64Size(1, compactionTimestamp_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) obj;
+
+ boolean result = true;
+ result = result && (hasCompactionTimestamp() == other.hasCompactionTimestamp());
+ if (hasCompactionTimestamp()) {
+ result = result && (getCompactionTimestamp()
+ == other.getCompactionTimestamp());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ private int memoizedHashCode = 0;
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasCompactionTimestamp()) {
+ hash = (37 * hash) + COMPACTION_TIMESTAMP_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getCompactionTimestamp());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseDelimitedFrom(input, extensionRegistry);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input);
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return PARSER.parseFrom(input, extensionRegistry);
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * Protobuf type {@code MajorCompactionTimestampResponse}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampResponse_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class, org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.Builder.class);
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ compactionTimestamp_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.internal_static_MajorCompactionTimestampResponse_descriptor;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse result = new org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.compactionTimestamp_ = compactionTimestamp_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()) return this;
+ if (other.hasCompactionTimestamp()) {
+ setCompactionTimestamp(other.getCompactionTimestamp());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasCompactionTimestamp()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse parsedMessage = null;
+ try {
+ parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) e.getUnfinishedMessage();
+ throw e;
+ } finally {
+ if (parsedMessage != null) {
+ mergeFrom(parsedMessage);
+ }
+ }
+ return this;
+ }
+ private int bitField0_;
+
+ // required int64 compaction_timestamp = 1;
+ private long compactionTimestamp_ ;
+ /**
+ * <code>required int64 compaction_timestamp = 1;</code>
+ */
+ public boolean hasCompactionTimestamp() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ /**
+ * <code>required int64 compaction_timestamp = 1;</code>
+ */
+ public long getCompactionTimestamp() {
+ return compactionTimestamp_;
+ }
+ /**
+ * <code>required int64 compaction_timestamp = 1;</code>
+ */
+ public Builder setCompactionTimestamp(long value) {
+ bitField0_ |= 0x00000001;
+ compactionTimestamp_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>required int64 compaction_timestamp = 1;</code>
+ */
+ public Builder clearCompactionTimestamp() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ compactionTimestamp_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:MajorCompactionTimestampResponse)
+ }
+
+ static {
+ defaultInstance = new MajorCompactionTimestampResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:MajorCompactionTimestampResponse)
+ }
+
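(Aside: the generated message above follows the stock protobuf builder pattern, so a response can be round-tripped with nothing beyond the MasterProtos classes in this patch. A minimal sketch; the timestamp value here is purely illustrative:

    MajorCompactionTimestampResponse resp =
        MajorCompactionTimestampResponse.newBuilder()
            .setCompactionTimestamp(System.currentTimeMillis())
            .build();
    // The single required field is set, so isInitialized() holds and build() does not throw.
    byte[] wire = resp.toByteArray();
    MajorCompactionTimestampResponse parsed =
        MajorCompactionTimestampResponse.parseFrom(wire);
    assert parsed.getCompactionTimestamp() == resp.getCompactionTimestamp();
)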
/**
* Protobuf service {@code MasterService}
*/
@@ -42025,6 +43589,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse> done);
+ /**
+ * <code>rpc getLastMajorCompactionTimestamp(.MajorCompactionTimestampRequest) returns (.MajorCompactionTimestampResponse);</code>
+ *
+ * <pre>
+ ** Returns the timestamp of the last major compaction
+ * </pre>
+ */
+ public abstract void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done);
+
+ /**
+ * <code>rpc getLastMajorCompactionTimestampForRegion(.MajorCompactionTimestampForRegionRequest) returns (.MajorCompactionTimestampResponse);</code>
+ *
+ * <pre>
+ ** Returns the timestamp of the last major compaction
+ * </pre>
+ */
+ public abstract void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -42374,6 +43962,22 @@ public final class MasterProtos {
impl.listTableNamesByNamespace(controller, request, done);
}
+ @java.lang.Override
+ public void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
+ impl.getLastMajorCompactionTimestamp(controller, request, done);
+ }
+
+ @java.lang.Override
+ public void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
+ impl.getLastMajorCompactionTimestampForRegion(controller, request, done);
+ }
+
};
}
@@ -42482,6 +44086,10 @@ public final class MasterProtos {
return impl.listTableDescriptorsByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest)request);
case 42:
return impl.listTableNamesByNamespace(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest)request);
+ case 43:
+ return impl.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request);
+ case 44:
+ return impl.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -42582,6 +44190,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
case 42:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
+ case 43:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+ case 44:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -42682,6 +44294,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
case 42:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
+ case 43:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+ case 44:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -43232,6 +44848,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse> done);
+ /**
+ * <code>rpc getLastMajorCompactionTimestamp(.MajorCompactionTimestampRequest) returns (.MajorCompactionTimestampResponse);</code>
+ *
+ * <pre>
+ ** Returns the timestamp of the last major compaction
+ * </pre>
+ */
+ public abstract void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done);
+
+ /**
+ * <code>rpc getLastMajorCompactionTimestampForRegion(.MajorCompactionTimestampForRegionRequest) returns (.MajorCompactionTimestampResponse);</code>
+ *
+ * <pre>
+ ** Returns the timestamp of the last major compaction
+ * </pre>
+ */
+ public abstract void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -43469,6 +45109,16 @@ public final class MasterProtos {
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse>specializeCallback(
done));
return;
+ case 43:
+ this.getLastMajorCompactionTimestamp(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse>specializeCallback(
+ done));
+ return;
+ case 44:
+ this.getLastMajorCompactionTimestampForRegion(controller, (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse>specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -43569,6 +45219,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest.getDefaultInstance();
case 42:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest.getDefaultInstance();
+ case 43:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest.getDefaultInstance();
+ case 44:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -43669,6 +45323,10 @@ public final class MasterProtos {
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse.getDefaultInstance();
case 42:
return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance();
+ case 43:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
+ case 44:
+ return org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -44334,6 +45992,36 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.class,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance()));
}
+
+ public void getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(43),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()));
+ }
+
+ public void getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(44),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance()));
+ }
}
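(Aside: a hedged sketch of the asynchronous path through the generated callback stub; how the com.google.protobuf.RpcChannel is acquired is outside this patch, and the request construction mirrors what HBaseAdmin does below:

    MasterProtos.MasterService.Stub stub = MasterProtos.MasterService.newStub(channel);
    MasterProtos.MajorCompactionTimestampRequest req =
        MasterProtos.MajorCompactionTimestampRequest.newBuilder()
            .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
    stub.getLastMajorCompactionTimestamp(null, req,
        new com.google.protobuf.RpcCallback<MasterProtos.MajorCompactionTimestampResponse>() {
          @Override
          public void run(MasterProtos.MajorCompactionTimestampResponse resp) {
            // resp may be null if the call failed; 0 means no major-compacted HFile was found.
            long ts = resp == null ? 0 : resp.getCompactionTimestamp();
          }
        });
)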
public static BlockingInterface newBlockingStub(
@@ -44556,6 +46244,16 @@ public final class MasterProtos {
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
+ throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
+ throws com.google.protobuf.ServiceException;
}
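(Aside: the blocking interface above can be driven directly. A minimal sketch, assuming the caller already holds a MasterService.BlockingInterface stub; the request builder and ProtobufUtil.toProtoTableName are the same ones HBaseAdmin uses in this patch:

    long getTableCompactionTs(MasterProtos.MasterService.BlockingInterface master,
        org.apache.hadoop.hbase.TableName tableName)
        throws com.google.protobuf.ServiceException {
      MasterProtos.MajorCompactionTimestampRequest req =
          MasterProtos.MajorCompactionTimestampRequest.newBuilder()
              .setTableName(
                  org.apache.hadoop.hbase.protobuf.ProtobufUtil.toProtoTableName(tableName))
              .build();
      // A result of 0 means no major-compacted HFile was found for the table.
      return master.getLastMajorCompactionTimestamp(null, req).getCompactionTimestamp();
    }
)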
private static final class BlockingStub implements BlockingInterface {
@@ -45080,6 +46778,30 @@ public final class MasterProtos {
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse.getDefaultInstance());
}
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(43),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
+ }
+
+
+ public org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(44),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse.getDefaultInstance());
+ }
+
}
// @@protoc_insertion_point(class_scope:MasterService)
@@ -45495,6 +47217,21 @@ public final class MasterProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_IsProcedureDoneResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MajorCompactionTimestampRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MajorCompactionTimestampRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MajorCompactionTimestampForRegionRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MajorCompactionTimestampForRegionRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_MajorCompactionTimestampResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_MajorCompactionTimestampResponse_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -45616,85 +47353,96 @@ public final class MasterProtos {
"t\022(\n\tprocedure\030\001 \001(\0132\025.ProcedureDescript",
"ion\"W\n\027IsProcedureDoneResponse\022\023\n\004done\030\001" +
" \001(\010:\005false\022\'\n\010snapshot\030\002 \001(\0132\025.Procedur" +
- "eDescription2\365\027\n\rMasterService\022S\n\024GetSch" +
- "emaAlterStatus\022\034.GetSchemaAlterStatusReq" +
- "uest\032\035.GetSchemaAlterStatusResponse\022P\n\023G" +
- "etTableDescriptors\022\033.GetTableDescriptors" +
- "Request\032\034.GetTableDescriptorsResponse\022>\n" +
- "\rGetTableNames\022\025.GetTableNamesRequest\032\026." +
- "GetTableNamesResponse\022G\n\020GetClusterStatu" +
- "s\022\030.GetClusterStatusRequest\032\031.GetCluster",
- "StatusResponse\022D\n\017IsMasterRunning\022\027.IsMa" +
- "sterRunningRequest\032\030.IsMasterRunningResp" +
- "onse\0222\n\tAddColumn\022\021.AddColumnRequest\032\022.A" +
- "ddColumnResponse\022;\n\014DeleteColumn\022\024.Delet" +
- "eColumnRequest\032\025.DeleteColumnResponse\022;\n" +
- "\014ModifyColumn\022\024.ModifyColumnRequest\032\025.Mo" +
- "difyColumnResponse\0225\n\nMoveRegion\022\022.MoveR" +
- "egionRequest\032\023.MoveRegionResponse\022Y\n\026Dis" +
- "patchMergingRegions\022\036.DispatchMergingReg" +
- "ionsRequest\032\037.DispatchMergingRegionsResp",
- "onse\022;\n\014AssignRegion\022\024.AssignRegionReque" +
- "st\032\025.AssignRegionResponse\022A\n\016UnassignReg" +
- "ion\022\026.UnassignRegionRequest\032\027.UnassignRe" +
- "gionResponse\022>\n\rOfflineRegion\022\025.OfflineR" +
- "egionRequest\032\026.OfflineRegionResponse\0228\n\013" +
- "DeleteTable\022\023.DeleteTableRequest\032\024.Delet" +
- "eTableResponse\022>\n\rtruncateTable\022\025.Trunca" +
- "teTableRequest\032\026.TruncateTableResponse\0228" +
- "\n\013EnableTable\022\023.EnableTableRequest\032\024.Ena" +
- "bleTableResponse\022;\n\014DisableTable\022\024.Disab",
- "leTableRequest\032\025.DisableTableResponse\0228\n" +
- "\013ModifyTable\022\023.ModifyTableRequest\032\024.Modi" +
- "fyTableResponse\0228\n\013CreateTable\022\023.CreateT" +
- "ableRequest\032\024.CreateTableResponse\022/\n\010Shu" +
- "tdown\022\020.ShutdownRequest\032\021.ShutdownRespon" +
- "se\0225\n\nStopMaster\022\022.StopMasterRequest\032\023.S" +
- "topMasterResponse\022,\n\007Balance\022\017.BalanceRe" +
- "quest\032\020.BalanceResponse\022M\n\022SetBalancerRu" +
- "nning\022\032.SetBalancerRunningRequest\032\033.SetB" +
- "alancerRunningResponse\022A\n\016RunCatalogScan",
- "\022\026.RunCatalogScanRequest\032\027.RunCatalogSca" +
- "nResponse\022S\n\024EnableCatalogJanitor\022\034.Enab" +
- "leCatalogJanitorRequest\032\035.EnableCatalogJ" +
- "anitorResponse\022\\\n\027IsCatalogJanitorEnable" +
- "d\022\037.IsCatalogJanitorEnabledRequest\032 .IsC" +
- "atalogJanitorEnabledResponse\022L\n\021ExecMast" +
- "erService\022\032.CoprocessorServiceRequest\032\033." +
- "CoprocessorServiceResponse\022/\n\010Snapshot\022\020" +
- ".SnapshotRequest\032\021.SnapshotResponse\022V\n\025G" +
- "etCompletedSnapshots\022\035.GetCompletedSnaps",
- "hotsRequest\032\036.GetCompletedSnapshotsRespo" +
- "nse\022A\n\016DeleteSnapshot\022\026.DeleteSnapshotRe" +
- "quest\032\027.DeleteSnapshotResponse\022A\n\016IsSnap" +
- "shotDone\022\026.IsSnapshotDoneRequest\032\027.IsSna" +
- "pshotDoneResponse\022D\n\017RestoreSnapshot\022\027.R" +
- "estoreSnapshotRequest\032\030.RestoreSnapshotR" +
- "esponse\022V\n\025IsRestoreSnapshotDone\022\035.IsRes" +
- "toreSnapshotDoneRequest\032\036.IsRestoreSnaps" +
- "hotDoneResponse\022>\n\rExecProcedure\022\025.ExecP" +
- "rocedureRequest\032\026.ExecProcedureResponse\022",
- "E\n\024ExecProcedureWithRet\022\025.ExecProcedureR" +
- "equest\032\026.ExecProcedureResponse\022D\n\017IsProc" +
- "edureDone\022\027.IsProcedureDoneRequest\032\030.IsP" +
- "rocedureDoneResponse\022D\n\017ModifyNamespace\022" +
- "\027.ModifyNamespaceRequest\032\030.ModifyNamespa" +
- "ceResponse\022D\n\017CreateNamespace\022\027.CreateNa" +
- "mespaceRequest\032\030.CreateNamespaceResponse" +
- "\022D\n\017DeleteNamespace\022\027.DeleteNamespaceReq" +
- "uest\032\030.DeleteNamespaceResponse\022Y\n\026GetNam" +
- "espaceDescriptor\022\036.GetNamespaceDescripto",
- "rRequest\032\037.GetNamespaceDescriptorRespons" +
- "e\022_\n\030ListNamespaceDescriptors\022 .ListName" +
- "spaceDescriptorsRequest\032!.ListNamespaceD" +
- "escriptorsResponse\022t\n\037ListTableDescripto" +
- "rsByNamespace\022\'.ListTableDescriptorsByNa" +
- "mespaceRequest\032(.ListTableDescriptorsByN" +
- "amespaceResponse\022b\n\031ListTableNamesByName" +
- "space\022!.ListTableNamesByNamespaceRequest" +
- "\032\".ListTableNamesByNamespaceResponseBB\n*" +
- "org.apache.hadoop.hbase.protobuf.generat",
- "edB\014MasterProtosH\001\210\001\001\240\001\001"
+ "eDescription\"A\n\037MajorCompactionTimestamp" +
+ "Request\022\036\n\ntable_name\030\001 \002(\0132\n.TableName\"" +
+ "L\n(MajorCompactionTimestampForRegionRequ" +
+ "est\022 \n\006region\030\001 \002(\0132\020.RegionSpecifier\"@\n" +
+ " MajorCompactionTimestampResponse\022\034\n\024com" +
+ "paction_timestamp\030\001 \002(\0032\327\031\n\rMasterServic" +
+ "e\022S\n\024GetSchemaAlterStatus\022\034.GetSchemaAlt" +
+ "erStatusRequest\032\035.GetSchemaAlterStatusRe",
+ "sponse\022P\n\023GetTableDescriptors\022\033.GetTable" +
+ "DescriptorsRequest\032\034.GetTableDescriptors" +
+ "Response\022>\n\rGetTableNames\022\025.GetTableName" +
+ "sRequest\032\026.GetTableNamesResponse\022G\n\020GetC" +
+ "lusterStatus\022\030.GetClusterStatusRequest\032\031" +
+ ".GetClusterStatusResponse\022D\n\017IsMasterRun" +
+ "ning\022\027.IsMasterRunningRequest\032\030.IsMaster" +
+ "RunningResponse\0222\n\tAddColumn\022\021.AddColumn" +
+ "Request\032\022.AddColumnResponse\022;\n\014DeleteCol" +
+ "umn\022\024.DeleteColumnRequest\032\025.DeleteColumn",
+ "Response\022;\n\014ModifyColumn\022\024.ModifyColumnR" +
+ "equest\032\025.ModifyColumnResponse\0225\n\nMoveReg" +
+ "ion\022\022.MoveRegionRequest\032\023.MoveRegionResp" +
+ "onse\022Y\n\026DispatchMergingRegions\022\036.Dispatc" +
+ "hMergingRegionsRequest\032\037.DispatchMerging" +
+ "RegionsResponse\022;\n\014AssignRegion\022\024.Assign" +
+ "RegionRequest\
<TRUNCATED>
[3/3] hbase git commit: HBASE-12859 New master API to track major compaction completion.
Posted by la...@apache.org.
HBASE-12859 New master API to track major compaction completion.
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0b3502f2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0b3502f2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0b3502f2
Branch: refs/heads/branch-1
Commit: 0b3502f2b114cd937f57e447f765876d73d99df8
Parents: 8ddc90c
Author: Lars Hofhansl <la...@apache.org>
Authored: Thu Jan 29 13:57:24 2015 -0800
Committer: Lars Hofhansl <la...@apache.org>
Committed: Thu Jan 29 13:57:24 2015 -0800
----------------------------------------------------------------------
.../org/apache/hadoop/hbase/ClusterStatus.java | 23 +
.../org/apache/hadoop/hbase/RegionLoad.java | 12 +-
.../org/apache/hadoop/hbase/client/Admin.java | 26 +
.../hadoop/hbase/client/ConnectionManager.java | 17 +
.../apache/hadoop/hbase/client/HBaseAdmin.java | 32 +
.../hadoop/hbase/io/hfile/HFileContext.java | 16 +-
.../hbase/io/hfile/HFileContextBuilder.java | 9 +-
.../protobuf/generated/ClusterStatusProtos.java | 135 +-
.../hbase/protobuf/generated/MasterProtos.java | 1924 +++++++++++++++++-
.../src/main/protobuf/ClusterStatus.proto | 2 +
hbase-protocol/src/main/protobuf/Master.proto | 20 +
.../hbase/io/hfile/AbstractHFileWriter.java | 3 +
.../org/apache/hadoop/hbase/io/hfile/HFile.java | 1 +
.../hadoop/hbase/io/hfile/HFileReaderV2.java | 1 +
.../org/apache/hadoop/hbase/master/HMaster.java | 10 +
.../hadoop/hbase/master/MasterRpcServices.java | 34 +
.../hadoop/hbase/master/MasterServices.java | 16 +
.../hadoop/hbase/regionserver/HRegion.java | 23 +
.../hbase/regionserver/HRegionServer.java | 8 +-
.../hadoop/hbase/regionserver/HStore.java | 1 +
.../apache/hadoop/hbase/client/TestAdmin1.java | 52 +
.../hadoop/hbase/master/TestCatalogJanitor.java | 12 +
22 files changed, 2268 insertions(+), 109 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 40fcfcf..4f07964 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -267,6 +267,29 @@ public class ClusterStatus extends VersionedWritable {
return masterCoprocessors;
}
+ public long getLastMajorCompactionTsForTable(TableName table) {
+ long result = Long.MAX_VALUE;
+ for (ServerName server : getServers()) {
+ ServerLoad load = getLoad(server);
+ for (RegionLoad rl : load.getRegionsLoad().values()) {
+ if (table.equals(HRegionInfo.getTable(rl.getName()))) {
+ result = Math.min(result, rl.getLastMajorCompactionTs());
+ }
+ }
+ }
+ return result == Long.MAX_VALUE ? 0 : result;
+ }
+
+ public long getLastMajorCompactionTsForRegion(final byte[] region) {
+ for (ServerName server : getServers()) {
+ ServerLoad load = getLoad(server);
+ RegionLoad rl = load.getRegionsLoad().get(region);
+ if (rl != null) {
+ return rl.getLastMajorCompactionTs();
+ }
+ }
+ return 0;
+ }
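(Aside: a hedged client-side sketch of the two new accessors; obtaining the ClusterStatus via Admin#getClusterStatus() and the table name "mytable" are illustrative:

    ClusterStatus status = admin.getClusterStatus();
    TableName tn = TableName.valueOf("mytable");
    // Minimum last-major-compaction timestamp across the table's regions;
    // 0 if no region matched, or if any matching region reports 0.
    long tableTs = status.getLastMajorCompactionTsForTable(tn);
    // Per-region lookup keys off the region name bytes carried in RegionLoad;
    // returns 0 if no server reports the region.
    byte[] regionName = admin.getTableRegions(tn).get(0).getRegionName();
    long regionTs = status.getLastMajorCompactionTsForRegion(regionName);
)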
public boolean isBalancerOn() {
return balancerOn != null && balancerOn;
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index 234c5ae..794e8b2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -169,6 +169,14 @@ public class RegionLoad {
}
return 0.0f;
}
+
+ /**
+ * @return the timestamp of the oldest HFile for any store of this region.
+ */
+ public long getLastMajorCompactionTs() {
+ return regionLoadPB.getLastMajorCompactionTs();
+ }
+
/**
* @see java.lang.Object#toString()
*/
@@ -179,7 +187,9 @@ public class RegionLoad {
sb = Strings.appendKeyValue(sb, "numberOfStorefiles",
this.getStorefiles());
sb = Strings.appendKeyValue(sb, "storefileUncompressedSizeMB",
- this.getStoreUncompressedSizeMB());
+ this.getStoreUncompressedSizeMB());
+ sb = Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp",
+ this.getLastMajorCompactionTs());
sb = Strings.appendKeyValue(sb, "storefileSizeMB",
this.getStorefileSizeMB());
if (this.getStoreUncompressedSizeMB() != 0) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index eedbdcb..852ac42 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -954,6 +954,32 @@ public interface Admin extends Abortable, Closeable {
final byte[] regionName) throws IOException;
/**
+ * Get the timestamp of the last major compaction for the passed table.
+ *
+ * The returned timestamp is that of the oldest HFile resulting from a major compaction
+ * of that table, or 0 if no such HFile could be found.
+ *
+ * @param tableName table to examine
+ * @return the last major compaction timestamp or 0
+ * @throws IOException if a remote or network exception occurs
+ */
+ long getLastMajorCompactionTimestamp(final TableName tableName)
+ throws IOException;
+
+ /**
+ * Get the timestamp of the last major compaction for the passed region.
+ *
+ * The returned timestamp is that of the oldest HFile resulting from a major compaction
+ * of that region, or 0 if no such HFile could be found.
+ *
+ * @param regionName region to examine
+ * @return the last major compaction timestamp or 0
+ * @throws IOException if a remote or network exception occurs
+ */
+ long getLastMajorCompactionTimestampForRegion(final byte[] regionName)
+ throws IOException;
+
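(Aside: a short usage sketch for the two interface methods just declared; the connection setup is illustrative and exception handling is elided:

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("mytable");
      long tableTs = admin.getLastMajorCompactionTimestamp(tn);
      // Pick any region of the table to query the per-region variant.
      byte[] regionName = admin.getTableRegions(tn).get(0).getRegionName();
      long regionTs = admin.getLastMajorCompactionTimestampForRegion(regionName);
      // Both calls return 0 when no HFile produced by a major compaction exists.
    }
)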
+ /**
* Take a snapshot for the given table. If the table is enabled, a FLUSH-type snapshot will be
* taken. If the table is disabled, an offline snapshot is taken. Snapshots are considered unique
* based on <b>the name of the snapshot</b>. Attempts to take a snapshot with the same name (even
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index fd8a51d..b22d456 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -134,6 +134,9 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescript
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
@@ -2006,6 +2009,20 @@ class ConnectionManager {
throws ServiceException {
return stub.getClusterStatus(controller, request);
}
+
+ @Override
+ public MajorCompactionTimestampResponse getLastMajorCompactionTimestamp(
+ RpcController controller, MajorCompactionTimestampRequest request)
+ throws ServiceException {
+ return stub.getLastMajorCompactionTimestamp(controller, request);
+ }
+
+ @Override
+ public MajorCompactionTimestampResponse getLastMajorCompactionTimestampForRegion(
+ RpcController controller, MajorCompactionTimestampForRegionRequest request)
+ throws ServiceException {
+ return stub.getLastMajorCompactionTimestampForRegion(controller, request);
+ }
};
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 1f4ce6d..520b953 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateConfiguratio
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ProcedureDescription;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
@@ -115,6 +116,8 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRes
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
@@ -3721,4 +3724,33 @@ public class HBaseAdmin implements Admin {
throw new IOException("Failed to get master info port from MasterAddressTracker", e);
}
}
+
+ @Override
+ public long getLastMajorCompactionTimestamp(final TableName tableName) throws IOException {
+ return executeCallable(new MasterCallable<Long>(getConnection()) {
+ @Override
+ public Long call(int callTimeout) throws ServiceException {
+ MajorCompactionTimestampRequest req =
+ MajorCompactionTimestampRequest.newBuilder()
+ .setTableName(ProtobufUtil.toProtoTableName(tableName)).build();
+ return master.getLastMajorCompactionTimestamp(null, req).getCompactionTimestamp();
+ }
+ });
+ }
+
+ @Override
+ public long getLastMajorCompactionTimestampForRegion(final byte[] regionName) throws IOException {
+ return executeCallable(new MasterCallable<Long>(getConnection()) {
+ @Override
+ public Long call(int callTimeout) throws ServiceException {
+ MajorCompactionTimestampForRegionRequest req =
+ MajorCompactionTimestampForRegionRequest
+ .newBuilder()
+ .setRegion(
+ RequestConverter
+ .buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)).build();
+ return master.getLastMajorCompactionTimestampForRegion(null, req).getCompactionTimestamp();
+ }
+ });
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
index 83fe701..02af483 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContext.java
@@ -56,6 +56,7 @@ public class HFileContext implements HeapSize, Cloneable {
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
/** Encryption algorithm and key used */
private Encryption.Context cryptoContext = Encryption.Context.NONE;
+ private long fileCreateTime;
//Empty constructor. Go with setters
public HFileContext() {
@@ -76,12 +77,13 @@ public class HFileContext implements HeapSize, Cloneable {
this.blocksize = context.blocksize;
this.encoding = context.encoding;
this.cryptoContext = context.cryptoContext;
+ this.fileCreateTime = context.fileCreateTime;
}
public HFileContext(boolean useHBaseChecksum, boolean includesMvcc, boolean includesTags,
Compression.Algorithm compressAlgo, boolean compressTags, ChecksumType checksumType,
int bytesPerChecksum, int blockSize, DataBlockEncoding encoding,
- Encryption.Context cryptoContext) {
+ Encryption.Context cryptoContext, long fileCreateTime) {
this.usesHBaseChecksum = useHBaseChecksum;
this.includesMvcc = includesMvcc;
this.includesTags = includesTags;
@@ -94,6 +96,7 @@ public class HFileContext implements HeapSize, Cloneable {
this.encoding = encoding;
}
this.cryptoContext = cryptoContext;
+ this.fileCreateTime = fileCreateTime;
}
/**
@@ -141,6 +144,10 @@ public class HFileContext implements HeapSize, Cloneable {
this.includesTags = includesTags;
}
+ public void setFileCreateTime(long fileCreateTime) {
+ this.fileCreateTime = fileCreateTime;
+ }
+
public boolean isCompressTags() {
return compressTags;
}
@@ -161,6 +168,10 @@ public class HFileContext implements HeapSize, Cloneable {
return blocksize;
}
+ public long getFileCreateTime() {
+ return fileCreateTime;
+ }
+
public DataBlockEncoding getDataBlockEncoding() {
return encoding;
}
@@ -189,7 +200,8 @@ public class HFileContext implements HeapSize, Cloneable {
4 * ClassSize.REFERENCE +
2 * Bytes.SIZEOF_INT +
// usesHBaseChecksum, includesMvcc, includesTags and compressTags
- 4 * Bytes.SIZEOF_BOOLEAN);
+ 4 * Bytes.SIZEOF_BOOLEAN +
+ Bytes.SIZEOF_LONG);
return size;
}
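(Aside: put differently, the fixed-size estimate grows by exactly Bytes.SIZEOF_LONG, i.e. 8 bytes, to account for the new fileCreateTime field, keeping heapSize() consistent with the field list above.)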
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
index 9a4234a..7416f4e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileContextBuilder.java
@@ -52,6 +52,7 @@ public class HFileContextBuilder {
private DataBlockEncoding encoding = DataBlockEncoding.NONE;
/** Crypto context */
private Encryption.Context cryptoContext = Encryption.Context.NONE;
+ private long fileCreateTime = 0;
public HFileContextBuilder withHBaseCheckSum(boolean useHBaseCheckSum) {
this.usesHBaseChecksum = useHBaseCheckSum;
@@ -103,8 +104,14 @@ public class HFileContextBuilder {
return this;
}
+ public HFileContextBuilder withCreateTime(long fileCreateTime) {
+ this.fileCreateTime = fileCreateTime;
+ return this;
+ }
+
public HFileContext build() {
return new HFileContext(usesHBaseChecksum, includesMvcc, includesTags, compression,
- compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext);
+ compressTags, checksumType, bytesPerChecksum, blocksize, encoding, cryptoContext,
+ fileCreateTime);
}
}
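
The builder gains a matching fluent method, defaulting to 0 when not supplied. A minimal sketch chaining it with an existing withXxx method from this same file:

    // Sketch only: carry a create timestamp through HFileContextBuilder.
    HFileContext ctx = new HFileContextBuilder()
        .withHBaseCheckSum(true)
        .withCreateTime(System.currentTimeMillis()) // assumed epoch millis
        .build();
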
http://git-wip-us.apache.org/repos/asf/hbase/blob/0b3502f2/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
index 5bc44ff..6dc48fa 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
@@ -2171,6 +2171,16 @@ public final class ClusterStatusProtos {
* </pre>
*/
float getDataLocality();
+
+ // optional uint64 last_major_compaction_ts = 17 [default = 0];
+ /**
+ * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+ */
+ boolean hasLastMajorCompactionTs();
+ /**
+ * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+ */
+ long getLastMajorCompactionTs();
}
/**
* Protobuf type {@code RegionLoad}
@@ -2311,6 +2321,11 @@ public final class ClusterStatusProtos {
dataLocality_ = input.readFloat();
break;
}
+ case 136: {
+ bitField0_ |= 0x00010000;
+ lastMajorCompactionTs_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
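
For reference, the new parse case follows directly from the protobuf wire format: a field's tag byte is (field_number << 3) | wire_type, and a uint64 uses varint wire type 0, so

    tag = (17 << 3) | 0 = 136

which is exactly the case label handling last_major_compaction_ts above.
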
@@ -2753,6 +2768,22 @@ public final class ClusterStatusProtos {
return dataLocality_;
}
+ // optional uint64 last_major_compaction_ts = 17 [default = 0];
+ public static final int LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER = 17;
+ private long lastMajorCompactionTs_;
+ /**
+ * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+ */
+ public boolean hasLastMajorCompactionTs() {
+ return ((bitField0_ & 0x00010000) == 0x00010000);
+ }
+ /**
+ * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+ */
+ public long getLastMajorCompactionTs() {
+ return lastMajorCompactionTs_;
+ }
+
private void initFields() {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
stores_ = 0;
@@ -2770,6 +2801,7 @@ public final class ClusterStatusProtos {
totalStaticBloomSizeKB_ = 0;
completeSequenceId_ = 0L;
dataLocality_ = 0F;
+ lastMajorCompactionTs_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -2839,6 +2871,9 @@ public final class ClusterStatusProtos {
if (((bitField0_ & 0x00008000) == 0x00008000)) {
output.writeFloat(16, dataLocality_);
}
+ if (((bitField0_ & 0x00010000) == 0x00010000)) {
+ output.writeUInt64(17, lastMajorCompactionTs_);
+ }
getUnknownFields().writeTo(output);
}
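
The presence mask is likewise mechanical: 0x00010000 is 1 << 16, i.e. bit 16 of bitField0_, tracking the 17th field (presence bits are indexed from 0). writeTo emits field 17 only when that bit is set, so an unset optional field still costs nothing on the wire.
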
@@ -2912,6 +2947,10 @@ public final class ClusterStatusProtos {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(16, dataLocality_);
}
+ if (((bitField0_ & 0x00010000) == 0x00010000)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(17, lastMajorCompactionTs_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -3014,6 +3053,11 @@ public final class ClusterStatusProtos {
if (hasDataLocality()) {
result = result && (Float.floatToIntBits(getDataLocality()) == Float.floatToIntBits(other.getDataLocality()));
}
+ result = result && (hasLastMajorCompactionTs() == other.hasLastMajorCompactionTs());
+ if (hasLastMajorCompactionTs()) {
+ result = result && (getLastMajorCompactionTs()
+ == other.getLastMajorCompactionTs());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -3092,6 +3136,10 @@ public final class ClusterStatusProtos {
hash = (53 * hash) + Float.floatToIntBits(
getDataLocality());
}
+ if (hasLastMajorCompactionTs()) {
+ hash = (37 * hash) + LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getLastMajorCompactionTs());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -3238,6 +3286,8 @@ public final class ClusterStatusProtos {
bitField0_ = (bitField0_ & ~0x00004000);
dataLocality_ = 0F;
bitField0_ = (bitField0_ & ~0x00008000);
+ lastMajorCompactionTs_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00010000);
return this;
}
@@ -3334,6 +3384,10 @@ public final class ClusterStatusProtos {
to_bitField0_ |= 0x00008000;
}
result.dataLocality_ = dataLocality_;
+ if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
+ to_bitField0_ |= 0x00010000;
+ }
+ result.lastMajorCompactionTs_ = lastMajorCompactionTs_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -3398,6 +3452,9 @@ public final class ClusterStatusProtos {
if (other.hasDataLocality()) {
setDataLocality(other.getDataLocality());
}
+ if (other.hasLastMajorCompactionTs()) {
+ setLastMajorCompactionTs(other.getLastMajorCompactionTs());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -4337,6 +4394,39 @@ public final class ClusterStatusProtos {
return this;
}
+ // optional uint64 last_major_compaction_ts = 17 [default = 0];
+ private long lastMajorCompactionTs_ ;
+ /**
+ * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+ */
+ public boolean hasLastMajorCompactionTs() {
+ return ((bitField0_ & 0x00010000) == 0x00010000);
+ }
+ /**
+ * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+ */
+ public long getLastMajorCompactionTs() {
+ return lastMajorCompactionTs_;
+ }
+ /**
+ * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+ */
+ public Builder setLastMajorCompactionTs(long value) {
+ bitField0_ |= 0x00010000;
+ lastMajorCompactionTs_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
+ */
+ public Builder clearLastMajorCompactionTs() {
+ bitField0_ = (bitField0_ & ~0x00010000);
+ lastMajorCompactionTs_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:RegionLoad)
}
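
Callers consume the new field through the generated accessors shown above. A minimal sketch, noting that RegionLoad also carries a required region_specifier, so buildPartial() is used here purely to sidestep that check for illustration (the timestamp value is arbitrary):

    // Sketch only: round-trip last_major_compaction_ts through the generated API.
    ClusterStatusProtos.RegionLoad load = ClusterStatusProtos.RegionLoad.newBuilder()
        .setLastMajorCompactionTs(1422572176000L) // arbitrary epoch-millis example
        .buildPartial();                          // skips the required region_specifier
    if (load.hasLastMajorCompactionTs()) {
      long ts = load.getLastMajorCompactionTs();
    }
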
@@ -10472,7 +10562,7 @@ public final class ClusterStatusProtos {
"PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio",
"nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" +
"ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" +
- "e\"\347\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
+ "e\"\214\004\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
"(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" +
"storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" +
"ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" +
@@ -10484,26 +10574,27 @@ public final class ClusterStatusProtos {
"\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" +
"(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" +
"\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" +
- "ality\030\020 \001(\002\"\212\002\n\nServerLoad\022\032\n\022number_of_" +
- "requests\030\001 \001(\r\022 \n\030total_number_of_reques" +
- "ts\030\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_he" +
- "ap_MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.Regi" +
- "onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" +
- "or\022\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_",
- "end_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r" +
- "\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Ser" +
- "verName\022 \n\013server_load\030\002 \002(\0132\013.ServerLoa" +
- "d\"\340\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001" +
- "(\0132\030.HBaseVersionFileContent\022%\n\014live_ser" +
- "vers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_serv" +
- "ers\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_tra" +
- "nsition\030\004 \003(\0132\023.RegionInTransition\022\036\n\ncl" +
- "uster_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_cop" +
- "rocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030",
- "\007 \001(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003" +
- "(\0132\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*" +
- "org.apache.hadoop.hbase.protobuf.generat" +
- "edB\023ClusterStatusProtosH\001\240\001\001"
+ "ality\030\020 \001(\002\022#\n\030last_major_compaction_ts\030" +
+ "\021 \001(\004:\0010\"\212\002\n\nServerLoad\022\032\n\022number_of_req" +
+ "uests\030\001 \001(\r\022 \n\030total_number_of_requests\030" +
+ "\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_heap_" +
+ "MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.RegionL" +
+ "oad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022",
+ "\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_end" +
+ "_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r\"O\n" +
+ "\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Server" +
+ "Name\022 \n\013server_load\030\002 \002(\0132\013.ServerLoad\"\340" +
+ "\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001(\0132" +
+ "\030.HBaseVersionFileContent\022%\n\014live_server" +
+ "s\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_servers" +
+ "\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_transi" +
+ "tion\030\004 \003(\0132\023.RegionInTransition\022\036\n\nclust" +
+ "er_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_coproc",
+ "essors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030\007 \001" +
+ "(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003(\0132" +
+ "\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*org" +
+ ".apache.hadoop.hbase.protobuf.generatedB" +
+ "\023ClusterStatusProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
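
The escaped-octal literal above is the serialized FileDescriptorProto, so the reshuffled string is mechanical but checkable: the RegionLoad message's length prefix changes from "\347\003" (varint 103 + 3*128 = 487 bytes) to "\214\004" (varint 12 + 4*128 = 524 bytes), and that 37-byte growth matches the new field entry "\022#\n\030last_major_compaction_ts\030\021 \001(\004:\0010" exactly: a 2-byte tag/length header plus 0x23 = 35 bytes of FieldDescriptorProto.
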
@@ -10527,7 +10618,7 @@ public final class ClusterStatusProtos {
internal_static_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionLoad_descriptor,
- new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", });
+ new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", });
internal_static_ServerLoad_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_ServerLoad_fieldAccessorTable = new