You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by te...@apache.org on 2012/09/06 19:22:02 UTC
svn commit: r1381684 [1/2] - in /hbase/trunk/hbase-server/src:
main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/master/
main/java/org/apache/hadoop/hbase/protobuf/
main/java/org/apache/hadoop/hbase/protobuf/generated/ main/java/org/...
Author: tedyu
Date: Thu Sep 6 17:22:01 2012
New Revision: 1381684
URL: http://svn.apache.org/viewvc?rev=1381684&view=rev
Log:
HBASE-6659 Port HBASE-6508 Filter out edits at log split time
Added:
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogFiltering.java
Modified:
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
hbase/trunk/hbase-server/src/main/protobuf/RegionServerStatus.proto
hbase/trunk/hbase-server/src/main/protobuf/hbase.proto
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestHLogSplit.java
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionLoad.java?rev=1381684&r1=1381683&r2=1381684&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionLoad.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/RegionLoad.java Thu Sep 6 17:22:01 2012
@@ -153,4 +153,12 @@ public class RegionLoad {
public long getCurrentCompactedKVs() {
return regionLoadPB.getCurrentCompactedKVs();
}
+
+ /**
+ * This does not really belong inside RegionLoad but it's being done in the name of expediency.
+ * @return the completed sequence Id for the region
+ */
+ public long getCompleteSequenceId() {
+ return regionLoadPB.getCompleteSequenceId();
+ }
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java?rev=1381684&r1=1381683&r2=1381684&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java Thu Sep 6 17:22:01 2012
@@ -30,8 +30,11 @@ import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
+import java.util.SortedMap;
import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
@@ -58,6 +61,7 @@ import org.apache.hadoop.hbase.MasterMon
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.NotAllMetaRegionsOnlineException;
import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.RegionServerStatusProtocol;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerLoad;
@@ -150,6 +154,8 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetTableDescriptorsResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
@@ -1150,8 +1156,16 @@ Server {
}
@Override
+ public GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(RpcController controller,
+ GetLastFlushedSequenceIdRequest request) throws ServiceException {
+ byte[] regionName = request.getRegionName().toByteArray();
+ long seqId = serverManager.getLastFlushedSequenceId(regionName);
+ return ResponseConverter.buildGetLastFlushedSequenceIdResponse(seqId);
+ }
+
+ @Override
public RegionServerReportResponse regionServerReport(
- RpcController controller,RegionServerReportRequest request) throws ServiceException {
+ RpcController controller, RegionServerReportRequest request) throws ServiceException {
try {
HBaseProtos.ServerLoad sl = request.getLoad();
this.serverManager.regionServerReport(ProtobufUtil.toServerName(request.getServer()), new ServerLoad(sl));
@@ -1746,12 +1760,14 @@ Server {
}
@Override
- public GetClusterStatusResponse getClusterStatus(RpcController controller, GetClusterStatusRequest req)
+ public GetClusterStatusResponse getClusterStatus(RpcController controller,
+ GetClusterStatusRequest req)
throws ServiceException {
GetClusterStatusResponse.Builder response = GetClusterStatusResponse.newBuilder();
response.setClusterStatus(getClusterStatus().convert());
return response.build();
}
+
/**
* @return cluster status
*/
@@ -1770,7 +1786,8 @@ Server {
for (String s: backupMasterStrings) {
try {
byte [] bytes =
- ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(this.zooKeeper.backupMasterAddressesZNode, s));
+ ZKUtil.getData(this.zooKeeper, ZKUtil.joinZNode(
+ this.zooKeeper.backupMasterAddressesZNode, s));
if (bytes != null) {
ServerName sn;
try {
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java?rev=1381684&r1=1381683&r2=1381684&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java Thu Sep 6 17:22:01 2012
@@ -28,7 +28,10 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.SortedMap;
+import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -37,14 +40,10 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HServerAddress;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.protobuf.RequestConverter;
-import org.apache.hadoop.hbase.protobuf.ResponseConverter;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
-import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
-import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.PleaseHoldException;
+import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.Server;
+import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.YouAreDeadException;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
@@ -55,8 +54,16 @@ import org.apache.hadoop.hbase.client.Re
import org.apache.hadoop.hbase.master.handler.MetaServerShutdownHandler;
import org.apache.hadoop.hbase.master.handler.ServerShutdownHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.RequestConverter;
+import org.apache.hadoop.hbase.protobuf.ResponseConverter;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
+import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
+import org.apache.hadoop.hbase.util.Bytes;
+import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;
/**
@@ -79,6 +86,9 @@ public class ServerManager {
// Set if we are to shutdown the cluster.
private volatile boolean clusterShutdown = false;
+ private final SortedMap<byte[], Long> flushedSequenceIdByRegion =
+ new ConcurrentSkipListMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+
/** Map of registered servers to their current load */
private final Map<ServerName, ServerLoad> onlineServers =
new ConcurrentHashMap<ServerName, ServerLoad>();
@@ -163,6 +173,33 @@ public class ServerManager {
return sn;
}
+ /**
+ * Updates last flushed sequence Ids for the regions on server sn
+ * @param sn
+ * @param hsl
+ */
+ private void updateLastFlushedSequenceIds(ServerName sn, ServerLoad hsl) {
+ Map<byte[], RegionLoad> regionsLoad = hsl.getRegionsLoad();
+ for (Entry<byte[], RegionLoad> entry : regionsLoad.entrySet()) {
+ Long existingValue = flushedSequenceIdByRegion.get(entry.getKey());
+ long l = entry.getValue().getCompleteSequenceId();
+ if (existingValue != null) {
+ if (l != -1 && l < existingValue) {
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("RegionServer " + sn +
+ " indicates a last flushed sequence id (" + entry.getValue() +
+ ") that is less than the previous last flushed sequence id (" +
+ existingValue + ") for region " +
+ Bytes.toString(entry.getKey()) + " Ignoring.");
+ }
+ continue; // Don't let smaller sequence ids override greater
+ // sequence ids.
+ }
+ }
+ flushedSequenceIdByRegion.put(entry.getKey(), l);
+ }
+ }
+
void regionServerReport(ServerName sn, ServerLoad sl)
throws YouAreDeadException, PleaseHoldException {
checkIsDead(sn, "REPORT");
@@ -178,6 +215,7 @@ public class ServerManager {
} else {
this.onlineServers.put(sn, sl);
}
+ updateLastFlushedSequenceIds(sn, sl);
}
/**
@@ -271,6 +309,14 @@ public class ServerManager {
this.serverConnections.remove(serverName);
}
+ public long getLastFlushedSequenceId(byte[] regionName) {
+ long seqId = -1;
+ if (flushedSequenceIdByRegion.containsKey(regionName)) {
+ seqId = flushedSequenceIdByRegion.get(regionName);
+ }
+ return seqId;
+ }
+
/**
* @param serverName
* @return ServerLoad if serverName is known else null
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java?rev=1381684&r1=1381683&r2=1381684&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java Thu Sep 6 17:22:01 2012
@@ -101,6 +101,7 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.SetBalancerRunningRequest;
import org.apache.hadoop.hbase.protobuf.generated.MasterMonitorProtos.GetClusterStatusRequest;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
@@ -1138,4 +1139,15 @@ public final class RequestConverter {
public static IsCatalogJanitorEnabledRequest buildIsCatalogJanitorEnabledRequest() {
return IsCatalogJanitorEnabledRequest.newBuilder().build();
}
+
+ /**
+ * Creates a request for querying the master for the last flushed sequence Id for a region
+ * @param regionName
+ * @return A {@link GetLastFlushedSequenceIdRequest}
+ */
+ public static GetLastFlushedSequenceIdRequest buildGetLastFlushedSequenceIdRequest(
+ byte[] regionName) {
+ return GetLastFlushedSequenceIdRequest.newBuilder().setRegionName(
+ ByteString.copyFrom(regionName)).build();
+ }
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java?rev=1381684&r1=1381683&r2=1381684&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java Thu Sep 6 17:22:01 2012
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.CatalogScanResponse;
import org.apache.hadoop.hbase.protobuf.generated.MasterAdminProtos.EnableCatalogJanitorResponse;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.util.StringUtils;
@@ -238,4 +239,14 @@ public final class ResponseConverter {
}
// End utilities for Admin
+
+ /**
+ * Creates a response for the last flushed sequence Id request
+ * @return A GetLastFlushedSequenceIdResponse
+ */
+ public static GetLastFlushedSequenceIdResponse buildGetLastFlushedSequenceIdResponse(
+ long seqId) {
+ return GetLastFlushedSequenceIdResponse.newBuilder().setLastFlushedSequenceId(seqId).build();
+ }
+
}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java?rev=1381684&r1=1381683&r2=1381684&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/HBaseProtos.java Thu Sep 6 17:22:01 2012
@@ -4049,6 +4049,10 @@ public final class HBaseProtos {
getCoprocessorsOrBuilderList();
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
int index);
+
+ // optional uint64 completeSequenceId = 16;
+ boolean hasCompleteSequenceId();
+ long getCompleteSequenceId();
}
public static final class RegionLoad extends
com.google.protobuf.GeneratedMessage
@@ -4243,6 +4247,16 @@ public final class HBaseProtos {
return coprocessors_.get(index);
}
+ // optional uint64 completeSequenceId = 16;
+ public static final int COMPLETESEQUENCEID_FIELD_NUMBER = 16;
+ private long completeSequenceId_;
+ public boolean hasCompleteSequenceId() {
+ return ((bitField0_ & 0x00004000) == 0x00004000);
+ }
+ public long getCompleteSequenceId() {
+ return completeSequenceId_;
+ }
+
private void initFields() {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
stores_ = 0;
@@ -4259,6 +4273,7 @@ public final class HBaseProtos {
totalStaticIndexSizeKB_ = 0;
totalStaticBloomSizeKB_ = 0;
coprocessors_ = java.util.Collections.emptyList();
+ completeSequenceId_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -4331,6 +4346,9 @@ public final class HBaseProtos {
for (int i = 0; i < coprocessors_.size(); i++) {
output.writeMessage(15, coprocessors_.get(i));
}
+ if (((bitField0_ & 0x00004000) == 0x00004000)) {
+ output.writeUInt64(16, completeSequenceId_);
+ }
getUnknownFields().writeTo(output);
}
@@ -4400,6 +4418,10 @@ public final class HBaseProtos {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(15, coprocessors_.get(i));
}
+ if (((bitField0_ & 0x00004000) == 0x00004000)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(16, completeSequenceId_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -4495,6 +4517,11 @@ public final class HBaseProtos {
}
result = result && getCoprocessorsList()
.equals(other.getCoprocessorsList());
+ result = result && (hasCompleteSequenceId() == other.hasCompleteSequenceId());
+ if (hasCompleteSequenceId()) {
+ result = result && (getCompleteSequenceId()
+ == other.getCompleteSequenceId());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -4564,6 +4591,10 @@ public final class HBaseProtos {
hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER;
hash = (53 * hash) + getCoprocessorsList().hashCode();
}
+ if (hasCompleteSequenceId()) {
+ hash = (37 * hash) + COMPLETESEQUENCEID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getCompleteSequenceId());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
return hash;
}
@@ -4720,6 +4751,8 @@ public final class HBaseProtos {
} else {
coprocessorsBuilder_.clear();
}
+ completeSequenceId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00008000);
return this;
}
@@ -4827,6 +4860,10 @@ public final class HBaseProtos {
} else {
result.coprocessors_ = coprocessorsBuilder_.build();
}
+ if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
+ to_bitField0_ |= 0x00004000;
+ }
+ result.completeSequenceId_ = completeSequenceId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -4911,6 +4948,9 @@ public final class HBaseProtos {
}
}
}
+ if (other.hasCompleteSequenceId()) {
+ setCompleteSequenceId(other.getCompleteSequenceId());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -5036,6 +5076,11 @@ public final class HBaseProtos {
addCoprocessors(subBuilder.buildPartial());
break;
}
+ case 128: {
+ bitField0_ |= 0x00008000;
+ completeSequenceId_ = input.readUInt64();
+ break;
+ }
}
}
}
@@ -5591,6 +5636,27 @@ public final class HBaseProtos {
return coprocessorsBuilder_;
}
+ // optional uint64 completeSequenceId = 16;
+ private long completeSequenceId_ ;
+ public boolean hasCompleteSequenceId() {
+ return ((bitField0_ & 0x00008000) == 0x00008000);
+ }
+ public long getCompleteSequenceId() {
+ return completeSequenceId_;
+ }
+ public Builder setCompleteSequenceId(long value) {
+ bitField0_ |= 0x00008000;
+ completeSequenceId_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearCompleteSequenceId() {
+ bitField0_ = (bitField0_ & ~0x00008000);
+ completeSequenceId_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:RegionLoad)
}
@@ -11091,7 +11157,7 @@ public final class HBaseProtos {
"\0222\n\004type\030\001 \002(\0162$.RegionSpecifier.RegionS" +
"pecifierType\022\r\n\005value\030\002 \002(\014\"?\n\023RegionSpe" +
"cifierType\022\017\n\013REGION_NAME\020\001\022\027\n\023ENCODED_R" +
- "EGION_NAME\020\002\"\270\003\n\nRegionLoad\022)\n\017regionSpe" +
+ "EGION_NAME\020\002\"\324\003\n\nRegionLoad\022)\n\017regionSpe" +
"cifier\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006stores" +
"\030\002 \001(\r\022\022\n\nstorefiles\030\003 \001(\r\022\037\n\027storeUncom" +
"pressedSizeMB\030\004 \001(\r\022\027\n\017storefileSizeMB\030\005" +
@@ -11102,31 +11168,31 @@ public final class HBaseProtos {
"dKVs\030\013 \001(\004\022\027\n\017rootIndexSizeKB\030\014 \001(\r\022\036\n\026t" +
"otalStaticIndexSizeKB\030\r \001(\r\022\036\n\026totalStat" +
"icBloomSizeKB\030\016 \001(\r\022\"\n\014coprocessors\030\017 \003(" +
- "\0132\014.Coprocessor\"\342\001\n\nServerLoad\022\030\n\020number" +
- "OfRequests\030\001 \001(\r\022\035\n\025totalNumberOfRequest" +
- "s\030\002 \001(\r\022\022\n\nusedHeapMB\030\003 \001(\r\022\021\n\tmaxHeapMB" +
- "\030\004 \001(\r\022 \n\013regionLoads\030\005 \003(\0132\013.RegionLoad" +
- "\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocessor\022\027\n\017",
- "reportStartTime\030\007 \001(\004\022\025\n\rreportEndTime\030\010" +
- " \001(\004\"%\n\tTimeRange\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 " +
- "\001(\004\"0\n\006Filter\022\014\n\004name\030\001 \002(\t\022\030\n\020serialize" +
- "dFilter\030\002 \001(\014\"w\n\010KeyValue\022\013\n\003row\030\001 \002(\014\022\016" +
- "\n\006family\030\002 \002(\014\022\021\n\tqualifier\030\003 \002(\014\022\021\n\ttim" +
- "estamp\030\004 \001(\004\022\031\n\007keyType\030\005 \001(\0162\010.KeyType\022" +
- "\r\n\005value\030\006 \001(\014\"?\n\nServerName\022\020\n\010hostName" +
- "\030\001 \002(\t\022\014\n\004port\030\002 \001(\r\022\021\n\tstartCode\030\003 \001(\004\"" +
- "\033\n\013Coprocessor\022\014\n\004name\030\001 \002(\t\"-\n\016NameStri" +
- "ngPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rN",
- "ameBytesPair\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(" +
- "\014\"/\n\016BytesBytesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006se" +
- "cond\030\002 \002(\014*r\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rL" +
- "ESS_OR_EQUAL\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003" +
- "\022\024\n\020GREATER_OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005N" +
- "O_OP\020\006*_\n\007KeyType\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022" +
- "\n\n\006DELETE\020\010\022\021\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE" +
- "_FAMILY\020\016\022\014\n\007MAXIMUM\020\377\001B>\n*org.apache.ha" +
- "doop.hbase.protobuf.generatedB\013HBaseProt" +
- "osH\001\240\001\001"
+ "\0132\014.Coprocessor\022\032\n\022completeSequenceId\030\020 " +
+ "\001(\004\"\342\001\n\nServerLoad\022\030\n\020numberOfRequests\030\001" +
+ " \001(\r\022\035\n\025totalNumberOfRequests\030\002 \001(\r\022\022\n\nu" +
+ "sedHeapMB\030\003 \001(\r\022\021\n\tmaxHeapMB\030\004 \001(\r\022 \n\013re" +
+ "gionLoads\030\005 \003(\0132\013.RegionLoad\022\"\n\014coproces",
+ "sors\030\006 \003(\0132\014.Coprocessor\022\027\n\017reportStartT" +
+ "ime\030\007 \001(\004\022\025\n\rreportEndTime\030\010 \001(\004\"%\n\tTime" +
+ "Range\022\014\n\004from\030\001 \001(\004\022\n\n\002to\030\002 \001(\004\"0\n\006Filte" +
+ "r\022\014\n\004name\030\001 \002(\t\022\030\n\020serializedFilter\030\002 \001(" +
+ "\014\"w\n\010KeyValue\022\013\n\003row\030\001 \002(\014\022\016\n\006family\030\002 \002" +
+ "(\014\022\021\n\tqualifier\030\003 \002(\014\022\021\n\ttimestamp\030\004 \001(\004" +
+ "\022\031\n\007keyType\030\005 \001(\0162\010.KeyType\022\r\n\005value\030\006 \001" +
+ "(\014\"?\n\nServerName\022\020\n\010hostName\030\001 \002(\t\022\014\n\004po" +
+ "rt\030\002 \001(\r\022\021\n\tstartCode\030\003 \001(\004\"\033\n\013Coprocess" +
+ "or\022\014\n\004name\030\001 \002(\t\"-\n\016NameStringPair\022\014\n\004na",
+ "me\030\001 \002(\t\022\r\n\005value\030\002 \002(\t\",\n\rNameBytesPair" +
+ "\022\014\n\004name\030\001 \002(\t\022\r\n\005value\030\002 \001(\014\"/\n\016BytesBy" +
+ "tesPair\022\r\n\005first\030\001 \002(\014\022\016\n\006second\030\002 \002(\014*r" +
+ "\n\013CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL" +
+ "\020\001\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_" +
+ "OR_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*_\n\007Ke" +
+ "yType\022\013\n\007MINIMUM\020\000\022\007\n\003PUT\020\004\022\n\n\006DELETE\020\010\022" +
+ "\021\n\rDELETE_COLUMN\020\014\022\021\n\rDELETE_FAMILY\020\016\022\014\n" +
+ "\007MAXIMUM\020\377\001B>\n*org.apache.hadoop.hbase.p" +
+ "rotobuf.generatedB\013HBaseProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -11186,7 +11252,7 @@ public final class HBaseProtos {
internal_static_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionLoad_descriptor,
- new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "Coprocessors", },
+ new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "Coprocessors", "CompleteSequenceId", },
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.class,
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad.Builder.class);
internal_static_ServerLoad_descriptor =
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java?rev=1381684&r1=1381683&r2=1381684&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/protobuf/generated/RegionServerStatusProtos.java Thu Sep 6 17:22:01 2012
@@ -2949,6 +2949,767 @@ public final class RegionServerStatusPro
// @@protoc_insertion_point(class_scope:ReportRSFatalErrorResponse)
}
+ public interface GetLastFlushedSequenceIdRequestOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes regionName = 1;
+ boolean hasRegionName();
+ com.google.protobuf.ByteString getRegionName();
+ }
+ public static final class GetLastFlushedSequenceIdRequest extends
+ com.google.protobuf.GeneratedMessage
+ implements GetLastFlushedSequenceIdRequestOrBuilder {
+ // Use GetLastFlushedSequenceIdRequest.newBuilder() to construct.
+ private GetLastFlushedSequenceIdRequest(Builder builder) {
+ super(builder);
+ }
+ private GetLastFlushedSequenceIdRequest(boolean noInit) {}
+
+ private static final GetLastFlushedSequenceIdRequest defaultInstance;
+ public static GetLastFlushedSequenceIdRequest getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GetLastFlushedSequenceIdRequest getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required bytes regionName = 1;
+ public static final int REGIONNAME_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString regionName_;
+ public boolean hasRegionName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getRegionName() {
+ return regionName_;
+ }
+
+ private void initFields() {
+ regionName_ = com.google.protobuf.ByteString.EMPTY;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasRegionName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, regionName_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, regionName_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest) obj;
+
+ boolean result = true;
+ result = result && (hasRegionName() == other.hasRegionName());
+ if (hasRegionName()) {
+ result = result && getRegionName()
+ .equals(other.getRegionName());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasRegionName()) {
+ hash = (37 * hash) + REGIONNAME_FIELD_NUMBER;
+ hash = (53 * hash) + getRegionName().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequestOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ regionName_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest build() {
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.regionName_ = regionName_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance()) return this;
+ if (other.hasRegionName()) {
+ setRegionName(other.getRegionName());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasRegionName()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ regionName_ = input.readBytes();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required bytes regionName = 1;
+ private com.google.protobuf.ByteString regionName_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasRegionName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getRegionName() {
+ return regionName_;
+ }
+ public Builder setRegionName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ regionName_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearRegionName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ regionName_ = getDefaultInstance().getRegionName();
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:GetLastFlushedSequenceIdRequest)
+ }
+
+ static {
+ defaultInstance = new GetLastFlushedSequenceIdRequest(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:GetLastFlushedSequenceIdRequest)
+ }
+
+ public interface GetLastFlushedSequenceIdResponseOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required uint64 lastFlushedSequenceId = 1;
+ boolean hasLastFlushedSequenceId();
+ long getLastFlushedSequenceId();
+ }
+ public static final class GetLastFlushedSequenceIdResponse extends
+ com.google.protobuf.GeneratedMessage
+ implements GetLastFlushedSequenceIdResponseOrBuilder {
+ // Use GetLastFlushedSequenceIdResponse.newBuilder() to construct.
+ private GetLastFlushedSequenceIdResponse(Builder builder) {
+ super(builder);
+ }
+ private GetLastFlushedSequenceIdResponse(boolean noInit) {}
+
+ private static final GetLastFlushedSequenceIdResponse defaultInstance;
+ public static GetLastFlushedSequenceIdResponse getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public GetLastFlushedSequenceIdResponse getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required uint64 lastFlushedSequenceId = 1;
+ public static final int LASTFLUSHEDSEQUENCEID_FIELD_NUMBER = 1;
+ private long lastFlushedSequenceId_;
+ public boolean hasLastFlushedSequenceId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getLastFlushedSequenceId() {
+ return lastFlushedSequenceId_;
+ }
+
+ private void initFields() {
+ lastFlushedSequenceId_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasLastFlushedSequenceId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, lastFlushedSequenceId_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, lastFlushedSequenceId_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse other = (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) obj;
+
+ boolean result = true;
+ result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId());
+ if (hasLastFlushedSequenceId()) {
+ result = result && (getLastFlushedSequenceId()
+ == other.getLastFlushedSequenceId());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasLastFlushedSequenceId()) {
+ hash = (37 * hash) + LASTFLUSHEDSEQUENCEID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getLastFlushedSequenceId());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponseOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ lastFlushedSequenceId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse build() {
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse result = new org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.lastFlushedSequenceId_ = lastFlushedSequenceId_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance()) return this;
+ if (other.hasLastFlushedSequenceId()) {
+ setLastFlushedSequenceId(other.getLastFlushedSequenceId());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasLastFlushedSequenceId()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ lastFlushedSequenceId_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required uint64 lastFlushedSequenceId = 1;
+ private long lastFlushedSequenceId_ ;
+ public boolean hasLastFlushedSequenceId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getLastFlushedSequenceId() {
+ return lastFlushedSequenceId_;
+ }
+ public Builder setLastFlushedSequenceId(long value) {
+ bitField0_ |= 0x00000001;
+ lastFlushedSequenceId_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearLastFlushedSequenceId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ lastFlushedSequenceId_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:GetLastFlushedSequenceIdResponse)
+ }
+
+ static {
+ defaultInstance = new GetLastFlushedSequenceIdResponse(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:GetLastFlushedSequenceIdResponse)
+ }
+
public static abstract class RegionServerStatusService
implements com.google.protobuf.Service {
protected RegionServerStatusService() {}
@@ -2969,6 +3730,11 @@ public final class RegionServerStatusPro
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse> done);
+ public abstract void getLastFlushedSequenceId(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done);
+
}
public static com.google.protobuf.Service newReflectiveService(
@@ -2998,6 +3764,14 @@ public final class RegionServerStatusPro
impl.reportRSFatalError(controller, request, done);
}
+ @java.lang.Override
+ public void getLastFlushedSequenceId(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done) {
+ impl.getLastFlushedSequenceId(controller, request, done);
+ }
+
};
}
@@ -3026,6 +3800,8 @@ public final class RegionServerStatusPro
return impl.regionServerReport(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest)request);
case 2:
return impl.reportRSFatalError(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest)request);
+ case 3:
+ return impl.getLastFlushedSequenceId(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)request);
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3046,6 +3822,8 @@ public final class RegionServerStatusPro
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance();
case 2:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3066,6 +3844,8 @@ public final class RegionServerStatusPro
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance();
case 2:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3089,6 +3869,11 @@ public final class RegionServerStatusPro
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request,
com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse> done);
+ public abstract void getLastFlushedSequenceId(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done);
+
public static final
com.google.protobuf.Descriptors.ServiceDescriptor
getDescriptor() {
@@ -3126,6 +3911,11 @@ public final class RegionServerStatusPro
com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse>specializeCallback(
done));
return;
+ case 3:
+ this.getLastFlushedSequenceId(controller, (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest)request,
+ com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse>specializeCallback(
+ done));
+ return;
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3146,6 +3936,8 @@ public final class RegionServerStatusPro
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest.getDefaultInstance();
case 2:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3166,6 +3958,8 @@ public final class RegionServerStatusPro
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportResponse.getDefaultInstance();
case 2:
return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance();
+ case 3:
+ return org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance();
default:
throw new java.lang.AssertionError("Can't get here.");
}
@@ -3231,6 +4025,21 @@ public final class RegionServerStatusPro
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.class,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance()));
}
+
+ public void getLastFlushedSequenceId(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request,
+ com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse> done) {
+ channel.callMethod(
+ getDescriptor().getMethods().get(3),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance(),
+ com.google.protobuf.RpcUtil.generalizeCallback(
+ done,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance()));
+ }
}
public static BlockingInterface newBlockingStub(
@@ -3253,6 +4062,11 @@ public final class RegionServerStatusPro
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest request)
throws com.google.protobuf.ServiceException;
+
+ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request)
+ throws com.google.protobuf.ServiceException;
}
private static final class BlockingStub implements BlockingInterface {
@@ -3297,6 +4111,18 @@ public final class RegionServerStatusPro
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.getDefaultInstance());
}
+
+ public org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse getLastFlushedSequenceId(
+ com.google.protobuf.RpcController controller,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest request)
+ throws com.google.protobuf.ServiceException {
+ return (org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse) channel.callBlockingMethod(
+ getDescriptor().getMethods().get(3),
+ controller,
+ request,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.getDefaultInstance());
+ }
+
}
}
@@ -3330,6 +4156,16 @@ public final class RegionServerStatusPro
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ReportRSFatalErrorResponse_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_GetLastFlushedSequenceIdRequest_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_GetLastFlushedSequenceIdResponse_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -3349,16 +4185,21 @@ public final class RegionServerStatusPro
"rverLoad\"\034\n\032RegionServerReportResponse\"N" +
"\n\031ReportRSFatalErrorRequest\022\033\n\006server\030\001 " +
"\002(\0132\013.ServerName\022\024\n\014errorMessage\030\002 \002(\t\"\034",
- "\n\032ReportRSFatalErrorResponse2\213\002\n\031RegionS" +
- "erverStatusService\022P\n\023regionServerStartu" +
- "p\022\033.RegionServerStartupRequest\032\034.RegionS" +
- "erverStartupResponse\022M\n\022regionServerRepo" +
- "rt\022\032.RegionServerReportRequest\032\033.RegionS" +
- "erverReportResponse\022M\n\022reportRSFatalErro" +
- "r\022\032.ReportRSFatalErrorRequest\032\033.ReportRS" +
- "FatalErrorResponseBN\n*org.apache.hadoop." +
- "hbase.protobuf.generatedB\030RegionServerSt" +
- "atusProtosH\001\210\001\001\240\001\001"
+ "\n\032ReportRSFatalErrorResponse\"5\n\037GetLastF" +
+ "lushedSequenceIdRequest\022\022\n\nregionName\030\001 " +
+ "\002(\014\"A\n GetLastFlushedSequenceIdResponse\022" +
+ "\035\n\025lastFlushedSequenceId\030\001 \002(\0042\354\002\n\031Regio" +
+ "nServerStatusService\022P\n\023regionServerStar" +
+ "tup\022\033.RegionServerStartupRequest\032\034.Regio" +
+ "nServerStartupResponse\022M\n\022regionServerRe" +
+ "port\022\032.RegionServerReportRequest\032\033.Regio" +
+ "nServerReportResponse\022M\n\022reportRSFatalEr" +
+ "ror\022\032.ReportRSFatalErrorRequest\032\033.Report",
+ "RSFatalErrorResponse\022_\n\030getLastFlushedSe" +
+ "quenceId\022 .GetLastFlushedSequenceIdReque" +
+ "st\032!.GetLastFlushedSequenceIdResponseBN\n" +
+ "*org.apache.hadoop.hbase.protobuf.genera" +
+ "tedB\030RegionServerStatusProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -3413,6 +4254,22 @@ public final class RegionServerStatusPro
new java.lang.String[] { },
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.class,
org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse.Builder.class);
+ internal_static_GetLastFlushedSequenceIdRequest_descriptor =
+ getDescriptor().getMessageTypes().get(6);
+ internal_static_GetLastFlushedSequenceIdRequest_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_GetLastFlushedSequenceIdRequest_descriptor,
+ new java.lang.String[] { "RegionName", },
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.class,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest.Builder.class);
+ internal_static_GetLastFlushedSequenceIdResponse_descriptor =
+ getDescriptor().getMessageTypes().get(7);
+ internal_static_GetLastFlushedSequenceIdResponse_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_GetLastFlushedSequenceIdResponse_descriptor,
+ new java.lang.String[] { "LastFlushedSequenceId", },
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.class,
+ org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse.Builder.class);
return null;
}
};
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1381684&r1=1381683&r2=1381684&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Thu Sep 6 17:22:01 2012
@@ -79,6 +79,7 @@ import org.apache.hadoop.hbase.HConstant
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HServerInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.NotServingRegionException;
@@ -190,6 +191,8 @@ public class HRegion implements HeapSize
*/
final AtomicBoolean closing = new AtomicBoolean(false);
+ protected long completeSequenceId = -1L;
+
//////////////////////////////////////////////////////////////////////////////
// Members
//////////////////////////////////////////////////////////////////////////////
@@ -1478,7 +1481,6 @@ public class HRegion implements HeapSize
// again so its value will represent the size of the updates received
// during the flush
long sequenceId = -1L;
- long completeSequenceId = -1L;
MultiVersionConsistencyControl.WriteEntry w = null;
// We have to take a write lock during snapshot, or else a write could
@@ -1489,6 +1491,7 @@ public class HRegion implements HeapSize
long flushsize = this.memstoreSize.get();
status.setStatus("Preparing to flush by snapshotting stores");
List<StoreFlusher> storeFlushers = new ArrayList<StoreFlusher>(stores.size());
+ long completeSeqId = -1L;
try {
// Record the mvcc for all transactions in progress.
w = mvcc.beginMemstoreInsert();
@@ -1496,10 +1499,9 @@ public class HRegion implements HeapSize
sequenceId = (wal == null)? myseqid:
wal.startCacheFlush(this.regionInfo.getEncodedNameAsBytes());
- completeSequenceId = this.getCompleteCacheFlushSequenceId(sequenceId);
-
+ completeSeqId = this.getCompleteCacheFlushSequenceId(sequenceId);
for (Store s : stores.values()) {
- storeFlushers.add(s.getStoreFlusher(completeSequenceId));
+ storeFlushers.add(s.getStoreFlusher(completeSeqId));
}
// prepare flush (take a snapshot)
@@ -1577,10 +1579,15 @@ public class HRegion implements HeapSize
// log-sequence-ids can be safely ignored.
if (wal != null) {
wal.completeCacheFlush(this.regionInfo.getEncodedNameAsBytes(),
- regionInfo.getTableName(), completeSequenceId,
+ regionInfo.getTableName(), completeSeqId,
this.getRegionInfo().isMetaRegion());
}
+ // Update the last flushed sequence id for region
+ if (this.rsServices != null) {
+ completeSequenceId = completeSeqId;
+ }
+
// C. Finally notify anyone waiting on memstore to clear:
// e.g. checkResources().
synchronized (this) {
@@ -5010,7 +5017,7 @@ public class HRegion implements HeapSize
ClassSize.OBJECT +
ClassSize.ARRAY +
36 * ClassSize.REFERENCE + Bytes.SIZEOF_INT +
- (6 * Bytes.SIZEOF_LONG) +
+ (7 * Bytes.SIZEOF_LONG) +
Bytes.SIZEOF_BOOLEAN);
public static final long DEEP_OVERHEAD = FIXED_OVERHEAD +
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1381684&r1=1381683&r2=1381684&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Thu Sep 6 17:22:01 2012
@@ -49,6 +49,7 @@ import java.util.concurrent.ConcurrentMa
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.management.ObjectName;
@@ -114,6 +115,7 @@ import org.apache.hadoop.hbase.ipc.Proto
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
@@ -214,6 +216,7 @@ import org.apache.hadoop.hbase.protobuf.
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionLoad;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameStringPair;
+import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
@@ -232,7 +235,7 @@ import com.google.protobuf.RpcController
@InterfaceAudience.Private
@SuppressWarnings("deprecation")
public class HRegionServer implements ClientProtocol,
- AdminProtocol, Runnable, RegionServerServices, HBaseRPCErrorHandler {
+ AdminProtocol, Runnable, RegionServerServices, HBaseRPCErrorHandler, LastSequenceId {
public static final Log LOG = LogFactory.getLog(HRegionServer.class);
@@ -1248,7 +1251,8 @@ public class HRegionServer implements C
.setReadRequestsCount((int) r.readRequestsCount.get())
.setWriteRequestsCount((int) r.writeRequestsCount.get())
.setTotalCompactingKVs(totalCompactingKVs)
- .setCurrentCompactedKVs(currentCompactedKVs);
+ .setCurrentCompactedKVs(currentCompactedKVs)
+ .setCompleteSequenceId(r.completeSequenceId);
Set<String> coprocessors = r.getCoprocessorHost().getCoprocessors();
for (String coprocessor : coprocessors) {
regionLoad.addCoprocessors(
@@ -1622,7 +1626,7 @@ public class HRegionServer implements C
// Create the log splitting worker and start it
this.splitLogWorker = new SplitLogWorker(this.zooKeeper,
- this.getConfiguration(), this.getServerName());
+ this.getConfiguration(), this.getServerName(), this);
splitLogWorker.start();
}
@@ -1969,6 +1973,22 @@ public class HRegionServer implements C
return result;
}
+ @Override
+ public long getLastSequenceId(byte[] region) {
+ Long lastFlushedSequenceId = -1l;
+ try {
+ GetLastFlushedSequenceIdRequest req =
+ RequestConverter.buildGetLastFlushedSequenceIdRequest(region);
+ lastFlushedSequenceId = hbaseMaster.getLastFlushedSequenceId(null, req)
+ .getLastFlushedSequenceId();
+ } catch (ServiceException e) {
+ lastFlushedSequenceId = -1l;
+ LOG.warn("Unable to connect to the master to check " +
+ "the last flushed sequence id", e);
+ }
+ return lastFlushedSequenceId;
+ }
+
/**
* Closes all regions. Called on our way out.
* Assumes that its not possible for new regions to be added to onlineRegions
Added: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java?rev=1381684&view=auto
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java (added)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/LastSequenceId.java Thu Sep 6 17:22:01 2012
@@ -0,0 +1,33 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+/**
+ * Last flushed sequence Ids for the regions on region server
+ */
+@InterfaceAudience.Private
+public interface LastSequenceId {
+ /**
+ * @param regionname
+ * @return Last flushed sequence Id for regionname
+ */
+ public long getLastSequenceId(byte[] regionname);
+}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java?rev=1381684&r1=1381683&r2=1381684&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java Thu Sep 6 17:22:01 2012
@@ -30,9 +30,11 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.DeserializationException;
+import org.apache.hadoop.hbase.RegionServerStatusProtocol;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.SplitLogCounters;
import org.apache.hadoop.hbase.SplitLogTask;
+import org.apache.hadoop.hbase.master.SplitLogManager;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.FSUtils;
@@ -90,7 +92,7 @@ public class SplitLogWorker extends ZooK
}
public SplitLogWorker(ZooKeeperWatcher watcher, final Configuration conf,
- final ServerName serverName) {
+ final ServerName serverName, final LastSequenceId sequenceIdChecker) {
this(watcher, conf, serverName, new TaskExecutor () {
@Override
public Status exec(String filename, CancelableProgressable p) {
@@ -108,7 +110,7 @@ public class SplitLogWorker extends ZooK
// encountered a bad non-retry-able persistent error.
try {
if (HLogSplitter.splitLogFile(rootdir,
- fs.getFileStatus(new Path(filename)), fs, conf, p) == false) {
+ fs.getFileStatus(new Path(filename)), fs, conf, p, sequenceIdChecker) == false) {
return Status.PREEMPTED;
}
} catch (InterruptedIOException iioe) {