You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by st...@apache.org on 2013/05/24 23:18:33 UTC
svn commit: r1486221 - in /hbase/trunk:
hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/
hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/
hbase-protocol/src/main/protobuf/
hbase-server/src/main/java/org/apache/hadoop...
Author: stack
Date: Fri May 24 21:18:32 2013
New Revision: 1486221
URL: http://svn.apache.org/r1486221
Log:
HBASE-8573 Store last flushed sequence id for each store of region for Distributed Log Replay
Modified:
hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
hbase/trunk/hbase-protocol/src/main/protobuf/ZooKeeper.proto
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
Modified: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java?rev=1486221&r1=1486220&r2=1486221&view=diff
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java (original)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java Fri May 24 21:18:32 2013
@@ -29,6 +29,8 @@ import org.apache.hadoop.hbase.ServerNam
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp.CreateAndFailSilent;
@@ -52,6 +54,7 @@ import org.apache.zookeeper.proto.Delete
import org.apache.zookeeper.proto.SetDataRequest;
import org.apache.zookeeper.server.ZooKeeperSaslServer;
+import com.google.protobuf.ByteString;
import com.google.protobuf.InvalidProtocolBufferException;
import javax.security.auth.login.AppConfigurationEntry;
@@ -69,6 +72,7 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
+import java.util.TreeMap;
/**
* Internal HBase utility class for ZooKeeper.
@@ -1857,4 +1861,53 @@ public class ZKUtil {
return 0;
}
}
+
+ /**
+ * @param regionLastFlushedSequenceId the flushed sequence id of a region which is the min of its
+ * store max seq ids
+ * @param storeSequenceIds column family to sequence Id map
+ * @return Serialized protobuf of <code>RegionStoreSequenceIds</code> with pb magic prefix prepended
+ * suitable for use to filter wal edits in distributedLogReplay mode
+ */
+ public static byte[] regionSequenceIdsToByteArray(final Long regionLastFlushedSequenceId,
+ final Map<byte[], Long> storeSequenceIds) {
+ ZooKeeperProtos.RegionStoreSequenceIds.Builder regionSequenceIdsBuilder =
+ ZooKeeperProtos.RegionStoreSequenceIds.newBuilder();
+ ZooKeeperProtos.StoreSequenceId.Builder storeSequenceIdBuilder =
+ ZooKeeperProtos.StoreSequenceId.newBuilder();
+ if (storeSequenceIds != null) {
+ for (byte[] columnFamilyName : storeSequenceIds.keySet()) {
+ Long curSeqId = storeSequenceIds.get(columnFamilyName);
+ storeSequenceIdBuilder.setFamilyName(ByteString.copyFrom(columnFamilyName));
+ storeSequenceIdBuilder.setSequenceId(curSeqId);
+ regionSequenceIdsBuilder.addStoreSequenceId(storeSequenceIdBuilder.build());
+ storeSequenceIdBuilder.clear();
+ }
+ }
+ regionSequenceIdsBuilder.setLastFlushedSequenceId(regionLastFlushedSequenceId);
+ byte[] result = regionSequenceIdsBuilder.build().toByteArray();
+ return ProtobufUtil.prependPBMagic(result);
+ }
+
+ /**
+ * @param bytes Content of serialized data of RegionStoreSequenceIds
+ * @return a RegionStoreSequenceIds object
+ * @throws DeserializationException
+ */
+ public static RegionStoreSequenceIds parseRegionStoreSequenceIds(final byte[] bytes)
+ throws DeserializationException {
+ if (bytes == null || !ProtobufUtil.isPBMagicPrefix(bytes)) {
+ throw new DeserializationException("Unable to parse RegionStoreSequenceIds.");
+ }
+ RegionStoreSequenceIds.Builder regionSequenceIdsBuilder =
+ ZooKeeperProtos.RegionStoreSequenceIds.newBuilder();
+ int pblen = ProtobufUtil.lengthOfPBMagic();
+ RegionStoreSequenceIds storeIds = null;
+ try {
+ storeIds = regionSequenceIdsBuilder.mergeFrom(bytes, pblen, bytes.length - pblen).build();
+ } catch (InvalidProtocolBufferException e) {
+ throw new DeserializationException(e);
+ }
+ return storeIds;
+ }
}
Modified: hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java?rev=1486221&r1=1486220&r2=1486221&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java (original)
+++ hbase/trunk/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java Fri May 24 21:18:32 2013
@@ -5741,6 +5741,1132 @@ public final class ZooKeeperProtos {
// @@protoc_insertion_point(class_scope:TableLock)
}
+ public interface StoreSequenceIdOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required bytes familyName = 1;
+ boolean hasFamilyName();
+ com.google.protobuf.ByteString getFamilyName();
+
+ // required uint64 sequenceId = 2;
+ boolean hasSequenceId();
+ long getSequenceId();
+ }
+ public static final class StoreSequenceId extends
+ com.google.protobuf.GeneratedMessage
+ implements StoreSequenceIdOrBuilder {
+ // Use StoreSequenceId.newBuilder() to construct.
+ private StoreSequenceId(Builder builder) {
+ super(builder);
+ }
+ private StoreSequenceId(boolean noInit) {}
+
+ private static final StoreSequenceId defaultInstance;
+ public static StoreSequenceId getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public StoreSequenceId getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required bytes familyName = 1;
+ public static final int FAMILYNAME_FIELD_NUMBER = 1;
+ private com.google.protobuf.ByteString familyName_;
+ public boolean hasFamilyName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getFamilyName() {
+ return familyName_;
+ }
+
+ // required uint64 sequenceId = 2;
+ public static final int SEQUENCEID_FIELD_NUMBER = 2;
+ private long sequenceId_;
+ public boolean hasSequenceId() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getSequenceId() {
+ return sequenceId_;
+ }
+
+ private void initFields() {
+ familyName_ = com.google.protobuf.ByteString.EMPTY;
+ sequenceId_ = 0L;
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasFamilyName()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ if (!hasSequenceId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeBytes(1, familyName_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ output.writeUInt64(2, sequenceId_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeBytesSize(1, familyName_);
+ }
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(2, sequenceId_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId) obj;
+
+ boolean result = true;
+ result = result && (hasFamilyName() == other.hasFamilyName());
+ if (hasFamilyName()) {
+ result = result && getFamilyName()
+ .equals(other.getFamilyName());
+ }
+ result = result && (hasSequenceId() == other.hasSequenceId());
+ if (hasSequenceId()) {
+ result = result && (getSequenceId()
+ == other.getSequenceId());
+ }
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasFamilyName()) {
+ hash = (37 * hash) + FAMILYNAME_FIELD_NUMBER;
+ hash = (53 * hash) + getFamilyName().hashCode();
+ }
+ if (hasSequenceId()) {
+ hash = (37 * hash) + SEQUENCEID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getSequenceId());
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_StoreSequenceId_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ familyName_ = com.google.protobuf.ByteString.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ sequenceId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId build() {
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.familyName_ = familyName_;
+ if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+ to_bitField0_ |= 0x00000002;
+ }
+ result.sequenceId_ = sequenceId_;
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance()) return this;
+ if (other.hasFamilyName()) {
+ setFamilyName(other.getFamilyName());
+ }
+ if (other.hasSequenceId()) {
+ setSequenceId(other.getSequenceId());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasFamilyName()) {
+
+ return false;
+ }
+ if (!hasSequenceId()) {
+
+ return false;
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 10: {
+ bitField0_ |= 0x00000001;
+ familyName_ = input.readBytes();
+ break;
+ }
+ case 16: {
+ bitField0_ |= 0x00000002;
+ sequenceId_ = input.readUInt64();
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required bytes familyName = 1;
+ private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY;
+ public boolean hasFamilyName() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public com.google.protobuf.ByteString getFamilyName() {
+ return familyName_;
+ }
+ public Builder setFamilyName(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ familyName_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearFamilyName() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ familyName_ = getDefaultInstance().getFamilyName();
+ onChanged();
+ return this;
+ }
+
+ // required uint64 sequenceId = 2;
+ private long sequenceId_ ;
+ public boolean hasSequenceId() {
+ return ((bitField0_ & 0x00000002) == 0x00000002);
+ }
+ public long getSequenceId() {
+ return sequenceId_;
+ }
+ public Builder setSequenceId(long value) {
+ bitField0_ |= 0x00000002;
+ sequenceId_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearSequenceId() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ sequenceId_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // @@protoc_insertion_point(builder_scope:StoreSequenceId)
+ }
+
+ static {
+ defaultInstance = new StoreSequenceId(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:StoreSequenceId)
+ }
+
+ public interface RegionStoreSequenceIdsOrBuilder
+ extends com.google.protobuf.MessageOrBuilder {
+
+ // required uint64 lastFlushedSequenceId = 1;
+ boolean hasLastFlushedSequenceId();
+ long getLastFlushedSequenceId();
+
+ // repeated .StoreSequenceId storeSequenceId = 2;
+ java.util.List<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId>
+ getStoreSequenceIdList();
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index);
+ int getStoreSequenceIdCount();
+ java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder>
+ getStoreSequenceIdOrBuilderList();
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder(
+ int index);
+ }
+ public static final class RegionStoreSequenceIds extends
+ com.google.protobuf.GeneratedMessage
+ implements RegionStoreSequenceIdsOrBuilder {
+ // Use RegionStoreSequenceIds.newBuilder() to construct.
+ private RegionStoreSequenceIds(Builder builder) {
+ super(builder);
+ }
+ private RegionStoreSequenceIds(boolean noInit) {}
+
+ private static final RegionStoreSequenceIds defaultInstance;
+ public static RegionStoreSequenceIds getDefaultInstance() {
+ return defaultInstance;
+ }
+
+ public RegionStoreSequenceIds getDefaultInstanceForType() {
+ return defaultInstance;
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable;
+ }
+
+ private int bitField0_;
+ // required uint64 lastFlushedSequenceId = 1;
+ public static final int LASTFLUSHEDSEQUENCEID_FIELD_NUMBER = 1;
+ private long lastFlushedSequenceId_;
+ public boolean hasLastFlushedSequenceId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getLastFlushedSequenceId() {
+ return lastFlushedSequenceId_;
+ }
+
+ // repeated .StoreSequenceId storeSequenceId = 2;
+ public static final int STORESEQUENCEID_FIELD_NUMBER = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId> storeSequenceId_;
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId> getStoreSequenceIdList() {
+ return storeSequenceId_;
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder>
+ getStoreSequenceIdOrBuilderList() {
+ return storeSequenceId_;
+ }
+ public int getStoreSequenceIdCount() {
+ return storeSequenceId_.size();
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index) {
+ return storeSequenceId_.get(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder(
+ int index) {
+ return storeSequenceId_.get(index);
+ }
+
+ private void initFields() {
+ lastFlushedSequenceId_ = 0L;
+ storeSequenceId_ = java.util.Collections.emptyList();
+ }
+ private byte memoizedIsInitialized = -1;
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized != -1) return isInitialized == 1;
+
+ if (!hasLastFlushedSequenceId()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ for (int i = 0; i < getStoreSequenceIdCount(); i++) {
+ if (!getStoreSequenceId(i).isInitialized()) {
+ memoizedIsInitialized = 0;
+ return false;
+ }
+ }
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ getSerializedSize();
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ output.writeUInt64(1, lastFlushedSequenceId_);
+ }
+ for (int i = 0; i < storeSequenceId_.size(); i++) {
+ output.writeMessage(2, storeSequenceId_.get(i));
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ private int memoizedSerializedSize = -1;
+ public int getSerializedSize() {
+ int size = memoizedSerializedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) == 0x00000001)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(1, lastFlushedSequenceId_);
+ }
+ for (int i = 0; i < storeSequenceId_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(2, storeSequenceId_.get(i));
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSerializedSize = size;
+ return size;
+ }
+
+ private static final long serialVersionUID = 0L;
+ @java.lang.Override
+ protected java.lang.Object writeReplace()
+ throws java.io.ObjectStreamException {
+ return super.writeReplace();
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds)) {
+ return super.equals(obj);
+ }
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds) obj;
+
+ boolean result = true;
+ result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId());
+ if (hasLastFlushedSequenceId()) {
+ result = result && (getLastFlushedSequenceId()
+ == other.getLastFlushedSequenceId());
+ }
+ result = result && getStoreSequenceIdList()
+ .equals(other.getStoreSequenceIdList());
+ result = result &&
+ getUnknownFields().equals(other.getUnknownFields());
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ int hash = 41;
+ hash = (19 * hash) + getDescriptorForType().hashCode();
+ if (hasLastFlushedSequenceId()) {
+ hash = (37 * hash) + LASTFLUSHEDSEQUENCEID_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getLastFlushedSequenceId());
+ }
+ if (getStoreSequenceIdCount() > 0) {
+ hash = (37 * hash) + STORESEQUENCEID_FIELD_NUMBER;
+ hash = (53 * hash) + getStoreSequenceIdList().hashCode();
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ return hash;
+ }
+
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return newBuilder().mergeFrom(data, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ Builder builder = newBuilder();
+ if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+ return builder.buildParsed();
+ } else {
+ return null;
+ }
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input).buildParsed();
+ }
+ public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return newBuilder().mergeFrom(input, extensionRegistry)
+ .buildParsed();
+ }
+
+ public static Builder newBuilder() { return Builder.create(); }
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds prototype) {
+ return newBuilder().mergeFrom(prototype);
+ }
+ public Builder toBuilder() { return newBuilder(this); }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessage.Builder<Builder>
+ implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIdsOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_descriptor;
+ }
+
+ protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable;
+ }
+
+ // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.newBuilder()
+ private Builder() {
+ maybeForceBuilderInitialization();
+ }
+
+ private Builder(BuilderParent parent) {
+ super(parent);
+ maybeForceBuilderInitialization();
+ }
+ private void maybeForceBuilderInitialization() {
+ if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+ getStoreSequenceIdFieldBuilder();
+ }
+ }
+ private static Builder create() {
+ return new Builder();
+ }
+
+ public Builder clear() {
+ super.clear();
+ lastFlushedSequenceId_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ if (storeSequenceIdBuilder_ == null) {
+ storeSequenceId_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ storeSequenceIdBuilder_.clear();
+ }
+ return this;
+ }
+
+ public Builder clone() {
+ return create().mergeFrom(buildPartial());
+ }
+
+ public com.google.protobuf.Descriptors.Descriptor
+ getDescriptorForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.getDescriptor();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds getDefaultInstanceForType() {
+ return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.getDefaultInstance();
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds build() {
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds buildParsed()
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(
+ result).asInvalidProtocolBufferException();
+ }
+ return result;
+ }
+
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds buildPartial() {
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds(this);
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+ to_bitField0_ |= 0x00000001;
+ }
+ result.lastFlushedSequenceId_ = lastFlushedSequenceId_;
+ if (storeSequenceIdBuilder_ == null) {
+ if (((bitField0_ & 0x00000002) == 0x00000002)) {
+ storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_);
+ bitField0_ = (bitField0_ & ~0x00000002);
+ }
+ result.storeSequenceId_ = storeSequenceId_;
+ } else {
+ result.storeSequenceId_ = storeSequenceIdBuilder_.build();
+ }
+ result.bitField0_ = to_bitField0_;
+ onBuilt();
+ return result;
+ }
+
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds) {
+ return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds)other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds other) {
+ if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.getDefaultInstance()) return this;
+ if (other.hasLastFlushedSequenceId()) {
+ setLastFlushedSequenceId(other.getLastFlushedSequenceId());
+ }
+ if (storeSequenceIdBuilder_ == null) {
+ if (!other.storeSequenceId_.isEmpty()) {
+ if (storeSequenceId_.isEmpty()) {
+ storeSequenceId_ = other.storeSequenceId_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ } else {
+ ensureStoreSequenceIdIsMutable();
+ storeSequenceId_.addAll(other.storeSequenceId_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.storeSequenceId_.isEmpty()) {
+ if (storeSequenceIdBuilder_.isEmpty()) {
+ storeSequenceIdBuilder_.dispose();
+ storeSequenceIdBuilder_ = null;
+ storeSequenceId_ = other.storeSequenceId_;
+ bitField0_ = (bitField0_ & ~0x00000002);
+ storeSequenceIdBuilder_ =
+ com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
+ getStoreSequenceIdFieldBuilder() : null;
+ } else {
+ storeSequenceIdBuilder_.addAllMessages(other.storeSequenceId_);
+ }
+ }
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ return this;
+ }
+
+ public final boolean isInitialized() {
+ if (!hasLastFlushedSequenceId()) {
+
+ return false;
+ }
+ for (int i = 0; i < getStoreSequenceIdCount(); i++) {
+ if (!getStoreSequenceId(i).isInitialized()) {
+
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder(
+ this.getUnknownFields());
+ while (true) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ default: {
+ if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+ this.setUnknownFields(unknownFields.build());
+ onChanged();
+ return this;
+ }
+ break;
+ }
+ case 8: {
+ bitField0_ |= 0x00000001;
+ lastFlushedSequenceId_ = input.readUInt64();
+ break;
+ }
+ case 18: {
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.newBuilder();
+ input.readMessage(subBuilder, extensionRegistry);
+ addStoreSequenceId(subBuilder.buildPartial());
+ break;
+ }
+ }
+ }
+ }
+
+ private int bitField0_;
+
+ // required uint64 lastFlushedSequenceId = 1;
+ private long lastFlushedSequenceId_ ;
+ public boolean hasLastFlushedSequenceId() {
+ return ((bitField0_ & 0x00000001) == 0x00000001);
+ }
+ public long getLastFlushedSequenceId() {
+ return lastFlushedSequenceId_;
+ }
+ public Builder setLastFlushedSequenceId(long value) {
+ bitField0_ |= 0x00000001;
+ lastFlushedSequenceId_ = value;
+ onChanged();
+ return this;
+ }
+ public Builder clearLastFlushedSequenceId() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ lastFlushedSequenceId_ = 0L;
+ onChanged();
+ return this;
+ }
+
+ // repeated .StoreSequenceId storeSequenceId = 2;
+ private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId> storeSequenceId_ =
+ java.util.Collections.emptyList();
+ private void ensureStoreSequenceIdIsMutable() {
+ if (!((bitField0_ & 0x00000002) == 0x00000002)) {
+ storeSequenceId_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId>(storeSequenceId_);
+ bitField0_ |= 0x00000002;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder> storeSequenceIdBuilder_;
+
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId> getStoreSequenceIdList() {
+ if (storeSequenceIdBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(storeSequenceId_);
+ } else {
+ return storeSequenceIdBuilder_.getMessageList();
+ }
+ }
+ public int getStoreSequenceIdCount() {
+ if (storeSequenceIdBuilder_ == null) {
+ return storeSequenceId_.size();
+ } else {
+ return storeSequenceIdBuilder_.getCount();
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId getStoreSequenceId(int index) {
+ if (storeSequenceIdBuilder_ == null) {
+ return storeSequenceId_.get(index);
+ } else {
+ return storeSequenceIdBuilder_.getMessage(index);
+ }
+ }
+ public Builder setStoreSequenceId(
+ int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) {
+ if (storeSequenceIdBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureStoreSequenceIdIsMutable();
+ storeSequenceId_.set(index, value);
+ onChanged();
+ } else {
+ storeSequenceIdBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ public Builder setStoreSequenceId(
+ int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) {
+ if (storeSequenceIdBuilder_ == null) {
+ ensureStoreSequenceIdIsMutable();
+ storeSequenceId_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ storeSequenceIdBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addStoreSequenceId(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) {
+ if (storeSequenceIdBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureStoreSequenceIdIsMutable();
+ storeSequenceId_.add(value);
+ onChanged();
+ } else {
+ storeSequenceIdBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ public Builder addStoreSequenceId(
+ int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId value) {
+ if (storeSequenceIdBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureStoreSequenceIdIsMutable();
+ storeSequenceId_.add(index, value);
+ onChanged();
+ } else {
+ storeSequenceIdBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ public Builder addStoreSequenceId(
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) {
+ if (storeSequenceIdBuilder_ == null) {
+ ensureStoreSequenceIdIsMutable();
+ storeSequenceId_.add(builderForValue.build());
+ onChanged();
+ } else {
+ storeSequenceIdBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addStoreSequenceId(
+ int index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder builderForValue) {
+ if (storeSequenceIdBuilder_ == null) {
+ ensureStoreSequenceIdIsMutable();
+ storeSequenceId_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ storeSequenceIdBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ public Builder addAllStoreSequenceId(
+ java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId> values) {
+ if (storeSequenceIdBuilder_ == null) {
+ ensureStoreSequenceIdIsMutable();
+ super.addAll(values, storeSequenceId_);
+ onChanged();
+ } else {
+ storeSequenceIdBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ public Builder clearStoreSequenceId() {
+ if (storeSequenceIdBuilder_ == null) {
+ storeSequenceId_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000002);
+ onChanged();
+ } else {
+ storeSequenceIdBuilder_.clear();
+ }
+ return this;
+ }
+ public Builder removeStoreSequenceId(int index) {
+ if (storeSequenceIdBuilder_ == null) {
+ ensureStoreSequenceIdIsMutable();
+ storeSequenceId_.remove(index);
+ onChanged();
+ } else {
+ storeSequenceIdBuilder_.remove(index);
+ }
+ return this;
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder getStoreSequenceIdBuilder(
+ int index) {
+ return getStoreSequenceIdFieldBuilder().getBuilder(index);
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder(
+ int index) {
+ if (storeSequenceIdBuilder_ == null) {
+ return storeSequenceId_.get(index); } else {
+ return storeSequenceIdBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder>
+ getStoreSequenceIdOrBuilderList() {
+ if (storeSequenceIdBuilder_ != null) {
+ return storeSequenceIdBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(storeSequenceId_);
+ }
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder() {
+ return getStoreSequenceIdFieldBuilder().addBuilder(
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance());
+ }
+ public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder(
+ int index) {
+ return getStoreSequenceIdFieldBuilder().addBuilder(
+ index, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.getDefaultInstance());
+ }
+ public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder>
+ getStoreSequenceIdBuilderList() {
+ return getStoreSequenceIdFieldBuilder().getBuilderList();
+ }
+ private com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder>
+ getStoreSequenceIdFieldBuilder() {
+ if (storeSequenceIdBuilder_ == null) {
+ storeSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceIdOrBuilder>(
+ storeSequenceId_,
+ ((bitField0_ & 0x00000002) == 0x00000002),
+ getParentForChildren(),
+ isClean());
+ storeSequenceId_ = null;
+ }
+ return storeSequenceIdBuilder_;
+ }
+
+ // @@protoc_insertion_point(builder_scope:RegionStoreSequenceIds)
+ }
+
+ static {
+ defaultInstance = new RegionStoreSequenceIds(true);
+ defaultInstance.initFields();
+ }
+
+ // @@protoc_insertion_point(class_scope:RegionStoreSequenceIds)
+ }
+
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RootRegionServer_descriptor;
private static
@@ -5796,6 +6922,16 @@ public final class ZooKeeperProtos {
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_TableLock_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_StoreSequenceId_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_StoreSequenceId_fieldAccessorTable;
+ private static com.google.protobuf.Descriptors.Descriptor
+ internal_static_RegionStoreSequenceIds_descriptor;
+ private static
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable
+ internal_static_RegionStoreSequenceIds_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
@@ -5827,9 +6963,13 @@ public final class ZooKeeperProtos {
"ner\030\001 \002(\t\"\207\001\n\tTableLock\022\021\n\ttableName\030\001 \001",
"(\014\022\036\n\tlockOwner\030\002 \001(\0132\013.ServerName\022\020\n\010th" +
"readId\030\003 \001(\003\022\020\n\010isShared\030\004 \001(\010\022\017\n\007purpos" +
- "e\030\005 \001(\t\022\022\n\ncreateTime\030\006 \001(\003BE\n*org.apach" +
- "e.hadoop.hbase.protobuf.generatedB\017ZooKe" +
- "eperProtosH\001\210\001\001\240\001\001"
+ "e\030\005 \001(\t\022\022\n\ncreateTime\030\006 \001(\003\"9\n\017StoreSequ" +
+ "enceId\022\022\n\nfamilyName\030\001 \002(\014\022\022\n\nsequenceId" +
+ "\030\002 \002(\004\"b\n\026RegionStoreSequenceIds\022\035\n\025last" +
+ "FlushedSequenceId\030\001 \002(\004\022)\n\017storeSequence" +
+ "Id\030\002 \003(\0132\020.StoreSequenceIdBE\n*org.apache" +
+ ".hadoop.hbase.protobuf.generatedB\017ZooKee" +
+ "perProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -5924,6 +7064,22 @@ public final class ZooKeeperProtos {
new java.lang.String[] { "TableName", "LockOwner", "ThreadId", "IsShared", "Purpose", "CreateTime", },
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.class,
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.TableLock.Builder.class);
+ internal_static_StoreSequenceId_descriptor =
+ getDescriptor().getMessageTypes().get(11);
+ internal_static_StoreSequenceId_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_StoreSequenceId_descriptor,
+ new java.lang.String[] { "FamilyName", "SequenceId", },
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.class,
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId.Builder.class);
+ internal_static_RegionStoreSequenceIds_descriptor =
+ getDescriptor().getMessageTypes().get(12);
+ internal_static_RegionStoreSequenceIds_fieldAccessorTable = new
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+ internal_static_RegionStoreSequenceIds_descriptor,
+ new java.lang.String[] { "LastFlushedSequenceId", "StoreSequenceId", },
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.class,
+ org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds.Builder.class);
return null;
}
};
Modified: hbase/trunk/hbase-protocol/src/main/protobuf/ZooKeeper.proto
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-protocol/src/main/protobuf/ZooKeeper.proto?rev=1486221&r1=1486220&r2=1486221&view=diff
==============================================================================
--- hbase/trunk/hbase-protocol/src/main/protobuf/ZooKeeper.proto (original)
+++ hbase/trunk/hbase-protocol/src/main/protobuf/ZooKeeper.proto Fri May 24 21:18:32 2013
@@ -145,3 +145,20 @@ message TableLock {
optional string purpose = 5;
optional int64 createTime = 6;
}
+
+/**
+ * sequence Id of a store
+ */
+message StoreSequenceId {
+ required bytes familyName = 1;
+ required uint64 sequenceId = 2;
+}
+
+/**
+ * contains a sequence id of a region which should be the minimum of its store sequence ids and
+ * list sequence ids of the region's stores
+ */
+message RegionStoreSequenceIds {
+ required uint64 lastFlushedSequenceId = 1;
+ repeated StoreSequenceId storeSequenceId = 2;
+}
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java?rev=1486221&r1=1486220&r2=1486221&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java Fri May 24 21:18:32 2013
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.exception
import org.apache.hadoop.hbase.master.SplitLogManager.TaskFinisher.Status;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.regionserver.SplitLogWorker;
import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
@@ -1102,8 +1103,7 @@ public class SplitLogManager extends Zoo
lastRecordedFlushedSequenceId = SplitLogManager.parseLastFlushedSequenceIdFrom(data);
if (lastRecordedFlushedSequenceId < lastSequenceId) {
// update last flushed sequence id in the region level
- ZKUtil.setData(this.watcher, nodePath,
- ZKUtil.positionToByteArray(lastSequenceId));
+ ZKUtil.setData(this.watcher, nodePath, ZKUtil.positionToByteArray(lastSequenceId));
}
}
// go one level deeper with server name
@@ -1113,7 +1113,7 @@ public class SplitLogManager extends Zoo
lastSequenceId = lastRecordedFlushedSequenceId;
}
ZKUtil.createSetData(this.watcher, nodePath,
- ZKUtil.positionToByteArray(lastSequenceId));
+ ZKUtil.regionSequenceIdsToByteArray(lastSequenceId, null));
LOG.debug("Mark region " + regionEncodeName + " recovering from failed region server "
+ serverName);
@@ -1158,10 +1158,11 @@ public class SplitLogManager extends Zoo
* @param zkw
* @param serverName
* @param encodedRegionName
- * @return the last flushed sequence id recorded in ZK of the region for <code>serverName<code>
+ * @return the last flushed sequence ids recorded in ZK of the region for <code>serverName<code>
* @throws IOException
*/
- public static long getLastFlushedSequenceId(ZooKeeperWatcher zkw, String serverName,
+ public static RegionStoreSequenceIds getRegionFlushedSequenceId(ZooKeeperWatcher zkw,
+ String serverName,
String encodedRegionName) throws IOException {
// when SplitLogWorker recovers a region by directly replaying unflushed WAL edits,
// last flushed sequence Id changes when newly assigned RS flushes writes to the region.
@@ -1170,19 +1171,21 @@ public class SplitLogManager extends Zoo
// when different newly assigned RS flushes the region.
// Therefore, in this mode we need to fetch last sequence Ids from ZK where we keep history of
// last flushed sequence Id for each failed RS instance.
- long lastFlushedSequenceId = -1;
+ RegionStoreSequenceIds result = null;
String nodePath = ZKUtil.joinZNode(zkw.recoveringRegionsZNode, encodedRegionName);
nodePath = ZKUtil.joinZNode(nodePath, serverName);
try {
byte[] data = ZKUtil.getData(zkw, nodePath);
if (data != null) {
- lastFlushedSequenceId = SplitLogManager.parseLastFlushedSequenceIdFrom(data);
+ result = ZKUtil.parseRegionStoreSequenceIds(data);
}
} catch (KeeperException e) {
throw new IOException("Cannot get lastFlushedSequenceId from ZooKeeper for server="
+ serverName + "; region=" + encodedRegionName, e);
+ } catch (DeserializationException e) {
+ LOG.warn("Can't parse last flushed sequence Id from znode:" + nodePath, e);
}
- return lastFlushedSequenceId;
+ return result;
}
/**
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java?rev=1486221&r1=1486220&r2=1486221&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java Fri May 24 21:18:32 2013
@@ -291,10 +291,13 @@ public class HRegion implements HeapSize
private final AtomicInteger majorInProgress = new AtomicInteger(0);
private final AtomicInteger minorInProgress = new AtomicInteger(0);
- /**
- * Min sequence id stored in store files of a region when opening the region
- */
- private long minSeqIdForLogReplay = -1;
+ //
+ // Context: During replay we want to ensure that we do not lose any data. So, we
+ // have to be conservative in how we replay logs. For each store, we calculate
+ // the maxSeqId up to which the store was flushed. And, skip the edits which
+ // are equal to or lower than maxSeqId for each store.
+ // The following map is populated when opening the region
+ Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
/**
* @return The smallest mvcc readPoint across all the scanners in this
@@ -620,12 +623,7 @@ public class HRegion implements HeapSize
private long initializeRegionStores(final CancelableProgressable reporter, MonitoredTask status)
throws IOException, UnsupportedEncodingException {
// Load in all the HStores.
- //
- // Context: During replay we want to ensure that we do not lose any data. So, we
- // have to be conservative in how we replay logs. For each store, we calculate
- // the maxSeqId up to which the store was flushed. And, skip the edits which
- // is equal to or lower than maxSeqId for each store.
- Map<byte[], Long> maxSeqIdInStores = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+
long maxSeqId = -1;
// initialized to -1 so that we pick up MemstoreTS from column families
long maxMemstoreTS = -1;
@@ -657,9 +655,6 @@ public class HRegion implements HeapSize
long storeSeqIdForReplay = store.getMaxSequenceId(false);
maxSeqIdInStores.put(store.getColumnFamilyName().getBytes(),
storeSeqIdForReplay);
- if (this.minSeqIdForLogReplay == -1 || storeSeqIdForReplay < this.minSeqIdForLogReplay) {
- this.minSeqIdForLogReplay = storeSeqIdForReplay;
- }
// Include bulk loaded files when determining seqIdForAssignment
long storeSeqIdForAssignment = store.getMaxSequenceId(true);
if (maxSeqId == -1 || storeSeqIdForAssignment > maxSeqId) {
@@ -5036,8 +5031,8 @@ public class HRegion implements HeapSize
public static final long FIXED_OVERHEAD = ClassSize.align(
ClassSize.OBJECT +
ClassSize.ARRAY +
- 38 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT +
- (12 * Bytes.SIZEOF_LONG) +
+ 39 * ClassSize.REFERENCE + 2 * Bytes.SIZEOF_INT +
+ (11 * Bytes.SIZEOF_LONG) +
Bytes.SIZEOF_BOOLEAN);
public static final long DEEP_OVERHEAD = FIXED_OVERHEAD +
@@ -5051,6 +5046,7 @@ public class HRegion implements HeapSize
(2 * ClassSize.REENTRANT_LOCK) + // lock, updatesLock
ClassSize.ARRAYLIST + // recentFlushes
MultiVersionConsistencyControl.FIXED_SIZE // mvcc
+ + ClassSize.TREEMAP // maxSeqIdInStores
;
@Override
@@ -5562,11 +5558,11 @@ public class HRegion implements HeapSize
}
/**
- * Gets the min sequence number that was read from storage when this region was opened. WAL Edits
- * with smaller sequence number will be skipped from replay.
+ * Gets max sequence ids of stores that was read from storage when this region was opened. WAL
+ * Edits with smaller or equal sequence number will be skipped from replay.
*/
- public long getMinSeqIdForLogReplay() {
- return this.minSeqIdForLogReplay;
+ public Map<byte[], Long> getMaxStoreSeqIdForLogReplay() {
+ return this.maxSeqIdInStores;
}
/**
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1486221&r1=1486220&r2=1486221&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Fri May 24 21:18:32 2013
@@ -4241,7 +4241,14 @@ public class HRegionServer implements Cl
HRegionInfo region = r.getRegionInfo();
ZooKeeperWatcher zkw = getZooKeeper();
String previousRSName = this.getLastFailedRSFromZK(region.getEncodedName());
- long minSeqIdForLogReplay = r.getMinSeqIdForLogReplay();
+ Map<byte[], Long> maxSeqIdInStores = r.getMaxStoreSeqIdForLogReplay();
+ long minSeqIdForLogReplay = -1;
+ for (byte[] columnFamily : maxSeqIdInStores.keySet()) {
+ Long storeSeqIdForReplay = maxSeqIdInStores.get(columnFamily);
+ if (minSeqIdForLogReplay == -1 || storeSeqIdForReplay < minSeqIdForLogReplay) {
+ minSeqIdForLogReplay = storeSeqIdForReplay;
+ }
+ }
long lastRecordedFlushedSequenceId = -1;
String nodePath = ZKUtil.joinZNode(this.zooKeeper.recoveringRegionsZNode,
region.getEncodedName());
@@ -4254,10 +4261,11 @@ public class HRegionServer implements Cl
ZKUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay));
}
if (previousRSName != null) {
- // one level deeper for failed RS
+ // one level deeper for the failed RS
nodePath = ZKUtil.joinZNode(nodePath, previousRSName);
- ZKUtil.setData(zkw, nodePath, ZKUtil.positionToByteArray(minSeqIdForLogReplay));
- LOG.debug("Update last flushed sequence id of region " + region.getEncodedName() + " for "
+ ZKUtil.setData(zkw, nodePath,
+ ZKUtil.regionSequenceIdsToByteArray(minSeqIdForLogReplay, maxSeqIdInStores));
+ LOG.debug("Update last flushed sequence id of region " + region.getEncodedName() + " for "
+ previousRSName);
} else {
LOG.warn("Can't find failed region server for recovering region " + region.getEncodedName());
Modified: hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java?rev=1486221&r1=1486220&r2=1486221&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java (original)
+++ hbase/trunk/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java Fri May 24 21:18:32 2013
@@ -28,6 +28,7 @@ import java.net.ConnectException;
import java.text.ParseException;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
@@ -81,6 +82,8 @@ import org.apache.hadoop.hbase.monitorin
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.Table;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.LastSequenceId;
@@ -154,7 +157,14 @@ public class HLogSplitter {
protected boolean distributedLogReplay;
// Map encodedRegionName -> lastFlushedSequenceId
- Map<String, Long> lastFlushedSequenceIds = new ConcurrentHashMap<String, Long>();
+ protected Map<String, Long> lastFlushedSequenceIds = new ConcurrentHashMap<String, Long>();
+
+ // Map encodedRegionName -> maxSeqIdInStores
+ protected Map<String, Map<byte[], Long>> regionMaxSeqIdInStores =
+ new ConcurrentHashMap<String, Map<byte[], Long>>();
+
+ // Failed region server that the wal file being split belongs to
+ protected String failedServerName = "";
// Number of writer threads
private final int numWriterThreads;
@@ -528,19 +538,23 @@ public class HLogSplitter {
Entry entry;
Long lastFlushedSequenceId = -1L;
ServerName serverName = HLogUtil.getServerNameFromHLogDirectoryName(logPath);
- String serverNameStr = (serverName == null) ? "" : serverName.getServerName();
+ failedServerName = (serverName == null) ? "" : serverName.getServerName();
while ((entry = getNextLogLine(in, logPath, skipErrors)) != null) {
byte[] region = entry.getKey().getEncodedRegionName();
String key = Bytes.toString(region);
lastFlushedSequenceId = lastFlushedSequenceIds.get(key);
if (lastFlushedSequenceId == null) {
if (this.distributedLogReplay) {
- lastFlushedSequenceId = SplitLogManager.getLastFlushedSequenceId(this.watcher,
- serverNameStr, key);
+ lastFlushedSequenceId = -1L;
+ RegionStoreSequenceIds ids =
+ SplitLogManager.getRegionFlushedSequenceId(this.watcher, failedServerName, key);
+ if (ids != null) {
+ lastFlushedSequenceId = ids.getLastFlushedSequenceId();
+ }
} else if (sequenceIdChecker != null) {
lastFlushedSequenceId = sequenceIdChecker.getLastSequenceId(region);
}
- if (lastFlushedSequenceId != null) {
+ if (lastFlushedSequenceId != null && lastFlushedSequenceId >= 0) {
lastFlushedSequenceIds.put(key, lastFlushedSequenceId);
} else {
lastFlushedSequenceId = -1L;
@@ -1712,6 +1726,8 @@ public class HLogSplitter {
this.skippedEdits.incrementAndGet();
continue;
}
+
+ Map<byte[], Long> maxStoreSequenceIds = null;
boolean needSkip = false;
Put put = null;
Delete del = null;
@@ -1761,16 +1777,32 @@ public class HLogSplitter {
needSkip = true;
break;
}
- cachedLastFlushedSequenceId = lastFlushedSequenceIds.get(loc.getRegionInfo()
- .getEncodedName());
+
+ cachedLastFlushedSequenceId =
+ lastFlushedSequenceIds.get(loc.getRegionInfo().getEncodedName());
if (cachedLastFlushedSequenceId != null
&& cachedLastFlushedSequenceId >= entry.getKey().getLogSeqNum()) {
// skip the whole HLog entry
this.skippedEdits.incrementAndGet();
needSkip = true;
break;
+ } else {
+ if (maxStoreSequenceIds == null) {
+ maxStoreSequenceIds =
+ regionMaxSeqIdInStores.get(loc.getRegionInfo().getEncodedName());
+ }
+ if (maxStoreSequenceIds != null) {
+ Long maxStoreSeqId = maxStoreSequenceIds.get(kv.getFamily());
+ if (maxStoreSeqId == null || maxStoreSeqId >= entry.getKey().getLogSeqNum()) {
+ // skip the current kv if its column family no longer exists or the edit was already flushed
+ continue;
+ }
+ } else {
+ LOG.warn("Can't find store max sequence ids map for region:"
+ + loc.getRegionInfo().getEncodedName());
+ }
}
-
+
if (kv.isDelete()) {
del = new Delete(kv.getRow());
del.setClusterId(entry.getKey().getClusterId());
@@ -1834,8 +1866,19 @@ public class HLogSplitter {
onlineRegions.add(loc.getRegionInfo().getEncodedName());
// retrieve last flushed sequence Id from ZK. Because region postOpenDeployTasks will
// update the value for the region
- lastFlushedSequenceId = SplitLogManager.getLastFlushedSequenceId(watcher, loc
- .getServerName().getServerName(), loc.getRegionInfo().getEncodedName());
+ RegionStoreSequenceIds ids =
+ SplitLogManager.getRegionFlushedSequenceId(watcher, failedServerName, loc.getRegionInfo()
+ .getEncodedName());
+ if(ids != null) {
+ lastFlushedSequenceId = ids.getLastFlushedSequenceId();
+ Map<byte[], Long> storeIds = new TreeMap<byte[], Long>(Bytes.BYTES_COMPARATOR);
+ List<StoreSequenceId> maxSeqIdInStores = ids.getStoreSequenceIdList();
+ for (StoreSequenceId id : maxSeqIdInStores) {
+ storeIds.put(id.getFamilyName().toByteArray(), id.getSequenceId());
+ }
+ regionMaxSeqIdInStores.put(loc.getRegionInfo().getEncodedName(), storeIds);
+ }
+
if (cachedLastFlushedSequenceId == null
|| lastFlushedSequenceId > cachedLastFlushedSequenceId) {
lastFlushedSequenceIds.put(loc.getRegionInfo().getEncodedName(), lastFlushedSequenceId);
Modified: hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java?rev=1486221&r1=1486220&r2=1486221&view=diff
==============================================================================
--- hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java (original)
+++ hbase/trunk/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java Fri May 24 21:18:32 2013
@@ -823,7 +823,7 @@ public class TestDistributedLogSplitting
@Test (timeout=300000)
public void testWorkerAbort() throws Exception {
LOG.info("testWorkerAbort");
- startCluster(2);
+ startCluster(3);
final int NUM_LOG_LINES = 10000;
final SplitLogManager slm = master.getMasterFileSystem().splitLogManager;
FileSystem fs = master.getMasterFileSystem().getFileSystem();
@@ -845,6 +845,7 @@ public class TestDistributedLogSplitting
waitForCounter(tot_wkr_task_acquired, 0, 1, 1000);
for (RegionServerThread rst : rsts) {
rst.getRegionServer().abort("testing");
+ break;
}
}
}.start();