Posted to commits@hbase.apache.org by st...@apache.org on 2012/05/02 18:26:38 UTC

svn commit: r1333099 [3/5] - in /hbase/trunk/src: main/java/org/apache/hadoop/hbase/ main/java/org/apache/hadoop/hbase/executor/ main/java/org/apache/hadoop/hbase/master/ main/java/org/apache/hadoop/hbase/protobuf/ main/java/org/apache/hadoop/hbase/pro...

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java?rev=1333099&r1=1333098&r2=1333099&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java Wed May  2 16:26:36 2012
@@ -1786,6 +1786,1403 @@ public final class ZooKeeperProtos {
     // @@protoc_insertion_point(class_scope:ClusterUp)
   }
   
+  public interface RegionTransitionOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+    
+    // required uint32 eventTypeCode = 1;
+    boolean hasEventTypeCode();
+    int getEventTypeCode();
+    
+    // required bytes regionName = 2;
+    boolean hasRegionName();
+    com.google.protobuf.ByteString getRegionName();
+    
+    // required uint64 createTime = 3;
+    boolean hasCreateTime();
+    long getCreateTime();
+    
+    // optional .ServerName originServerName = 4;
+    boolean hasOriginServerName();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getOriginServerName();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getOriginServerNameOrBuilder();
+    
+    // optional bytes payload = 5;
+    boolean hasPayload();
+    com.google.protobuf.ByteString getPayload();
+  }
+  public static final class RegionTransition extends
+      com.google.protobuf.GeneratedMessage
+      implements RegionTransitionOrBuilder {
+    // Use RegionTransition.newBuilder() to construct.
+    private RegionTransition(Builder builder) {
+      super(builder);
+    }
+    private RegionTransition(boolean noInit) {}
+    
+    private static final RegionTransition defaultInstance;
+    public static RegionTransition getDefaultInstance() {
+      return defaultInstance;
+    }
+    
+    public RegionTransition getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+    
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_descriptor;
+    }
+    
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_fieldAccessorTable;
+    }
+    
+    private int bitField0_;
+    // required uint32 eventTypeCode = 1;
+    public static final int EVENTTYPECODE_FIELD_NUMBER = 1;
+    private int eventTypeCode_;
+    public boolean hasEventTypeCode() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public int getEventTypeCode() {
+      return eventTypeCode_;
+    }
+    
+    // required bytes regionName = 2;
+    public static final int REGIONNAME_FIELD_NUMBER = 2;
+    private com.google.protobuf.ByteString regionName_;
+    public boolean hasRegionName() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public com.google.protobuf.ByteString getRegionName() {
+      return regionName_;
+    }
+    
+    // required uint64 createTime = 3;
+    public static final int CREATETIME_FIELD_NUMBER = 3;
+    private long createTime_;
+    public boolean hasCreateTime() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    public long getCreateTime() {
+      return createTime_;
+    }
+    
+    // optional .ServerName originServerName = 4;
+    public static final int ORIGINSERVERNAME_FIELD_NUMBER = 4;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName originServerName_;
+    public boolean hasOriginServerName() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getOriginServerName() {
+      return originServerName_;
+    }
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getOriginServerNameOrBuilder() {
+      return originServerName_;
+    }
+    
+    // optional bytes payload = 5;
+    public static final int PAYLOAD_FIELD_NUMBER = 5;
+    private com.google.protobuf.ByteString payload_;
+    public boolean hasPayload() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    public com.google.protobuf.ByteString getPayload() {
+      return payload_;
+    }
+    
+    private void initFields() {
+      eventTypeCode_ = 0;
+      regionName_ = com.google.protobuf.ByteString.EMPTY;
+      createTime_ = 0L;
+      originServerName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      payload_ = com.google.protobuf.ByteString.EMPTY;
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+      
+      if (!hasEventTypeCode()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasRegionName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasCreateTime()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (hasOriginServerName()) {
+        if (!getOriginServerName().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+    
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt32(1, eventTypeCode_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeBytes(2, regionName_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeUInt64(3, createTime_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeMessage(4, originServerName_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeBytes(5, payload_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+    
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+    
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt32Size(1, eventTypeCode_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(2, regionName_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(3, createTime_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(4, originServerName_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeBytesSize(5, payload_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+    
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+    
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition) obj;
+      
+      boolean result = true;
+      result = result && (hasEventTypeCode() == other.hasEventTypeCode());
+      if (hasEventTypeCode()) {
+        result = result && (getEventTypeCode()
+            == other.getEventTypeCode());
+      }
+      result = result && (hasRegionName() == other.hasRegionName());
+      if (hasRegionName()) {
+        result = result && getRegionName()
+            .equals(other.getRegionName());
+      }
+      result = result && (hasCreateTime() == other.hasCreateTime());
+      if (hasCreateTime()) {
+        result = result && (getCreateTime()
+            == other.getCreateTime());
+      }
+      result = result && (hasOriginServerName() == other.hasOriginServerName());
+      if (hasOriginServerName()) {
+        result = result && getOriginServerName()
+            .equals(other.getOriginServerName());
+      }
+      result = result && (hasPayload() == other.hasPayload());
+      if (hasPayload()) {
+        result = result && getPayload()
+            .equals(other.getPayload());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+    
+    @java.lang.Override
+    public int hashCode() {
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasEventTypeCode()) {
+        hash = (37 * hash) + EVENTTYPECODE_FIELD_NUMBER;
+        hash = (53 * hash) + getEventTypeCode();
+      }
+      if (hasRegionName()) {
+        hash = (37 * hash) + REGIONNAME_FIELD_NUMBER;
+        hash = (53 * hash) + getRegionName().hashCode();
+      }
+      if (hasCreateTime()) {
+        hash = (37 * hash) + CREATETIME_FIELD_NUMBER;
+        hash = (53 * hash) + hashLong(getCreateTime());
+      }
+      if (hasOriginServerName()) {
+        hash = (37 * hash) + ORIGINSERVERNAME_FIELD_NUMBER;
+        hash = (53 * hash) + getOriginServerName().hashCode();
+      }
+      if (hasPayload()) {
+        hash = (37 * hash) + PAYLOAD_FIELD_NUMBER;
+        hash = (53 * hash) + getPayload().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      return hash;
+    }
+    
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+    
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransitionOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_descriptor;
+      }
+      
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_RegionTransition_fieldAccessorTable;
+      }
+      
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+      
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getOriginServerNameFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+      
+      public Builder clear() {
+        super.clear();
+        eventTypeCode_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        regionName_ = com.google.protobuf.ByteString.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000002);
+        createTime_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        if (originServerNameBuilder_ == null) {
+          originServerName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+        } else {
+          originServerNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000008);
+        payload_ = com.google.protobuf.ByteString.EMPTY;
+        bitField0_ = (bitField0_ & ~0x00000010);
+        return this;
+      }
+      
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+      
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.getDescriptor();
+      }
+      
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.getDefaultInstance();
+      }
+      
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition build() {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+      
+      private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+      
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.eventTypeCode_ = eventTypeCode_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.regionName_ = regionName_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.createTime_ = createTime_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        if (originServerNameBuilder_ == null) {
+          result.originServerName_ = originServerName_;
+        } else {
+          result.originServerName_ = originServerNameBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        result.payload_ = payload_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+      
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+      
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.getDefaultInstance()) return this;
+        if (other.hasEventTypeCode()) {
+          setEventTypeCode(other.getEventTypeCode());
+        }
+        if (other.hasRegionName()) {
+          setRegionName(other.getRegionName());
+        }
+        if (other.hasCreateTime()) {
+          setCreateTime(other.getCreateTime());
+        }
+        if (other.hasOriginServerName()) {
+          mergeOriginServerName(other.getOriginServerName());
+        }
+        if (other.hasPayload()) {
+          setPayload(other.getPayload());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+      
+      public final boolean isInitialized() {
+        if (!hasEventTypeCode()) {
+          
+          return false;
+        }
+        if (!hasRegionName()) {
+          
+          return false;
+        }
+        if (!hasCreateTime()) {
+          
+          return false;
+        }
+        if (hasOriginServerName()) {
+          if (!getOriginServerName().isInitialized()) {
+            
+            return false;
+          }
+        }
+        return true;
+      }
+      
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              eventTypeCode_ = input.readUInt32();
+              break;
+            }
+            case 18: {
+              bitField0_ |= 0x00000002;
+              regionName_ = input.readBytes();
+              break;
+            }
+            case 24: {
+              bitField0_ |= 0x00000004;
+              createTime_ = input.readUInt64();
+              break;
+            }
+            case 34: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder();
+              if (hasOriginServerName()) {
+                subBuilder.mergeFrom(getOriginServerName());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setOriginServerName(subBuilder.buildPartial());
+              break;
+            }
+            case 42: {
+              bitField0_ |= 0x00000010;
+              payload_ = input.readBytes();
+              break;
+            }
+          }
+        }
+      }
+      
+      private int bitField0_;
+      
+      // required uint32 eventTypeCode = 1;
+      private int eventTypeCode_ ;
+      public boolean hasEventTypeCode() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public int getEventTypeCode() {
+        return eventTypeCode_;
+      }
+      public Builder setEventTypeCode(int value) {
+        bitField0_ |= 0x00000001;
+        eventTypeCode_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearEventTypeCode() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        eventTypeCode_ = 0;
+        onChanged();
+        return this;
+      }
+      
+      // required bytes regionName = 2;
+      private com.google.protobuf.ByteString regionName_ = com.google.protobuf.ByteString.EMPTY;
+      public boolean hasRegionName() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public com.google.protobuf.ByteString getRegionName() {
+        return regionName_;
+      }
+      public Builder setRegionName(com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        regionName_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearRegionName() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        regionName_ = getDefaultInstance().getRegionName();
+        onChanged();
+        return this;
+      }
+      
+      // required uint64 createTime = 3;
+      private long createTime_ ;
+      public boolean hasCreateTime() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      public long getCreateTime() {
+        return createTime_;
+      }
+      public Builder setCreateTime(long value) {
+        bitField0_ |= 0x00000004;
+        createTime_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearCreateTime() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        createTime_ = 0L;
+        onChanged();
+        return this;
+      }
+      
+      // optional .ServerName originServerName = 4;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName originServerName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> originServerNameBuilder_;
+      public boolean hasOriginServerName() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getOriginServerName() {
+        if (originServerNameBuilder_ == null) {
+          return originServerName_;
+        } else {
+          return originServerNameBuilder_.getMessage();
+        }
+      }
+      public Builder setOriginServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+        if (originServerNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          originServerName_ = value;
+          onChanged();
+        } else {
+          originServerNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000008;
+        return this;
+      }
+      public Builder setOriginServerName(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (originServerNameBuilder_ == null) {
+          originServerName_ = builderForValue.build();
+          onChanged();
+        } else {
+          originServerNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000008;
+        return this;
+      }
+      public Builder mergeOriginServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+        if (originServerNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000008) == 0x00000008) &&
+              originServerName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
+            originServerName_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(originServerName_).mergeFrom(value).buildPartial();
+          } else {
+            originServerName_ = value;
+          }
+          onChanged();
+        } else {
+          originServerNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000008;
+        return this;
+      }
+      public Builder clearOriginServerName() {
+        if (originServerNameBuilder_ == null) {
+          originServerName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+          onChanged();
+        } else {
+          originServerNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000008);
+        return this;
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getOriginServerNameBuilder() {
+        bitField0_ |= 0x00000008;
+        onChanged();
+        return getOriginServerNameFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getOriginServerNameOrBuilder() {
+        if (originServerNameBuilder_ != null) {
+          return originServerNameBuilder_.getMessageOrBuilder();
+        } else {
+          return originServerName_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
+          getOriginServerNameFieldBuilder() {
+        if (originServerNameBuilder_ == null) {
+          originServerNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+                  originServerName_,
+                  getParentForChildren(),
+                  isClean());
+          originServerName_ = null;
+        }
+        return originServerNameBuilder_;
+      }
+      
+      // optional bytes payload = 5;
+      private com.google.protobuf.ByteString payload_ = com.google.protobuf.ByteString.EMPTY;
+      public boolean hasPayload() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      public com.google.protobuf.ByteString getPayload() {
+        return payload_;
+      }
+      public Builder setPayload(com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000010;
+        payload_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearPayload() {
+        bitField0_ = (bitField0_ & ~0x00000010);
+        payload_ = getDefaultInstance().getPayload();
+        onChanged();
+        return this;
+      }
+      
+      // @@protoc_insertion_point(builder_scope:RegionTransition)
+    }
+    
+    static {
+      defaultInstance = new RegionTransition(true);
+      defaultInstance.initFields();
+    }
+    
+    // @@protoc_insertion_point(class_scope:RegionTransition)
+  }
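
(Aside, not part of the commit: the class above is protoc output for a RegionTransition message whose shape can be read off the OrBuilder interface at the top of this hunk: required uint32 eventTypeCode = 1, required bytes regionName = 2, required uint64 createTime = 3, optional ServerName originServerName = 4, optional bytes payload = 5. A minimal usage sketch follows, using only methods visible in this hunk plus the standard protobuf runtime; the event code and region name below are arbitrary placeholders, not values from the commit.)

    import com.google.protobuf.ByteString;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition;

    public class RegionTransitionRoundTrip {
      public static void main(String[] args) throws Exception {
        RegionTransition rt = RegionTransition.newBuilder()
            .setEventTypeCode(3)                                     // placeholder event code
            .setRegionName(ByteString.copyFromUtf8("exampleRegion")) // placeholder region name
            .setCreateTime(System.currentTimeMillis())
            .build();                  // build() throws if any required field is unset
        byte[] data = rt.toByteArray();
        RegionTransition copy = RegionTransition.parseFrom(data);
        assert copy.getEventTypeCode() == 3;
        assert !copy.hasOriginServerName();   // optional submessage left unset
      }
    }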
+  
+  public interface SplitLogTaskOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
+    
+    // required .SplitLogTask.State state = 1;
+    boolean hasState();
+    org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState();
+    
+    // required .ServerName serverName = 2;
+    boolean hasServerName();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName();
+    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder();
+  }
+  public static final class SplitLogTask extends
+      com.google.protobuf.GeneratedMessage
+      implements SplitLogTaskOrBuilder {
+    // Use SplitLogTask.newBuilder() to construct.
+    private SplitLogTask(Builder builder) {
+      super(builder);
+    }
+    private SplitLogTask(boolean noInit) {}
+    
+    private static final SplitLogTask defaultInstance;
+    public static SplitLogTask getDefaultInstance() {
+      return defaultInstance;
+    }
+    
+    public SplitLogTask getDefaultInstanceForType() {
+      return defaultInstance;
+    }
+    
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_SplitLogTask_descriptor;
+    }
+    
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_SplitLogTask_fieldAccessorTable;
+    }
+    
+    public enum State
+        implements com.google.protobuf.ProtocolMessageEnum {
+      UNASSIGNED(0, 0),
+      OWNED(1, 1),
+      RESIGNED(2, 2),
+      DONE(3, 3),
+      ERR(4, 4),
+      ;
+      
+      public static final int UNASSIGNED_VALUE = 0;
+      public static final int OWNED_VALUE = 1;
+      public static final int RESIGNED_VALUE = 2;
+      public static final int DONE_VALUE = 3;
+      public static final int ERR_VALUE = 4;
+      
+      
+      public final int getNumber() { return value; }
+      
+      public static State valueOf(int value) {
+        switch (value) {
+          case 0: return UNASSIGNED;
+          case 1: return OWNED;
+          case 2: return RESIGNED;
+          case 3: return DONE;
+          case 4: return ERR;
+          default: return null;
+        }
+      }
+      
+      public static com.google.protobuf.Internal.EnumLiteMap<State>
+          internalGetValueMap() {
+        return internalValueMap;
+      }
+      private static com.google.protobuf.Internal.EnumLiteMap<State>
+          internalValueMap =
+            new com.google.protobuf.Internal.EnumLiteMap<State>() {
+              public State findValueByNumber(int number) {
+                return State.valueOf(number);
+              }
+            };
+      
+      public final com.google.protobuf.Descriptors.EnumValueDescriptor
+          getValueDescriptor() {
+        return getDescriptor().getValues().get(index);
+      }
+      public final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptorForType() {
+        return getDescriptor();
+      }
+      public static final com.google.protobuf.Descriptors.EnumDescriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDescriptor().getEnumTypes().get(0);
+      }
+      
+      private static final State[] VALUES = {
+        UNASSIGNED, OWNED, RESIGNED, DONE, ERR, 
+      };
+      
+      public static State valueOf(
+          com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+        if (desc.getType() != getDescriptor()) {
+          throw new java.lang.IllegalArgumentException(
+            "EnumValueDescriptor is not for this type.");
+        }
+        return VALUES[desc.getIndex()];
+      }
+      
+      private final int index;
+      private final int value;
+      
+      private State(int index, int value) {
+        this.index = index;
+        this.value = value;
+      }
+      
+      // @@protoc_insertion_point(enum_scope:SplitLogTask.State)
+    }
+    
+    private int bitField0_;
+    // required .SplitLogTask.State state = 1;
+    public static final int STATE_FIELD_NUMBER = 1;
+    private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State state_;
+    public boolean hasState() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState() {
+      return state_;
+    }
+    
+    // required .ServerName serverName = 2;
+    public static final int SERVERNAME_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_;
+    public boolean hasServerName() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+      return serverName_;
+    }
+    public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+      return serverName_;
+    }
+    
+    private void initFields() {
+      state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
+      serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+    }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized != -1) return isInitialized == 1;
+      
+      if (!hasState()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasServerName()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getServerName().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+    
+    public void writeTo(com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      getSerializedSize();
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeEnum(1, state_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, serverName_);
+      }
+      getUnknownFields().writeTo(output);
+    }
+    
+    private int memoizedSerializedSize = -1;
+    public int getSerializedSize() {
+      int size = memoizedSerializedSize;
+      if (size != -1) return size;
+    
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeEnumSize(1, state_.getNumber());
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, serverName_);
+      }
+      size += getUnknownFields().getSerializedSize();
+      memoizedSerializedSize = size;
+      return size;
+    }
+    
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    protected java.lang.Object writeReplace()
+        throws java.io.ObjectStreamException {
+      return super.writeReplace();
+    }
+    
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask other = (org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask) obj;
+      
+      boolean result = true;
+      result = result && (hasState() == other.hasState());
+      if (hasState()) {
+        result = result &&
+            (getState() == other.getState());
+      }
+      result = result && (hasServerName() == other.hasServerName());
+      if (hasServerName()) {
+        result = result && getServerName()
+            .equals(other.getServerName());
+      }
+      result = result &&
+          getUnknownFields().equals(other.getUnknownFields());
+      return result;
+    }
+    
+    @java.lang.Override
+    public int hashCode() {
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasState()) {
+        hash = (37 * hash) + STATE_FIELD_NUMBER;
+        hash = (53 * hash) + hashEnum(getState());
+      }
+      if (hasServerName()) {
+        hash = (37 * hash) + SERVERNAME_FIELD_NUMBER;
+        hash = (53 * hash) + getServerName().hashCode();
+      }
+      hash = (29 * hash) + getUnknownFields().hashCode();
+      return hash;
+    }
+    
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
+        com.google.protobuf.ByteString data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
+        com.google.protobuf.ByteString data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(byte[] data)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
+        byte[] data,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws com.google.protobuf.InvalidProtocolBufferException {
+      return newBuilder().mergeFrom(data, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseDelimitedFrom(
+        java.io.InputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      Builder builder = newBuilder();
+      if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
+        return builder.buildParsed();
+      } else {
+        return null;
+      }
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
+        com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input).buildParsed();
+    }
+    public static org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask parseFrom(
+        com.google.protobuf.CodedInputStream input,
+        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return newBuilder().mergeFrom(input, extensionRegistry)
+               .buildParsed();
+    }
+    
+    public static Builder newBuilder() { return Builder.create(); }
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask prototype) {
+      return newBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() { return newBuilder(this); }
+    
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    public static final class Builder extends
+        com.google.protobuf.GeneratedMessage.Builder<Builder>
+       implements org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTaskOrBuilder {
+      public static final com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_SplitLogTask_descriptor;
+      }
+      
+      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_SplitLogTask_fieldAccessorTable;
+      }
+      
+      // Construct using org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+      
+      private Builder(BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
+          getServerNameFieldBuilder();
+        }
+      }
+      private static Builder create() {
+        return new Builder();
+      }
+      
+      public Builder clear() {
+        super.clear();
+        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (serverNameBuilder_ == null) {
+          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+        } else {
+          serverNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      
+      public Builder clone() {
+        return create().mergeFrom(buildPartial());
+      }
+      
+      public com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDescriptor();
+      }
+      
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDefaultInstance();
+      }
+      
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask build() {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+      
+      private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask buildParsed()
+          throws com.google.protobuf.InvalidProtocolBufferException {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(
+            result).asInvalidProtocolBufferException();
+        }
+        return result;
+      }
+      
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask buildPartial() {
+        org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask result = new org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.state_ = state_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (serverNameBuilder_ == null) {
+          result.serverName_ = serverName_;
+        } else {
+          result.serverName_ = serverNameBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+      
+      public Builder mergeFrom(com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask) {
+          return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+      
+      public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask other) {
+        if (other == org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.getDefaultInstance()) return this;
+        if (other.hasState()) {
+          setState(other.getState());
+        }
+        if (other.hasServerName()) {
+          mergeServerName(other.getServerName());
+        }
+        this.mergeUnknownFields(other.getUnknownFields());
+        return this;
+      }
+      
+      public final boolean isInitialized() {
+        if (!hasState()) {
+          
+          return false;
+        }
+        if (!hasServerName()) {
+          
+          return false;
+        }
+        if (!getServerName().isInitialized()) {
+          
+          return false;
+        }
+        return true;
+      }
+      
+      public Builder mergeFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          com.google.protobuf.UnknownFieldSet.newBuilder(
+            this.getUnknownFields());
+        while (true) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              this.setUnknownFields(unknownFields.build());
+              onChanged();
+              return this;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                this.setUnknownFields(unknownFields.build());
+                onChanged();
+                return this;
+              }
+              break;
+            }
+            case 8: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State value = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(1, rawValue);
+              } else {
+                bitField0_ |= 0x00000001;
+                state_ = value;
+              }
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder();
+              if (hasServerName()) {
+                subBuilder.mergeFrom(getServerName());
+              }
+              input.readMessage(subBuilder, extensionRegistry);
+              setServerName(subBuilder.buildPartial());
+              break;
+            }
+          }
+        }
+      }
+      
+      private int bitField0_;
+      
+      // required .SplitLogTask.State state = 1;
+      private org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
+      public boolean hasState() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State getState() {
+        return state_;
+      }
+      public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        state_ = value;
+        onChanged();
+        return this;
+      }
+      public Builder clearState() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        state_ = org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.State.UNASSIGNED;
+        onChanged();
+        return this;
+      }
+      
+      // required .ServerName serverName = 2;
+      private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverNameBuilder_;
+      public boolean hasServerName() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServerName() {
+        if (serverNameBuilder_ == null) {
+          return serverName_;
+        } else {
+          return serverNameBuilder_.getMessage();
+        }
+      }
+      public Builder setServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          serverName_ = value;
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      public Builder setServerName(
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
+        if (serverNameBuilder_ == null) {
+          serverName_ = builderForValue.build();
+          onChanged();
+        } else {
+          serverNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      public Builder mergeServerName(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
+        if (serverNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              serverName_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
+            serverName_ =
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(serverName_).mergeFrom(value).buildPartial();
+          } else {
+            serverName_ = value;
+          }
+          onChanged();
+        } else {
+          serverNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      public Builder clearServerName() {
+        if (serverNameBuilder_ == null) {
+          serverName_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
+          onChanged();
+        } else {
+          serverNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerNameBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getServerNameFieldBuilder().getBuilder();
+      }
+      public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerNameOrBuilder() {
+        if (serverNameBuilder_ != null) {
+          return serverNameBuilder_.getMessageOrBuilder();
+        } else {
+          return serverName_;
+        }
+      }
+      private com.google.protobuf.SingleFieldBuilder<
+          org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> 
+          getServerNameFieldBuilder() {
+        if (serverNameBuilder_ == null) {
+          serverNameBuilder_ = new com.google.protobuf.SingleFieldBuilder<
+              org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
+                  serverName_,
+                  getParentForChildren(),
+                  isClean());
+          serverName_ = null;
+        }
+        return serverNameBuilder_;
+      }
+      
+      // @@protoc_insertion_point(builder_scope:SplitLogTask)
+    }
+    
+    static {
+      defaultInstance = new SplitLogTask(true);
+      defaultInstance.initFields();
+    }
+    
+    // @@protoc_insertion_point(class_scope:SplitLogTask)
+  }
+  
   private static com.google.protobuf.Descriptors.Descriptor
     internal_static_RootRegionServer_descriptor;
   private static
@@ -1806,6 +3203,16 @@ public final class ZooKeeperProtos {
   private static
     com.google.protobuf.GeneratedMessage.FieldAccessorTable
       internal_static_ClusterUp_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_RegionTransition_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_RegionTransition_fieldAccessorTable;
+  private static com.google.protobuf.Descriptors.Descriptor
+    internal_static_SplitLogTask_descriptor;
+  private static
+    com.google.protobuf.GeneratedMessage.FieldAccessorTable
+      internal_static_SplitLogTask_fieldAccessorTable;
   
   public static com.google.protobuf.Descriptors.FileDescriptor
       getDescriptor() {
@@ -1819,9 +3226,16 @@ public final class ZooKeeperProtos {
       "gionServer\022\033\n\006server\030\001 \002(\0132\013.ServerName\"" +
       "%\n\006Master\022\033\n\006master\030\001 \002(\0132\013.ServerName\"\036" +
       "\n\tClusterId\022\021\n\tclusterId\030\001 \002(\t\"\036\n\tCluste" +
-      "rUp\022\021\n\tstartDate\030\001 \002(\tBE\n*org.apache.had" +
-      "oop.hbase.protobuf.generatedB\017ZooKeeperP" +
-      "rotosH\001\210\001\001\240\001\001"
+      "rUp\022\021\n\tstartDate\030\001 \002(\t\"\211\001\n\020RegionTransit" +
+      "ion\022\025\n\reventTypeCode\030\001 \002(\r\022\022\n\nregionName" +
+      "\030\002 \002(\014\022\022\n\ncreateTime\030\003 \002(\004\022%\n\020originServ" +
+      "erName\030\004 \001(\0132\013.ServerName\022\017\n\007payload\030\005 \001" +
+      "(\014\"\230\001\n\014SplitLogTask\022\"\n\005state\030\001 \002(\0162\023.Spl" +
+      "itLogTask.State\022\037\n\nserverName\030\002 \002(\0132\013.Se",
+      "rverName\"C\n\005State\022\016\n\nUNASSIGNED\020\000\022\t\n\005OWN" +
+      "ED\020\001\022\014\n\010RESIGNED\020\002\022\010\n\004DONE\020\003\022\007\n\003ERR\020\004BE\n" +
+      "*org.apache.hadoop.hbase.protobuf.genera" +
+      "tedB\017ZooKeeperProtosH\001\210\001\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -1860,6 +3274,22 @@ public final class ZooKeeperProtos {
               new java.lang.String[] { "StartDate", },
               org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.class,
               org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.ClusterUp.Builder.class);
+          internal_static_RegionTransition_descriptor =
+            getDescriptor().getMessageTypes().get(4);
+          internal_static_RegionTransition_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_RegionTransition_descriptor,
+              new java.lang.String[] { "EventTypeCode", "RegionName", "CreateTime", "OriginServerName", "Payload", },
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.class,
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition.Builder.class);
+          internal_static_SplitLogTask_descriptor =
+            getDescriptor().getMessageTypes().get(5);
+          internal_static_SplitLogTask_fieldAccessorTable = new
+            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
+              internal_static_SplitLogTask_descriptor,
+              new java.lang.String[] { "State", "ServerName", },
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.class,
+              org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.Builder.class);
           return null;
         }
       };

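[A minimal usage sketch of the new generated RegionTransition message added above — not code from this commit; the event code and region name are made-up values. It shows the generated builder round-trip, with the three required fields set before build():]

    import com.google.protobuf.ByteString;
    import com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionTransition;

    public class RegionTransitionSketch {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        // eventTypeCode, regionName and createTime are required;
        // originServerName and payload are optional.
        RegionTransition rt = RegionTransition.newBuilder()
            .setEventTypeCode(1)                                   // hypothetical event code
            .setRegionName(ByteString.copyFromUtf8("someRegion"))  // hypothetical region
            .setCreateTime(System.currentTimeMillis())
            .build();
        byte[] bytes = rt.toByteArray();              // what would be stored in the znode
        RegionTransition back = RegionTransition.parseFrom(bytes);
        assert back.getEventTypeCode() == rt.getEventTypeCode();
      }
    }
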
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java?rev=1333099&r1=1333098&r2=1333099&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java Wed May  2 16:26:36 2012
@@ -1347,7 +1347,7 @@ public class HRegionServer extends Regio
 
     // Create the log splitting worker and start it
     this.splitLogWorker = new SplitLogWorker(this.zooKeeper,
-        this.getConfiguration(), this.getServerName().toString());
+        this.getConfiguration(), this.getServerName());
     splitLogWorker.start();
   }
 

Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java?rev=1333099&r1=1333098&r2=1333099&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitLogWorker.java Wed May  2 16:26:36 2012
@@ -19,8 +19,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static org.apache.hadoop.hbase.zookeeper.ZKSplitLog.Counters.*;
-
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.List;
@@ -32,12 +30,14 @@ import org.apache.hadoop.classification.
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.master.SplitLogManager;
+import org.apache.hadoop.hbase.DeserializationException;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.SplitLogCounters;
+import org.apache.hadoop.hbase.SplitLogTask;
 import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
-import org.apache.hadoop.hbase.zookeeper.ZKSplitLog.TaskState;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
@@ -71,9 +71,8 @@ public class SplitLogWorker extends ZooK
   private static final Log LOG = LogFactory.getLog(SplitLogWorker.class);
 
   Thread worker;
-  private final String serverName;
+  private final ServerName serverName;
   private final TaskExecutor splitTaskExecutor;
-  private long zkretries;
 
   private Object taskReadyLock = new Object();
   volatile int taskReadySeq = 0;
@@ -85,15 +84,14 @@ public class SplitLogWorker extends ZooK
 
 
   public SplitLogWorker(ZooKeeperWatcher watcher, Configuration conf,
-      String serverName, TaskExecutor splitTaskExecutor) {
+      ServerName serverName, TaskExecutor splitTaskExecutor) {
     super(watcher);
     this.serverName = serverName;
     this.splitTaskExecutor = splitTaskExecutor;
-    this.zkretries = conf.getLong("hbase.splitlog.zk.retries", 3);
   }
 
   public SplitLogWorker(ZooKeeperWatcher watcher, final Configuration conf,
-      final String serverName) {
+      final ServerName serverName) {
     this(watcher, conf, serverName, new TaskExecutor () {
       @Override
       public Status exec(String filename, CancelableProgressable p) {
@@ -111,24 +109,21 @@ public class SplitLogWorker extends ZooK
         // encountered a bad non-retry-able persistent error.
         try {
           String tmpname =
-            ZKSplitLog.getSplitLogDirTmpComponent(serverName, filename);
+            ZKSplitLog.getSplitLogDirTmpComponent(serverName.toString(), filename);
           if (HLogSplitter.splitLogFileToTemp(rootdir, tmpname,
               fs.getFileStatus(new Path(filename)), fs, conf, p) == false) {
             return Status.PREEMPTED;
           }
         } catch (InterruptedIOException iioe) {
-          LOG.warn("log splitting of " + filename + " interrupted, resigning",
-              iioe);
+          LOG.warn("log splitting of " + filename + " interrupted, resigning", iioe);
           return Status.RESIGNED;
         } catch (IOException e) {
           Throwable cause = e.getCause();
           if (cause instanceof InterruptedException) {
-            LOG.warn("log splitting of " + filename + " interrupted, resigning",
-                e);
+            LOG.warn("log splitting of " + filename + " interrupted, resigning", e);
             return Status.RESIGNED;
           }
-          LOG.warn("log splitting of " + filename + " failed, returning error",
-              e);
+          LOG.warn("log splitting of " + filename + " failed, returning error", e);
           return Status.ERR;
         }
         return Status.DONE;
@@ -149,13 +144,11 @@ public class SplitLogWorker extends ZooK
           res = ZKUtil.checkExists(watcher, watcher.splitLogZNode);
         } catch (KeeperException e) {
           // ignore
-          LOG.warn("Exception when checking for " + watcher.splitLogZNode +
-              " ... retrying", e);
+          LOG.warn("Exception when checking for " + watcher.splitLogZNode + " ... retrying", e);
         }
         if (res == -1) {
           try {
-            LOG.info(watcher.splitLogZNode + " znode does not exist," +
-                " waiting for master to create one");
+            LOG.info(watcher.splitLogZNode + " znode does not exist, waiting for master to create it");
             Thread.sleep(1000);
           } catch (InterruptedException e) {
             LOG.debug("Interrupted while waiting for " + watcher.splitLogZNode
@@ -241,31 +234,40 @@ public class SplitLogWorker extends ZooK
     try {
       try {
         if ((data = ZKUtil.getDataNoWatch(this.watcher, path, stat)) == null) {
-          tot_wkr_failed_to_grab_task_no_data.incrementAndGet();
+          SplitLogCounters.tot_wkr_failed_to_grab_task_no_data.incrementAndGet();
           return;
         }
       } catch (KeeperException e) {
         LOG.warn("Failed to get data for znode " + path, e);
-        tot_wkr_failed_to_grab_task_exception.incrementAndGet();
+        SplitLogCounters.tot_wkr_failed_to_grab_task_exception.incrementAndGet();
+        return;
+      }
+      SplitLogTask slt;
+      try {
+        slt = SplitLogTask.parseFrom(data);
+      } catch (DeserializationException e) {
+        LOG.warn("Failed to parse data for znode " + path, e);
+        SplitLogCounters.tot_wkr_failed_to_grab_task_exception.incrementAndGet();
         return;
       }
-      if (TaskState.TASK_UNASSIGNED.equals(data) == false) {
-        tot_wkr_failed_to_grab_task_owned.incrementAndGet();
+      if (slt.isUnassigned() == false) {
+        SplitLogCounters.tot_wkr_failed_to_grab_task_owned.incrementAndGet();
         return;
       }
 
       currentVersion = stat.getVersion();
       if (attemptToOwnTask(true) == false) {
-        tot_wkr_failed_to_grab_task_lost_race.incrementAndGet();
+        SplitLogCounters.tot_wkr_failed_to_grab_task_lost_race.incrementAndGet();
         return;
       }
 
       if (ZKSplitLog.isRescanNode(watcher, currentTask)) {
-        endTask(TaskState.TASK_DONE, tot_wkr_task_acquired_rescan);
+        endTask(new SplitLogTask.Done(this.serverName),
+          SplitLogCounters.tot_wkr_task_acquired_rescan);
         return;
       }
       LOG.info("worker " + serverName + " acquired task " + path);
-      tot_wkr_task_acquired.incrementAndGet();
+      SplitLogCounters.tot_wkr_task_acquired.incrementAndGet();
       getDataSetWatchAsync();
 
       t = System.currentTimeMillis();
@@ -285,15 +287,15 @@ public class SplitLogWorker extends ZooK
       });
       switch (status) {
         case DONE:
-          endTask(TaskState.TASK_DONE, tot_wkr_task_done);
+          endTask(new SplitLogTask.Done(this.serverName), SplitLogCounters.tot_wkr_task_done);
           break;
         case PREEMPTED:
-          tot_wkr_preempt_task.incrementAndGet();
+          SplitLogCounters.tot_wkr_preempt_task.incrementAndGet();
           LOG.warn("task execution preempted " + path);
           break;
         case ERR:
           if (!exitWorker) {
-            endTask(TaskState.TASK_ERR, tot_wkr_task_err);
+            endTask(new SplitLogTask.Err(this.serverName), SplitLogCounters.tot_wkr_task_err);
             break;
           }
           // if the RS is exiting then there is probably tons of stuff
@@ -301,13 +303,12 @@ public class SplitLogWorker extends ZooK
           //$FALL-THROUGH$
         case RESIGNED:
           if (exitWorker) {
-            LOG.info("task execution interrupted because worker is exiting " +
-                path);
-            endTask(TaskState.TASK_RESIGNED, tot_wkr_task_resigned);
+            LOG.info("task execution interrupted because worker is exiting " + path);
+            endTask(new SplitLogTask.Resigned(this.serverName),
+              SplitLogCounters.tot_wkr_task_resigned);
           } else {
-            tot_wkr_preempt_task.incrementAndGet();
-            LOG.info("task execution interrupted via zk by manager " +
-                path);
+            SplitLogCounters.tot_wkr_preempt_task.incrementAndGet();
+            LOG.info("task execution interrupted via zk by manager " + path);
           }
           break;
       }
@@ -337,15 +338,16 @@ public class SplitLogWorker extends ZooK
    */
   private boolean attemptToOwnTask(boolean isFirstTime) {
     try {
-      Stat stat = this.watcher.getRecoverableZooKeeper().setData(currentTask,
-          TaskState.TASK_OWNED.get(serverName), currentVersion);
+      SplitLogTask slt = new SplitLogTask.Owned(this.serverName);
+      Stat stat =
+        this.watcher.getRecoverableZooKeeper().setData(currentTask, slt.toByteArray(), currentVersion);
       if (stat == null) {
         LOG.warn("zk.setData() returned null for path " + currentTask);
-        tot_wkr_task_heartbeat_failed.incrementAndGet();
+        SplitLogCounters.tot_wkr_task_heartbeat_failed.incrementAndGet();
         return (false);
       }
       currentVersion = stat.getVersion();
-      tot_wkr_task_heartbeat.incrementAndGet();
+      SplitLogCounters.tot_wkr_task_heartbeat.incrementAndGet();
       return (true);
     } catch (KeeperException e) {
       if (!isFirstTime) {
@@ -363,7 +365,7 @@ public class SplitLogWorker extends ZooK
           currentTask + " " + StringUtils.stringifyException(e1));
       Thread.currentThread().interrupt();
     }
-    tot_wkr_task_heartbeat_failed.incrementAndGet();
+    SplitLogCounters.tot_wkr_task_heartbeat_failed.incrementAndGet();
     return (false);
   }
 
@@ -373,29 +375,28 @@ public class SplitLogWorker extends ZooK
    * @param ts
    * @param ctr
    */
-  private void endTask(ZKSplitLog.TaskState ts, AtomicLong ctr) {
+  private void endTask(SplitLogTask slt, AtomicLong ctr) {
     String path = currentTask;
     currentTask = null;
     try {
-      if (ZKUtil.setData(this.watcher, path, ts.get(serverName),
+      if (ZKUtil.setData(this.watcher, path, slt.toByteArray(),
           currentVersion)) {
-        LOG.info("successfully transitioned task " + path +
-            " to final state " + ts);
+        LOG.info("successfully transitioned task " + path + " to final state " + slt);
         ctr.incrementAndGet();
         return;
       }
-      LOG.warn("failed to transistion task " + path + " to end state " + ts +
+      LOG.warn("failed to transition task " + path + " to end state " + slt +
           " because of version mismatch ");
     } catch (KeeperException.BadVersionException bve) {
+      LOG.warn("transition of task " + path + " to " + slt +
+      LOG.warn("transisition task " + path + " to " + slt +
           " failed because of version mismatch", bve);
     } catch (KeeperException.NoNodeException e) {
-      LOG.fatal("logic error - end task " + path + " " + ts +
+      LOG.fatal("logic error - end task " + path + " " + slt +
           " failed because task doesn't exist", e);
     } catch (KeeperException e) {
-      LOG.warn("failed to end task, " + path + " " + ts, e);
+      LOG.warn("failed to end task, " + path + " " + slt, e);
     }
-    tot_wkr_final_transistion_failed.incrementAndGet();
+    SplitLogCounters.tot_wkr_final_transistion_failed.incrementAndGet();
     return;
   }
 
@@ -403,10 +404,17 @@ public class SplitLogWorker extends ZooK
     this.watcher.getRecoverableZooKeeper().getZooKeeper().
       getData(currentTask, this.watcher,
       new GetDataAsyncCallback(), null);
-    tot_wkr_get_data_queued.incrementAndGet();
+    SplitLogCounters.tot_wkr_get_data_queued.incrementAndGet();
   }
 
   void getDataSetWatchSuccess(String path, byte[] data) {
+    SplitLogTask slt;
+    try {
+      slt = SplitLogTask.parseFrom(data);
+    } catch (DeserializationException e) {
+      LOG.warn("Failed to parse", e);
+      return;
+    }
     synchronized (grabTaskLock) {
       if (workerInGrabTask) {
         // currentTask can change but that's ok
@@ -418,13 +426,12 @@ public class SplitLogWorker extends ZooK
           // UNASSIGNED because by the time this worker sets the data watch
           // the node might have made two transitions - from owned by this
           // worker to unassigned to owned by another worker
-          if (! TaskState.TASK_OWNED.equals(data, serverName) &&
-              ! TaskState.TASK_DONE.equals(data, serverName) &&
-              ! TaskState.TASK_ERR.equals(data, serverName) &&
-              ! TaskState.TASK_RESIGNED.equals(data, serverName)) {
+          if (! slt.isOwned(this.serverName) &&
+              ! slt.isDone(this.serverName) &&
+              ! slt.isErr(this.serverName) &&
+              ! slt.isResigned(this.serverName)) {
             LOG.info("task " + taskpath + " preempted from " +
-                serverName + ", current task state and owner=" +
-                new String(data));
+                serverName + ", current task state and owner=" + slt.toString());
             stopTask();
           }
         }
@@ -439,7 +446,7 @@ public class SplitLogWorker extends ZooK
         String taskpath = currentTask;
         if (taskpath != null && taskpath.equals(path)) {
           LOG.info("retrying data watch on " + path);
-          tot_wkr_get_data_retry.incrementAndGet();
+          SplitLogCounters.tot_wkr_get_data_retry.incrementAndGet();
           getDataSetWatchAsync();
         } else {
           // no point setting a watch on the task which this worker is not
@@ -543,9 +550,8 @@ public class SplitLogWorker extends ZooK
     private final Log LOG = LogFactory.getLog(GetDataAsyncCallback.class);
 
     @Override
-    public void processResult(int rc, String path, Object ctx, byte[] data,
-        Stat stat) {
-      tot_wkr_get_data_result.incrementAndGet();
+    public void processResult(int rc, String path, Object ctx, byte[] data, Stat stat) {
+      SplitLogCounters.tot_wkr_get_data_result.incrementAndGet();
       if (rc != 0) {
         LOG.warn("getdata rc = " + KeeperException.Code.get(rc) + " " + path);
         getDataSetWatchFailure(path);
@@ -573,4 +579,4 @@ public class SplitLogWorker extends ZooK
     }
     public Status exec(String name, CancelableProgressable p);
   }
-}
+}
\ No newline at end of file

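[The SplitLogWorker changes above replace the old string-based ZKSplitLog.TaskState znode content with typed SplitLogTask states serialized as protobuf. A minimal sketch of the new pattern, assuming ServerName's (hostname, port, startcode) constructor; the values are illustrative:]

    import org.apache.hadoop.hbase.DeserializationException;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.SplitLogTask;

    public class SplitLogTaskSketch {
      public static void main(String[] args) throws DeserializationException {
        ServerName sn = new ServerName("1.example.org", 60020, 12345L);  // illustrative
        SplitLogTask owned = new SplitLogTask.Owned(sn);
        byte[] data = owned.toByteArray();        // pb bytes written to the task znode
        SplitLogTask slt = SplitLogTask.parseFrom(data);
        System.out.println(slt.isOwned(sn));      // true: this server owns the task
        System.out.println(slt.isUnassigned());   // false
      }
    }
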
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java?rev=1333099&r1=1333098&r2=1333099&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java Wed May  2 16:26:36 2012
@@ -39,11 +39,11 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.RegionTransition;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
 import org.apache.hadoop.hbase.executor.EventHandler.EventType;
-import org.apache.hadoop.hbase.executor.RegionTransitionData;
 import org.apache.hadoop.hbase.io.Reference.Range;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
@@ -858,12 +858,10 @@ public class SplitTransaction {
   throws KeeperException, IOException {
     LOG.debug(zkw.prefix("Creating ephemeral node for " +
       region.getEncodedName() + " in SPLITTING state"));
-    RegionTransitionData data =
-      new RegionTransitionData(EventType.RS_ZK_REGION_SPLITTING,
+    RegionTransition rt = RegionTransition.createRegionTransition(EventType.RS_ZK_REGION_SPLITTING,
         region.getRegionName(), serverName);
-
     String node = ZKAssign.getNodeName(zkw, region.getEncodedName());
-    if (!ZKUtil.createEphemeralNodeAndWatch(zkw, node, data.getBytes())) {
+    if (!ZKUtil.createEphemeralNodeAndWatch(zkw, node, rt.toByteArray())) {
       throw new IOException("Failed create of ephemeral " + node);
     }
     // Transition node from SPLITTING to SPLITTING and pick up version so we

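[A sketch of the replacement pattern in the hunk above, assuming an HRegionInfo and ServerName are at hand; the class and method names below are invented for illustration:]

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.RegionTransition;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.executor.EventHandler.EventType;

    public class SplittingZNodeSketch {
      // Returns the bytes that would be written to the region's transition znode.
      static byte[] splittingZNodeContent(HRegionInfo region, ServerName sn) {
        RegionTransition rt = RegionTransition.createRegionTransition(
            EventType.RS_ZK_REGION_SPLITTING, region.getRegionName(), sn);
        return rt.toByteArray();   // replaces RegionTransitionData.getBytes()
      }
    }
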
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java?rev=1333099&r1=1333098&r2=1333099&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLog.java Wed May  2 16:26:36 2012
@@ -1715,7 +1715,8 @@ public class HLog implements Syncable {
    * Construct the HLog directory name
    *
    * @param serverName Server name formatted as described in {@link ServerName}
-   * @return the HLog directory name
+   * @return the relative HLog directory name, e.g. <code>.logs/1.example.org,60030,12345</code>
+   * if the <code>serverName</code> passed is <code>1.example.org,60030,12345</code>
    */
   public static String getHLogDirectoryName(final String serverName) {
     StringBuilder dirName = new StringBuilder(HConstants.HREGION_LOGDIR_NAME);

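[A sketch of the behaviour the corrected javadoc describes:]

    import org.apache.hadoop.hbase.regionserver.wal.HLog;

    public class HLogDirNameSketch {
      public static void main(String[] args) {
        String dir = HLog.getHLogDirectoryName("1.example.org,60030,12345");
        System.out.println(dir);  // per the javadoc: .logs/1.example.org,60030,12345
      }
    }
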
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java?rev=1333099&r1=1333098&r2=1333099&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogSplitter.java Wed May  2 16:26:36 2012
@@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.HConstant
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.RemoteExceptionHandler;
 import org.apache.hadoop.hbase.io.HeapSize;
+import org.apache.hadoop.hbase.master.SplitLogManager;
 import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -349,17 +350,16 @@ public class HLogSplitter {
   static public boolean splitLogFileToTemp(Path rootDir, String tmpname,
       FileStatus logfile, FileSystem fs,
       Configuration conf, CancelableProgressable reporter) throws IOException {
-    HLogSplitter s = new HLogSplitter(conf, rootDir, null, null /* oldLogDir */,
-        fs);
+    HLogSplitter s = new HLogSplitter(conf, rootDir, null, null /* oldLogDir */, fs);
     return s.splitLogFileToTemp(logfile, tmpname, reporter);
   }
 
   public boolean splitLogFileToTemp(FileStatus logfile, String tmpname,
-      CancelableProgressable reporter)  throws IOException {	    
+      CancelableProgressable reporter)
+  throws IOException {
     final Map<byte[], Object> logWriters = Collections.
     synchronizedMap(new TreeMap<byte[], Object>(Bytes.BYTES_COMPARATOR));
     boolean isCorrupted = false;
-    
     Preconditions.checkState(status == null);
     status = TaskMonitor.get().createStatus(
         "Splitting log file " + logfile.getPath() +
@@ -375,7 +375,7 @@ public class HLogSplitter {
      // How often to send a progress report (default 1/2 the zookeeper session
      // timeout or, if that is not set, the split log DEFAULT_TIMEOUT)
     int period = conf.getInt("hbase.splitlog.report.period",
-      conf.getInt("hbase.splitlog.manager.timeout", ZKSplitLog.DEFAULT_TIMEOUT) / 2);
+      conf.getInt("hbase.splitlog.manager.timeout", SplitLogManager.DEFAULT_TIMEOUT) / 2);
     int numOpenedFilesBeforeReporting =
       conf.getInt("hbase.splitlog.report.openedfiles", 3);
     Path logPath = logfile.getPath();
@@ -474,8 +474,8 @@ public class HLogSplitter {
         WriterAndPath wap = (WriterAndPath)o;
         wap.w.close();
         LOG.debug("Closed " + wap.p);
-        Path dst = getCompletedRecoveredEditsFilePath(wap.p, outputSink
-            .getRegionMaximumEditLogSeqNum(logWritersEntry.getKey()));
+        Path dst = getCompletedRecoveredEditsFilePath(wap.p,
+          outputSink.getRegionMaximumEditLogSeqNum(logWritersEntry.getKey()));
         if (!dst.equals(wap.p) && fs.exists(dst)) {
           LOG.warn("Found existing old edits file. It could be the "
               + "result of a previous failed split attempt. Deleting " + dst
@@ -658,10 +658,9 @@ public class HLogSplitter {
   static Path getRegionSplitEditsPath(final FileSystem fs,
       final Entry logEntry, final Path rootDir, boolean isCreate)
   throws IOException {
-    Path tableDir = HTableDescriptor.getTableDir(rootDir, logEntry.getKey()
-        .getTablename());
+    Path tableDir = HTableDescriptor.getTableDir(rootDir, logEntry.getKey().getTablename());
     Path regiondir = HRegion.getRegionDir(tableDir,
-        Bytes.toString(logEntry.getKey().getEncodedRegionName()));
+      Bytes.toString(logEntry.getKey().getEncodedRegionName()));
     Path dir = HLog.getRegionDirRecoveredEditsDir(regiondir);
 
     if (!fs.exists(regiondir)) {
@@ -675,8 +674,7 @@ public class HLogSplitter {
     }
     // Append file name ends with RECOVERED_LOG_TMPFILE_SUFFIX to ensure
     // region's replayRecoveredEdits will not delete it
-    String fileName = formatRecoveredEditsFileName(logEntry.getKey()
-        .getLogSeqNum());
+    String fileName = formatRecoveredEditsFileName(logEntry.getKey().getLogSeqNum());
     fileName = getTmpRecoveredEditsFileName(fileName);
     return new Path(dir, fileName);
   }
@@ -1080,8 +1078,7 @@ public class HLogSplitter {
   private WriterAndPath createWAP(byte[] region, Entry entry,
       Path rootdir, String tmpname, FileSystem fs, Configuration conf)
   throws IOException {
-    Path regionedits = getRegionSplitEditsPath(fs, entry, rootdir,
-        tmpname==null);
+    Path regionedits = getRegionSplitEditsPath(fs, entry, rootdir, tmpname==null);
     if (regionedits == null) {
       return null;
     }

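[The report-period hunk above boils down to this derivation — a sketch assuming only a Hadoop Configuration, with the key names taken from the diff:]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.master.SplitLogManager;

    public class SplitReportPeriodSketch {
      // Default report period is half the split log manager timeout.
      static int reportPeriod(Configuration conf) {
        return conf.getInt("hbase.splitlog.report.period",
            conf.getInt("hbase.splitlog.manager.timeout",
                SplitLogManager.DEFAULT_TIMEOUT) / 2);
      }
    }
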
Added: hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java?rev=1333099&view=auto
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java (added)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java Wed May  2 16:26:36 2012
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.zookeeper.Watcher;
+import org.apache.zookeeper.WatchedEvent;
+
+/**
+ * An empty ZooKeeper watcher
+ */
+@InterfaceAudience.Private
+public class EmptyWatcher implements Watcher {
+  // Used in this package but also by tests so needs to be public
+  public static EmptyWatcher instance = new EmptyWatcher();
+  private EmptyWatcher() {}
+
+  public void process(WatchedEvent event) {}
+}
\ No newline at end of file

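[A sketch of where the new singleton is useful — anywhere the raw ZooKeeper API demands a Watcher but no callback is wanted; the quorum string and znode path below are made up:]

    import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
    import org.apache.zookeeper.ZooKeeper;

    public class EmptyWatcherSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("localhost:2181", 30000, EmptyWatcher.instance);
        // EmptyWatcher.process() ignores every event, so no watch logic fires.
        byte[] data = zk.getData("/hbase/master", EmptyWatcher.instance, null);
        zk.close();
      }
    }
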
Modified: hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java?rev=1333099&r1=1333098&r2=1333099&view=diff
==============================================================================
--- hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java (original)
+++ hbase/trunk/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java Wed May  2 16:26:36 2012
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.DeserializationException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
@@ -80,7 +81,12 @@ public class MasterAddressTracker extend
    * @return Server name or null if timed out.
    */
   public ServerName getMasterAddress(final boolean refresh) {
-    return ZKUtil.znodeContentToServerName(super.getData(refresh));
+    try {
+      return ServerName.parseFrom(super.getData(refresh));
+    } catch (DeserializationException e) {
+      LOG.warn("Failed to parse", e);
+      return null;
+    }
   }
 
   /**
@@ -99,7 +105,13 @@ public class MasterAddressTracker extend
     if (data == null){
       throw new IOException("Can't get master address from ZooKeeper; znode data == null");
     }
-    return ZKUtil.znodeContentToServerName(data);
+    try {
+      return ServerName.parseFrom(data);
+    } catch (DeserializationException e) {
+      KeeperException ke = new KeeperException.DataInconsistencyException();
+      ke.initCause(e);
+      throw ke;
+    }
   }
 
   /**
@@ -116,7 +128,7 @@ public class MasterAddressTracker extend
   public static boolean setMasterAddress(final ZooKeeperWatcher zkw,
       final String znode, final ServerName master)
   throws KeeperException {
-    return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, getZNodeData(master));
+    return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, toByteArray(master));
   }
 
   /**
@@ -132,7 +144,7 @@ public class MasterAddressTracker extend
    * @return Content of the master znode as a serialized pb with the pb
    * magic as prefix.
    */
-   static byte [] getZNodeData(final ServerName sn) {
+   static byte [] toByteArray(final ServerName sn) {
      ZooKeeperProtos.Master.Builder mbuilder = ZooKeeperProtos.Master.newBuilder();
      HBaseProtos.ServerName.Builder snbuilder = HBaseProtos.ServerName.newBuilder();
      snbuilder.setHostName(sn.getHostname());