Posted to commits@hbase.apache.org by ec...@apache.org on 2013/02/25 23:50:29 UTC

svn commit: r1449950 [24/35] - in /hbase/trunk: ./ hbase-client/ hbase-client/src/ hbase-client/src/main/ hbase-client/src/main/java/ hbase-client/src/main/java/org/ hbase-client/src/main/java/org/apache/ hbase-client/src/main/java/org/apache/hadoop/ h...

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,307 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Represents an authorization to perform the given actions over the given
+ * table, optionally restricted to the given column family or column
+ * qualifier.  If the family property is <code>null</code>, it implies
+ * full table access.
+ */
+public class TablePermission extends Permission {
+  private static Log LOG = LogFactory.getLog(TablePermission.class);
+
+  private byte[] table;
+  private byte[] family;
+  private byte[] qualifier;
+
+  /** Nullary constructor for Writable, do not use */
+  public TablePermission() {
+    super();
+  }
+
+  /**
+   * Create a new permission for the given table and (optionally) column family,
+   * allowing the given actions.
+   * @param table the table
+   * @param family the family, can be null for a permission over the entire table
+   * @param assigned the list of allowed actions
+   */
+  public TablePermission(byte[] table, byte[] family, Action... assigned) {
+    this(table, family, null, assigned);
+  }
+
+  /**
+   * Creates a new permission for the given table, restricted to the given
+   * column family and qualifier, allowing the assigned actions to be performed.
+   * @param table the table
+   * @param family the family, can be null for a permission over the entire table
+   * @param qualifier the column qualifier, can be null for a permission over
+   *   the entire column family
+   * @param assigned the list of allowed actions
+   */
+  public TablePermission(byte[] table, byte[] family, byte[] qualifier,
+      Action... assigned) {
+    super(assigned);
+    this.table = table;
+    this.family = family;
+    this.qualifier = qualifier;
+  }
+
+  /**
+   * Creates a new permission for the given table, family and column qualifier,
+   * allowing the actions matching the provided byte codes to be performed.
+   * @param table the table
+   * @param family the family, can be null for a permission over the entire table
+   * @param qualifier the column qualifier, can be null for a permission over
+   *   the entire column family
+   * @param actionCodes the list of allowed action codes
+   */
+  public TablePermission(byte[] table, byte[] family, byte[] qualifier,
+      byte[] actionCodes) {
+    super(actionCodes);
+    this.table = table;
+    this.family = family;
+    this.qualifier = qualifier;
+  }
+
+  public boolean hasTable() {
+    return table != null;
+  }
+
+  public byte[] getTable() {
+    return table;
+  }
+
+  public boolean hasFamily() {
+    return family != null;
+  }
+
+  public byte[] getFamily() {
+    return family;
+  }
+
+  public boolean hasQualifier() {
+    return qualifier != null;
+  }
+
+  public byte[] getQualifier() {
+    return qualifier;
+  }
+
+  /**
+   * Checks that a given table operation is authorized by this permission
+   * instance.
+   *
+   * @param table the table where the operation is being performed
+   * @param family the column family to which the operation is restricted,
+   *   if <code>null</code> implies "all"
+   * @param qualifier the column qualifier to which the action is restricted,
+   *   if <code>null</code> implies "all"
+   * @param action the action being requested
+   * @return <code>true</code> if the action within the given scope is allowed
+   *   by this permission, otherwise <code>false</code>
+   */
+  public boolean implies(byte[] table, byte[] family, byte[] qualifier,
+      Action action) {
+    if (!Bytes.equals(this.table, table)) {
+      return false;
+    }
+
+    if (this.family != null &&
+        (family == null ||
+         !Bytes.equals(this.family, family))) {
+      return false;
+    }
+
+    if (this.qualifier != null &&
+        (qualifier == null ||
+         !Bytes.equals(this.qualifier, qualifier))) {
+      return false;
+    }
+
+    // check actions
+    return super.implies(action);
+  }
+
+  /**
+   * Checks if this permission grants access to perform the given action on
+   * the given table and key value.
+   * @param table the table on which the operation is being performed
+   * @param kv the KeyValue on which the operation is being requested
+   * @param action the action requested
+   * @return <code>true</code> if the action is allowed over the given scope
+   *   by this permission, otherwise <code>false</code>
+   */
+  public boolean implies(byte[] table, KeyValue kv, Action action) {
+    if (!Bytes.equals(this.table, table)) {
+      return false;
+    }
+
+    if (family != null &&
+        (Bytes.compareTo(family, 0, family.length,
+            kv.getBuffer(), kv.getFamilyOffset(), kv.getFamilyLength()) != 0)) {
+      return false;
+    }
+
+    if (qualifier != null &&
+        (Bytes.compareTo(qualifier, 0, qualifier.length,
+            kv.getBuffer(), kv.getQualifierOffset(), kv.getQualifierLength()) != 0)) {
+      return false;
+    }
+
+    // check actions
+    return super.implies(action);
+  }
+
+  /**
+   * Returns <code>true</code> if this permission matches the given column
+   * family at least.  This only indicates a partial match against the table
+ * and column family, however, and does not guarantee that implies() for the
+ * same column family would return <code>true</code>.  In the case of a
+ * column-qualifier specific permission, for example, implies() would still
+ * return <code>false</code>.
+   */
+  public boolean matchesFamily(byte[] table, byte[] family, Action action) {
+    if (!Bytes.equals(this.table, table)) {
+      return false;
+    }
+
+    if (this.family != null &&
+        (family == null ||
+         !Bytes.equals(this.family, family))) {
+      return false;
+    }
+
+    // ignore qualifier
+    // check actions
+    return super.implies(action);
+  }
+
+  /**
+   * Returns <code>true</code> if this permission matches the given table,
+   * family and qualifier.
+   * @param table the table name to match
+   * @param family the column family to match
+   * @param qualifier the qualifier name to match
+   * @param action the action requested
+   * @return <code>true</code> if the table, family and qualifier match,
+   *   otherwise <code>false</code>
+   */
+  public boolean matchesFamilyQualifier(byte[] table, byte[] family, byte[] qualifier,
+                                Action action) {
+    if (!matchesFamily(table, family, action)) {
+      return false;
+    } else {
+      if (this.qualifier != null &&
+          (qualifier == null ||
+           !Bytes.equals(this.qualifier, qualifier))) {
+        return false;
+      }
+    }
+    return super.implies(action);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof TablePermission)) {
+      return false;
+    }
+    TablePermission other = (TablePermission)obj;
+
+    if (!(Bytes.equals(table, other.getTable()) &&
+        ((family == null && other.getFamily() == null) ||
+         Bytes.equals(family, other.getFamily())) &&
+        ((qualifier == null && other.getQualifier() == null) ||
+         Bytes.equals(qualifier, other.getQualifier()))
+       )) {
+      return false;
+    }
+
+    // check actions
+    return super.equals(other);
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 37;
+    int result = super.hashCode();
+    if (table != null) {
+      result = prime * result + Bytes.hashCode(table);
+    }
+    if (family != null) {
+      result = prime * result + Bytes.hashCode(family);
+    }
+    if (qualifier != null) {
+      result = prime * result + Bytes.hashCode(qualifier);
+    }
+    return result;
+  }
+
+  public String toString() {
+    StringBuilder str = new StringBuilder("[TablePermission: ")
+        .append("table=").append(Bytes.toString(table))
+        .append(", family=").append(Bytes.toString(family))
+        .append(", qualifier=").append(Bytes.toString(qualifier))
+        .append(", actions=");
+    if (actions != null) {
+      for (int i=0; i<actions.length; i++) {
+        if (i > 0)
+          str.append(",");
+        if (actions[i] != null)
+          str.append(actions[i].toString());
+        else
+          str.append("NULL");
+      }
+    }
+    str.append("]");
+
+    return str.toString();
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    table = Bytes.readByteArray(in);
+    if (in.readBoolean()) {
+      family = Bytes.readByteArray(in);
+    }
+    if (in.readBoolean()) {
+      qualifier = Bytes.readByteArray(in);
+    }
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+    Bytes.writeByteArray(out, table);
+    out.writeBoolean(family != null);
+    if (family != null) {
+      Bytes.writeByteArray(out, family);
+    }
+    out.writeBoolean(qualifier != null);
+    if (qualifier != null) {
+      Bytes.writeByteArray(out, qualifier);
+    }
+  }
+}
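
To illustrate the scoping rules above, a minimal usage sketch (the table,
family and qualifier names are hypothetical; Action is the enum inherited
from the parent Permission class):

    // READ permission on table "t1", restricted to column family "cf1".
    byte[] table = Bytes.toBytes("t1");
    byte[] family = Bytes.toBytes("cf1");
    TablePermission perm =
        new TablePermission(table, family, Permission.Action.READ);

    perm.implies(table, family, Bytes.toBytes("q1"),
        Permission.Action.READ);                                 // true
    perm.implies(table, Bytes.toBytes("cf2"), null,
        Permission.Action.READ);                                 // false: other family
    perm.implies(table, family, null, Permission.Action.WRITE);  // false: not granted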

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,167 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.util.Bytes;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Represents an authorization for access over the given table, column family
+ * plus qualifier, for the given user.
+ */
+public class UserPermission extends TablePermission {
+  private static Log LOG = LogFactory.getLog(UserPermission.class);
+
+  private byte[] user;
+
+  /** Nullary constructor for Writable, do not use */
+  public UserPermission() {
+    super();
+  }
+
+  /**
+   * Creates a new instance for the given user.
+   * @param user the user
+   * @param assigned the list of allowed actions
+   */
+  public UserPermission(byte[] user, Action... assigned) {
+    super(null, null, null, assigned);
+    this.user = user;
+  }
+
+  /**
+   * Creates a new instance for the given user,
+   * matching the actions with the given codes.
+   * @param user the user
+   * @param actionCodes the list of allowed action codes
+   */
+  public UserPermission(byte[] user, byte[] actionCodes) {
+    super(null, null, null, actionCodes);
+    this.user = user;
+  }
+
+  /**
+   * Creates a new instance for the given user, table and column family.
+   * @param user the user
+   * @param table the table
+   * @param family the family, can be null if action is allowed over the entire
+   *   table
+   * @param assigned the list of allowed actions
+   */
+  public UserPermission(byte[] user, byte[] table, byte[] family,
+                        Action... assigned) {
+    super(table, family, assigned);
+    this.user = user;
+  }
+
+  /**
+   * Creates a new permission for the given user, table, column family and
+   * column qualifier.
+   * @param user the user
+   * @param table the table
+   * @param family the family, can be null if action is allowed over the entire
+   *   table
+   * @param qualifier the column qualifier, can be null if action is allowed
+   *   over the entire column family
+   * @param assigned the list of allowed actions
+   */
+  public UserPermission(byte[] user, byte[] table, byte[] family,
+                        byte[] qualifier, Action... assigned) {
+    super(table, family, qualifier, assigned);
+    this.user = user;
+  }
+
+  /**
+   * Creates a new instance for the given user, table, column family and
+   * qualifier, matching the actions with the given codes.
+   * @param user the user
+   * @param table the table
+   * @param family the family, can be null if action is allowed over the entire
+   *   table
+   * @param qualifier the column qualifier, can be null if action is allowed
+   *   over the entire column family
+   * @param actionCodes the list of allowed action codes
+   */
+  public UserPermission(byte[] user, byte[] table, byte[] family,
+                        byte[] qualifier, byte[] actionCodes) {
+    super(table, family, qualifier, actionCodes);
+    this.user = user;
+  }
+
+  public byte[] getUser() {
+    return user;
+  }
+
+  /**
+   * Returns true if this permission describes a global user permission.
+   */
+  public boolean isGlobal() {
+    byte[] tableName = getTable();
+    return (tableName == null || tableName.length == 0);
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (!(obj instanceof UserPermission)) {
+      return false;
+    }
+    UserPermission other = (UserPermission)obj;
+
+    if ((Bytes.equals(user, other.getUser()) &&
+        super.equals(obj))) {
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  @Override
+  public int hashCode() {
+    final int prime = 37;
+    int result = super.hashCode();
+    if (user != null) {
+      result = prime * result + Bytes.hashCode(user);
+    }
+    return result;
+  }
+
+  public String toString() {
+    StringBuilder str = new StringBuilder("UserPermission: ")
+        .append("user=").append(Bytes.toString(user))
+        .append(", ").append(super.toString());
+    return str.toString();
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    user = Bytes.readByteArray(in);
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+    Bytes.writeByteArray(out, user);
+  }
+}
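
A short sketch of the difference between a global and a table-scoped grant
(user and table names are hypothetical):

    UserPermission globalAdmin =
        new UserPermission(Bytes.toBytes("alice"), Permission.Action.ADMIN);
    UserPermission tableRead =
        new UserPermission(Bytes.toBytes("bob"), Bytes.toBytes("t1"), null,
            Permission.Action.READ);

    globalAdmin.isGlobal();  // true: no table was set
    tableRead.isGlobal();    // false: scoped to table "t1"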

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.TokenIdentifier;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+/**
+ * Represents the identity information stored in an HBase authentication token.
+ */
+public class AuthenticationTokenIdentifier extends TokenIdentifier {
+  public static final Text AUTH_TOKEN_TYPE = new Text("HBASE_AUTH_TOKEN");
+
+  protected String username;
+  protected int keyId;
+  protected long issueDate;
+  protected long expirationDate;
+  protected long sequenceNumber;
+  
+  public AuthenticationTokenIdentifier() {
+  }
+
+  public AuthenticationTokenIdentifier(String username) {
+    this.username = username;
+  }
+
+  public AuthenticationTokenIdentifier(String username, int keyId,
+      long issueDate, long expirationDate) {
+    this.username = username;
+    this.keyId = keyId;
+    this.issueDate = issueDate;
+    this.expirationDate = expirationDate;
+  }
+
+  @Override
+  public Text getKind() {
+    return AUTH_TOKEN_TYPE;
+  }
+
+  @Override
+  public UserGroupInformation getUser() {
+    if (username == null || "".equals(username)) {
+      return null;
+    }
+    return UserGroupInformation.createRemoteUser(username);
+  }
+
+  public String getUsername() {
+    return username;
+  }
+
+  void setUsername(String name) {
+    this.username = name;
+  }
+
+  public int getKeyId() {
+    return keyId;
+  }
+
+  void setKeyId(int id) {
+    this.keyId = id;
+  }
+
+  public long getIssueDate() {
+    return issueDate;
+  }
+
+  void setIssueDate(long timestamp) {
+    this.issueDate = timestamp;
+  }
+
+  public long getExpirationDate() {
+    return expirationDate;
+  }
+
+  void setExpirationDate(long timestamp) {
+    this.expirationDate = timestamp;
+  }
+
+  public long getSequenceNumber() {
+    return sequenceNumber;
+  }
+
+  void setSequenceNumber(long seq) {
+    this.sequenceNumber = seq;
+  }
+
+  public byte[] toBytes() {
+    AuthenticationProtos.TokenIdentifier.Builder builder =
+        AuthenticationProtos.TokenIdentifier.newBuilder();
+    builder.setKind(AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN);
+    if (username != null) {
+      builder.setUsername(ByteString.copyFromUtf8(username));
+    }
+    builder.setIssueDate(issueDate)
+        .setExpirationDate(expirationDate)
+        .setKeyId(keyId)
+        .setSequenceNumber(sequenceNumber);
+    return builder.build().toByteArray();
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    byte[] pbBytes = toBytes();
+    out.writeInt(pbBytes.length);
+    out.write(pbBytes);
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    int len = in.readInt();
+    byte[] inBytes = new byte[len];
+    in.readFully(inBytes);
+    AuthenticationProtos.TokenIdentifier identifier =
+        AuthenticationProtos.TokenIdentifier.newBuilder().mergeFrom(inBytes).build();
+    // sanity check on type
+    if (!identifier.hasKind() ||
+        identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN) {
+      throw new IOException("Invalid TokenIdentifier kind from input "+identifier.getKind());
+    }
+
+    // copy the field values
+    if (identifier.hasUsername()) {
+      username = identifier.getUsername().toStringUtf8();
+    }
+    if (identifier.hasKeyId()) {
+      keyId = identifier.getKeyId();
+    }
+    if (identifier.hasIssueDate()) {
+      issueDate = identifier.getIssueDate();
+    }
+    if (identifier.hasExpirationDate()) {
+      expirationDate = identifier.getExpirationDate();
+    }
+    if (identifier.hasSequenceNumber()) {
+      sequenceNumber = identifier.getSequenceNumber();
+    }
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null) {
+      return false;
+    }
+    if (other instanceof AuthenticationTokenIdentifier) {
+      AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier)other;
+      return sequenceNumber == ident.getSequenceNumber()
+          && keyId == ident.getKeyId()
+          && issueDate == ident.getIssueDate()
+          && expirationDate == ident.getExpirationDate()
+          && (username == null ? ident.getUsername() == null :
+              username.equals(ident.getUsername()));
+    }
+    return false;
+  }
+
+  @Override
+  public int hashCode() {
+    return (int)sequenceNumber;
+  }
+}
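
A minimal round-trip sketch of the Writable serialization above (run inside a
method that may throw IOException; the username, key id and lifetimes are
hypothetical):

    AuthenticationTokenIdentifier id = new AuthenticationTokenIdentifier(
        "alice", 42, System.currentTimeMillis(),
        System.currentTimeMillis() + 24 * 60 * 60 * 1000L);

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    id.write(new DataOutputStream(bos));  // length-prefixed protobuf bytes

    AuthenticationTokenIdentifier copy = new AuthenticationTokenIdentifier();
    copy.readFields(new DataInputStream(
        new ByteArrayInputStream(bos.toByteArray())));
    // copy.equals(id) now holds.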

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.token;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenSelector;
+
+import java.util.Collection;
+
+public class AuthenticationTokenSelector
+    implements TokenSelector<AuthenticationTokenIdentifier> {
+  private static Log LOG = LogFactory.getLog(AuthenticationTokenSelector.class);
+
+  public AuthenticationTokenSelector() {
+  }
+
+  @Override
+  public Token<AuthenticationTokenIdentifier> selectToken(Text serviceName,
+      Collection<Token<? extends TokenIdentifier>> tokens) {
+    if (serviceName != null) {
+      for (Token ident : tokens) {
+        if (serviceName.equals(ident.getService()) &&
+            AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Returning token "+ident);
+          }
+          return (Token<AuthenticationTokenIdentifier>)ident;
+        }
+      }
+    }
+    LOG.debug("No matching token found");
+    return null;
+  }
+}
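
A sketch of how the selector is typically driven (run where IOException can
propagate; the service name "cluster-id" is a hypothetical placeholder):

    AuthenticationTokenSelector selector = new AuthenticationTokenSelector();
    Collection<Token<? extends TokenIdentifier>> tokens =
        UserGroupInformation.getCurrentUser().getTokens();
    Token<AuthenticationTokenIdentifier> token =
        selector.selectToken(new Text("cluster-id"), tokens);
    if (token == null) {
      // No HBase auth token for that cluster; fall back to other auth.
    }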

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,66 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.snapshot;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.util.Bytes;
+
+/**
+ * Class to help with dealing with a snapshot description on the client side.
+ * There is a corresponding class on the server side.
+ */
+@InterfaceAudience.Private
+public class ClientSnapshotDescriptionUtils {
+  /**
+   * Check to make sure that the description of the snapshot requested is valid
+   * @param snapshot description of the snapshot
+   * @throws IllegalArgumentException if the name of the snapshot or the name of the table to
+   *           snapshot are not valid names.
+   */
+  public static void assertSnapshotRequestIsValid(HBaseProtos.SnapshotDescription snapshot)
+      throws IllegalArgumentException {
+    // FIXME these method names are really bad - trunk will probably change
+    // .META. and -ROOT- snapshots are not allowed
+    if (HTableDescriptor.isMetaTable(Bytes.toBytes(snapshot.getTable()))) {
+      throw new IllegalArgumentException(".META. and -ROOT- snapshots are not allowed");
+    }
+    // make sure the snapshot name is valid
+    HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshot.getName()));
+    // make sure the table name is valid
+    HTableDescriptor.isLegalTableName(Bytes.toBytes(snapshot.getTable()));
+  }
+
+  /**
+   * Returns a single line (no \n) representation of snapshot metadata.  Use
+   * this instead of
+   * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription#toString()}.
+   * We don't replace SnapshotDescription's toString because it is
+   * auto-generated by protoc.
+   * @param ssd the snapshot descriptor to summarize, may be null
+   * @return Single line string with a summary of the snapshot parameters
+   */
+  public static String toString(HBaseProtos.SnapshotDescription ssd) {
+    if (ssd == null) {
+      return null;
+    }
+    return "{ ss=" + ssd.getName() + " table=" + ssd.getTable()
+        + " type=" + ssd.getType() + " }";
+  }
+}
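
A sketch of validating a client-built description before submitting it (the
snapshot and table names are hypothetical):

    HBaseProtos.SnapshotDescription snapshot =
        HBaseProtos.SnapshotDescription.newBuilder()
            .setName("snap1")
            .setTable("t1")
            .build();
    // Throws IllegalArgumentException for .META./-ROOT- or illegal names.
    ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot);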

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/HasThread.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/HasThread.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/HasThread.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/HasThread.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.lang.Thread.UncaughtExceptionHandler;
+
+/**
+ * Abstract class which contains a Thread and delegates the common Thread
+ * methods to that instance.
+ * 
+ * The purpose of this class is to work around Sun JVM bug #6915621, in which
+ * something internal to the JDK uses Thread.currentThread() as a monitor
+ * lock. This can produce deadlocks like HBASE-4367, HBASE-4101, etc.
+ */
+@InterfaceAudience.Private
+public abstract class HasThread implements Runnable {
+  private final Thread thread;
+  
+  public HasThread() {
+    this.thread = new Thread(this);
+  }
+
+  public HasThread(String name) {
+    this.thread = new Thread(this, name);
+  }
+  
+  public Thread getThread() {
+    return thread;
+  }
+  
+  public abstract void run();
+  
+  //// Begin delegation to Thread
+  
+  public final String getName() {
+    return thread.getName();
+  }
+
+  public void interrupt() {
+    thread.interrupt();
+  }
+
+  public final boolean isAlive() {
+    return thread.isAlive();
+  }
+
+  public boolean isInterrupted() {
+    return thread.isInterrupted();
+  }
+
+  public final void setDaemon(boolean on) {
+    thread.setDaemon(on);
+  }
+
+  public final void setName(String name) {
+    thread.setName(name);
+  }
+
+  public final void setPriority(int newPriority) {
+    thread.setPriority(newPriority);
+  }
+
+  public void setUncaughtExceptionHandler(UncaughtExceptionHandler eh) {
+    thread.setUncaughtExceptionHandler(eh);
+  }
+
+  public void start() {
+    thread.start();
+  }
+  
+  public final void join() throws InterruptedException {
+    thread.join();
+  }
+
+  public final void join(long millis, int nanos) throws InterruptedException {
+    thread.join(millis, nanos);
+  }
+
+  public final void join(long millis) throws InterruptedException {
+    thread.join(millis);
+  }
+  //// End delegation to Thread
+}
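
A minimal subclass sketch (the Worker class and its loop body are
hypothetical):

    class Worker extends HasThread {
      @Override
      public void run() {
        while (!isInterrupted()) {
          // ... do periodic work ...
        }
      }
    }

    Worker w = new Worker();
    w.setDaemon(true);   // must precede start(), as with a plain Thread
    w.start();
    // later: w.interrupt(); w.join();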

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,451 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ *
+ * The <code>PoolMap</code> maps a key to a collection of values, the elements
+ * of which are managed by a pool. In effect, that collection acts as a shared
+ * pool of resources, access to which is closely controlled as per the semantics
+ * of the pool.
+ *
+ * <p>
+ * If the pool size is set to a positive number, that value caps the number
+ * of resources a pool may contain for any given key.  A size of
+ * {@link Integer#MAX_VALUE} is interpreted as an unbounded pool.
+ * </p>
+ *
+ * @param <K>
+ *          the type of the key to the resource
+ * @param <V>
+ *          the type of the resource being pooled
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class PoolMap<K, V> implements Map<K, V> {
+  private PoolType poolType;
+
+  private int poolMaxSize;
+
+  private Map<K, Pool<V>> pools = new ConcurrentHashMap<K, Pool<V>>();
+
+  public PoolMap(PoolType poolType) {
+    this.poolType = poolType;
+  }
+
+  public PoolMap(PoolType poolType, int poolMaxSize) {
+    this.poolType = poolType;
+    this.poolMaxSize = poolMaxSize;
+  }
+
+  @Override
+  public V get(Object key) {
+    Pool<V> pool = pools.get(key);
+    return pool != null ? pool.get() : null;
+  }
+
+  @Override
+  public V put(K key, V value) {
+    Pool<V> pool = pools.get(key);
+    if (pool == null) {
+      pools.put(key, pool = createPool());
+    }
+    return pool != null ? pool.put(value) : null;
+  }
+
+  @SuppressWarnings("unchecked")
+  @Override
+  public V remove(Object key) {
+    Pool<V> pool = pools.remove(key);
+    if (pool != null) {
+      remove((K) key, pool.get());
+    }
+    return null;
+  }
+
+  public boolean remove(K key, V value) {
+    Pool<V> pool = pools.get(key);
+    boolean res = false;
+    if (pool != null) {
+      res = pool.remove(value);
+      if (res && pool.size() == 0) {
+        pools.remove(key);
+      }
+    }
+    return res;
+  }
+
+  @Override
+  public Collection<V> values() {
+    Collection<V> values = new ArrayList<V>();
+    for (Pool<V> pool : pools.values()) {
+      Collection<V> poolValues = pool.values();
+      if (poolValues != null) {
+        values.addAll(poolValues);
+      }
+    }
+    return values;
+  }
+
+  public Collection<V> values(K key) {
+    Collection<V> values = new ArrayList<V>();
+    Pool<V> pool = pools.get(key);
+    if (pool != null) {
+      Collection<V> poolValues = pool.values();
+      if (poolValues != null) {
+        values.addAll(poolValues);
+      }
+    }
+    return values;
+  }
+
+
+  @Override
+  public boolean isEmpty() {
+    return pools.isEmpty();
+  }
+
+  @Override
+  public int size() {
+    return pools.size();
+  }
+
+  public int size(K key) {
+    Pool<V> pool = pools.get(key);
+    return pool != null ? pool.size() : 0;
+  }
+
+  @Override
+  public boolean containsKey(Object key) {
+    return pools.containsKey(key);
+  }
+
+  @Override
+  public boolean containsValue(Object value) {
+    if (value == null) {
+      return false;
+    }
+    for (Pool<V> pool : pools.values()) {
+      if (value.equals(pool.get())) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  @Override
+  public void putAll(Map<? extends K, ? extends V> map) {
+    for (Map.Entry<? extends K, ? extends V> entry : map.entrySet()) {
+      put(entry.getKey(), entry.getValue());
+    }
+  }
+
+  @Override
+  public void clear() {
+    for (Pool<V> pool : pools.values()) {
+      pool.clear();
+    }
+    pools.clear();
+  }
+
+  @Override
+  public Set<K> keySet() {
+    return pools.keySet();
+  }
+
+  @Override
+  public Set<Map.Entry<K, V>> entrySet() {
+    Set<Map.Entry<K, V>> entries = new HashSet<Entry<K, V>>();
+    for (Map.Entry<K, Pool<V>> poolEntry : pools.entrySet()) {
+      final K poolKey = poolEntry.getKey();
+      final Pool<V> pool = poolEntry.getValue();
+      if (pool != null) {
+        for (final V poolValue : pool.values()) {
+          entries.add(new Map.Entry<K, V>() {
+            @Override
+            public K getKey() {
+              return poolKey;
+            }
+
+            @Override
+            public V getValue() {
+              return poolValue;
+            }
+
+            @Override
+            public V setValue(V value) {
+              return pool.put(value);
+            }
+          });
+        }
+      }
+    }
+    return entries;
+  }
+
+  protected interface Pool<R> {
+    public R get();
+
+    public R put(R resource);
+
+    public boolean remove(R resource);
+
+    public void clear();
+
+    public Collection<R> values();
+
+    public int size();
+  }
+
+  public enum PoolType {
+    Reusable, ThreadLocal, RoundRobin;
+
+    public static PoolType valueOf(String poolTypeName,
+        PoolType defaultPoolType, PoolType... allowedPoolTypes) {
+      PoolType poolType = PoolType.fuzzyMatch(poolTypeName);
+      if (poolType != null) {
+        boolean allowedType = false;
+        if (poolType.equals(defaultPoolType)) {
+          allowedType = true;
+        } else {
+          if (allowedPoolTypes != null) {
+            for (PoolType allowedPoolType : allowedPoolTypes) {
+              if (poolType.equals(allowedPoolType)) {
+                allowedType = true;
+                break;
+              }
+            }
+          }
+        }
+        if (!allowedType) {
+          poolType = null;
+        }
+      }
+      return (poolType != null) ? poolType : defaultPoolType;
+    }
+
+    public static String fuzzyNormalize(String name) {
+      return name != null ? name.replaceAll("-", "").trim().toLowerCase() : "";
+    }
+
+    public static PoolType fuzzyMatch(String name) {
+      for (PoolType poolType : values()) {
+        if (fuzzyNormalize(name).equals(fuzzyNormalize(poolType.name()))) {
+          return poolType;
+        }
+      }
+      return null;
+    }
+  }
+
+  protected Pool<V> createPool() {
+    switch (poolType) {
+    case Reusable:
+      return new ReusablePool<V>(poolMaxSize);
+    case RoundRobin:
+      return new RoundRobinPool<V>(poolMaxSize);
+    case ThreadLocal:
+      return new ThreadLocalPool<V>();
+    }
+    return null;
+  }
+
+  /**
+   * The <code>ReusablePool</code> represents a {@link PoolMap.Pool} that builds
+   * on the {@link ConcurrentLinkedQueue} class. It essentially allows resources
+   * to be checked out, at which point they are removed from this pool. When a
+   * resource is no longer required, it should be returned to the pool in order to be
+   * reused.
+   *
+   * <p>
+   * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of
+   * the pool is unbounded. Otherwise, it caps the number of consumers that can
+   * check out a resource from this pool to the (non-zero positive) value
+   * specified in {@link #maxSize}.
+   * </p>
+   *
+   * @param <R>
+   *          the type of the resource
+   */
+  @SuppressWarnings("serial")
+  public class ReusablePool<R> extends ConcurrentLinkedQueue<R> implements Pool<R> {
+    private int maxSize;
+
+    public ReusablePool(int maxSize) {
+      this.maxSize = maxSize;
+    }
+
+    @Override
+    public R get() {
+      return poll();
+    }
+
+    @Override
+    public R put(R resource) {
+      if (super.size() < maxSize) {
+        add(resource);
+      }
+      return null;
+    }
+
+    @Override
+    public Collection<R> values() {
+      return this;
+    }
+  }
+
+  /**
+   * The <code>RoundRobinPool</code> represents a {@link PoolMap.Pool}, which
+   * stores its resources in an {@link ArrayList}. It load-balances access to
+   * its resources by returning a different resource every time a given key is
+   * looked up.
+   *
+   * <p>
+   * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of
+   * the pool is unbounded. Otherwise, it caps the number of resources in this
+   * pool to the (non-zero positive) value specified in {@link #maxSize}.
+   * </p>
+   *
+   * @param <R>
+   *          the type of the resource
+   *
+   */
+  @SuppressWarnings("serial")
+  class RoundRobinPool<R> extends CopyOnWriteArrayList<R> implements Pool<R> {
+    private int maxSize;
+    private int nextResource = 0;
+
+    public RoundRobinPool(int maxSize) {
+      this.maxSize = maxSize;
+    }
+
+    @Override
+    public R put(R resource) {
+      if (super.size() < maxSize) {
+        add(resource);
+      }
+      return null;
+    }
+
+    @Override
+    public R get() {
+      // Return null until the pool has filled up to maxSize, so callers keep
+      // creating new resources; once full, rotate through the pooled ones.
+      if (super.size() < maxSize) {
+        return null;
+      }
+      nextResource %= super.size();
+      R resource = get(nextResource++);
+      return resource;
+    }
+
+    @Override
+    public Collection<R> values() {
+      return this;
+    }
+
+  }
+
+  /**
+   * The <code>ThreadLocalPool</code> represents a {@link PoolMap.Pool} that
+   * builds on the {@link ThreadLocal} class. It essentially binds the resource
+   * to the thread from which it is accessed.
+   *
+   * <p>
+   * Note that the size of the pool is essentially bounded by the number of threads
+   * that add resources to this pool.
+   * </p>
+   *
+   * @param <R>
+   *          the type of the resource
+   */
+  static class ThreadLocalPool<R> extends ThreadLocal<R> implements Pool<R> {
+    private static final Map<ThreadLocalPool<?>, AtomicInteger> poolSizes = new HashMap<ThreadLocalPool<?>, AtomicInteger>();
+
+    public ThreadLocalPool() {
+    }
+
+    @Override
+    public R put(R resource) {
+      R previousResource = get();
+      if (previousResource == null) {
+        AtomicInteger poolSize = poolSizes.get(this);
+        if (poolSize == null) {
+          poolSizes.put(this, poolSize = new AtomicInteger(0));
+        }
+        poolSize.incrementAndGet();
+      }
+      this.set(resource);
+      return previousResource;
+    }
+
+    @Override
+    public void remove() {
+      super.remove();
+      AtomicInteger poolSize = poolSizes.get(this);
+      if (poolSize != null) {
+        poolSize.decrementAndGet();
+      }
+    }
+
+    @Override
+    public int size() {
+      AtomicInteger poolSize = poolSizes.get(this);
+      return poolSize != null ? poolSize.get() : 0;
+    }
+
+    @Override
+    public boolean remove(R resource) {
+      R previousResource = super.get();
+      if (resource != null && resource.equals(previousResource)) {
+        remove();
+        return true;
+      } else {
+        return false;
+      }
+    }
+
+    @Override
+    public void clear() {
+      super.remove();
+    }
+
+    @Override
+    public Collection<R> values() {
+      List<R> values = new ArrayList<R>();
+      values.add(get());
+      return values;
+    }
+  }
+}
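
A usage sketch for the round-robin flavor (the Connection type, key and
factory are hypothetical; note that get() returns null until the per-key pool
has filled up to its cap, which is how callers know to create a new resource):

    PoolMap<String, Connection> pool =
        new PoolMap<String, Connection>(PoolMap.PoolType.RoundRobin, 5);

    Connection conn = pool.get("server1");
    if (conn == null) {
      conn = createConnection("server1");  // hypothetical factory
      pool.put("server1", conn);
    }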

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Sleeper.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,117 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hbase.Stoppable;
+
+/**
+ * Sleeper for current thread.
+ * Sleeps for the passed period.  Also checks the passed {@link Stoppable} and,
+ * if interrupted, returns when the stop flag is set (rather than going back to
+ * sleep until its sleep time is up).
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class Sleeper {
+  private final Log LOG = LogFactory.getLog(this.getClass().getName());
+  private final int period;
+  private final Stoppable stopper;
+  private static final long MINIMAL_DELTA_FOR_LOGGING = 10000;
+
+  private final Object sleepLock = new Object();
+  private boolean triggerWake = false;
+
+  /**
+   * @param sleep sleep time in milliseconds
+   * @param stopper When {@link Stoppable#isStopped()} is true, this thread will
+   * clean up and exit cleanly.
+   */
+  public Sleeper(final int sleep, final Stoppable stopper) {
+    this.period = sleep;
+    this.stopper = stopper;
+  }
+
+  /**
+   * Sleep for period.
+   */
+  public void sleep() {
+    sleep(System.currentTimeMillis());
+  }
+
+  /**
+   * If currently asleep, stops sleeping; if not asleep, will skip the next
+   * sleep cycle.
+   */
+  public void skipSleepCycle() {
+    synchronized (sleepLock) {
+      triggerWake = true;
+      sleepLock.notifyAll();
+    }
+  }
+
+  /**
+   * Sleep for the period, adjusted by the passed <code>startTime</code>.
+   * @param startTime Time some task started previous to now.  The time to
+   * sleep is docked by the current time minus the passed <code>startTime</code>.
+   */
+  public void sleep(final long startTime) {
+    if (this.stopper.isStopped()) {
+      return;
+    }
+    long now = System.currentTimeMillis();
+    long waitTime = this.period - (now - startTime);
+    if (waitTime > this.period) {
+      LOG.warn("Calculated wait time > " + this.period +
+        "; setting to this.period: " + System.currentTimeMillis() + ", " +
+        startTime);
+      waitTime = this.period;
+    }
+    while (waitTime > 0) {
+      long woke = -1;
+      try {
+        synchronized (sleepLock) {
+          if (triggerWake) break;
+          sleepLock.wait(waitTime);
+        }
+        woke = System.currentTimeMillis();
+        long slept = woke - now;
+        if (slept - this.period > MINIMAL_DELTA_FOR_LOGGING) {
+          LOG.warn("We slept " + slept + "ms instead of " + this.period +
+              "ms, this is likely due to a long " +
+              "garbage collecting pause and it's usually bad, see " +
+              "http://hbase.apache.org/book.html#trouble.rs.runtime.zkexpired");
+        }
+      } catch(InterruptedException iex) {
+      // Were we interrupted because we're meant to stop?  If not, just
+        // continue ignoring the interruption
+        if (this.stopper.isStopped()) {
+          return;
+        }
+      }
+      // Recalculate waitTime.
+      woke = (woke == -1)? System.currentTimeMillis(): woke;
+      waitTime = this.period - (woke - startTime);
+    }
+    triggerWake = false;
+  }
+}
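
A sketch of the typical chore-loop usage (stopper is any Stoppable, e.g. the
hosting server; doChore() is a hypothetical unit of work):

    Sleeper sleeper = new Sleeper(1000, stopper);  // 1 second period
    while (!stopper.isStopped()) {
      doChore();
      sleeper.sleep();  // returns early if skipSleepCycle() is called
    }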

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,167 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.Writable;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Utility class with methods for manipulating Writable objects
+ */
+@InterfaceAudience.Private
+public class Writables {
+  /**
+   * @param w writable
+   * @return The bytes of <code>w</code> gotten by running its
+   * {@link Writable#write(java.io.DataOutput)} method.
+   * @throws IOException e
+   * @see #getWritable(byte[], Writable)
+   */
+  public static byte [] getBytes(final Writable w) throws IOException {
+    if (w == null) {
+      throw new IllegalArgumentException("Writable cannot be null");
+    }
+    ByteArrayOutputStream byteStream = new ByteArrayOutputStream();
+    DataOutputStream out = new DataOutputStream(byteStream);
+    try {
+      w.write(out);
+      out.close();
+      out = null;
+      return byteStream.toByteArray();
+    } finally {
+      if (out != null) {
+        out.close();
+      }
+    }
+  }
+
+  /**
+   * Put a bunch of Writables as bytes all into the one byte array.
+   * @param ws writables
+   * @return The bytes of <code>ws</code> gotten by running each one's
+   * {@link Writable#write(java.io.DataOutput)} method, concatenated.
+   * @throws IOException e
+   */
+  public static byte [] getBytes(final Writable... ws) throws IOException {
+    List<byte []> bytes = new ArrayList<byte []>();
+    int size = 0;
+    for (Writable w: ws) {
+      byte [] b = getBytes(w);
+      size += b.length;
+      bytes.add(b);
+    }
+    byte [] result = new byte[size];
+    int offset = 0;
+    for (byte [] b: bytes) {
+      System.arraycopy(b, 0, result, offset, b.length);
+      offset += b.length;
+    }
+    return result;
+  }
+
+  /**
+   * Set bytes into the passed Writable by calling its
+   * {@link Writable#readFields(java.io.DataInput)}.
+   * @param bytes serialized bytes
+   * @param w An empty Writable (usually made by calling the null-arg
+   * constructor).
+   * @return The passed Writable after its readFields has been called, fed
+   * by the passed <code>bytes</code> array.
+   * @throws IOException e
+   * @throws IllegalArgumentException if passed a null or empty
+   * <code>bytes</code> array
+   */
+  public static Writable getWritable(final byte [] bytes, final Writable w)
+  throws IOException {
+    return getWritable(bytes, 0, bytes.length, w);
+  }
+
+  /**
+   * Set bytes into the passed Writable by calling its
+   * {@link Writable#readFields(java.io.DataInput)}.
+   * @param bytes serialized bytes
+   * @param offset offset into array
+   * @param length length of data
+   * @param w An empty Writable (usually made by calling the null-arg
+   * constructor).
+   * @return The passed Writable after its readFields has been called, fed
+   * by the passed <code>bytes</code> array.
+   * @throws IOException e
+   * @throws IllegalArgumentException if passed a null or empty
+   * <code>bytes</code> array
+   */
+  public static Writable getWritable(final byte [] bytes, final int offset,
+    final int length, final Writable w)
+  throws IOException {
+    if (bytes == null || length <= 0) {
+      throw new IllegalArgumentException("Can't build a writable with empty " +
+        "bytes array");
+    }
+    if (w == null) {
+      throw new IllegalArgumentException("Writable cannot be null");
+    }
+    DataInputBuffer in = new DataInputBuffer();
+    try {
+      in.reset(bytes, offset, length);
+      w.readFields(in);
+      return w;
+    } finally {
+      in.close();
+    }
+  }
+
+  /**
+   * Copy one Writable to another.  Copies bytes using data streams.
+   * @param src Source Writable
+   * @param tgt Target Writable
+   * @return The target Writable.
+   * @throws IOException e
+   */
+  public static Writable copyWritable(final Writable src, final Writable tgt)
+  throws IOException {
+    return copyWritable(getBytes(src), tgt);
+  }
+
+  /**
+   * Copy one Writable to another.  Copies bytes using data streams.
+   * @param bytes Serialized bytes of the source Writable
+   * @param tgt Target Writable
+   * @return The target Writable.
+   * @throws IOException e
+   */
+  public static Writable copyWritable(final byte [] bytes, final Writable tgt)
+  throws IOException {
+    DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes));
+    try {
+      tgt.readFields(dis);
+    } finally {
+      dis.close();
+    }
+    return tgt;
+  }
+}
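
A round-trip sketch using a stock Writable (run where IOException can
propagate):

    Text in = new Text("hello");  // org.apache.hadoop.io.Text
    byte[] bytes = Writables.getBytes(in);
    Text out = (Text) Writables.getWritable(bytes, new Text());
    // out now equals in; getWritable filled the empty instance via readFields().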

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.zookeeper.WatchedEvent;
+import org.apache.zookeeper.Watcher;
+
+/**
+ * An empty ZooKeeper watcher: its {@link #process(WatchedEvent)} ignores
+ * all events.
+ */
+@InterfaceAudience.Private
+public class EmptyWatcher implements Watcher {
+  // Used in this package but also by tests so needs to be public
+  public static final EmptyWatcher instance = new EmptyWatcher();
+  private EmptyWatcher() {}
+
+  public void process(WatchedEvent event) {}
+}
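
A short usage sketch: the singleton is what you hand to ZooKeeper calls
that require a Watcher when no notification handling is wanted. The
quorum string, timeout and znode path below are illustrative only:

    import org.apache.hadoop.hbase.zookeeper.EmptyWatcher;
    import org.apache.zookeeper.ZooKeeper;

    public class EmptyWatcherExample {
      public static void main(String[] args) throws Exception {
        // Connect without wiring up any session-event handling.
        ZooKeeper zk = new ZooKeeper("localhost:2181", 30000, EmptyWatcher.instance);
        // The watch registered by exists() fires into a no-op, so the
        // call stays side-effect free from the caller's point of view.
        System.out.println(zk.exists("/hbase", EmptyWatcher.instance));
        zk.close();
      }
    }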

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,163 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.util.Strings;
+import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.zookeeper.server.ServerConfig;
+import org.apache.zookeeper.server.ZooKeeperServerMain;
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
+import org.apache.zookeeper.server.quorum.QuorumPeerMain;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Properties;
+
+import static org.apache.hadoop.hbase.HConstants.DEFAULT_ZK_SESSION_TIMEOUT;
+import static org.apache.hadoop.hbase.HConstants.ZK_SESSION_TIMEOUT;
+
+/**
+ * HBase's version of ZooKeeper's QuorumPeer. When HBase is set to manage
+ * ZooKeeper, this class is used to start up QuorumPeer instances. By doing
+ * things in here rather than directly calling to ZooKeeper, we have more
+ * control over the process. This class uses {@link ZKConfig} to parse the
+ * zoo.cfg and to inject variables from HBase's site.xml configuration.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HQuorumPeer {
+
+  /**
+   * Parse ZooKeeper configuration from HBase XML config and run a QuorumPeer.
+   * @param args String[] of command line arguments. Not used.
+   */
+  public static void main(String[] args) {
+    Configuration conf = HBaseConfiguration.create();
+    try {
+      Properties zkProperties = ZKConfig.makeZKProps(conf);
+      writeMyID(zkProperties);
+      QuorumPeerConfig zkConfig = new QuorumPeerConfig();
+      zkConfig.parseProperties(zkProperties);
+
+      // login the zookeeper server principal (if using security)
+      ZKUtil.loginServer(conf, "hbase.zookeeper.server.keytab.file",
+        "hbase.zookeeper.server.kerberos.principal",
+        zkConfig.getClientPortAddress().getHostName());
+
+      runZKServer(zkConfig);
+    } catch (Exception e) {
+      e.printStackTrace();
+      System.exit(-1);
+    }
+  }
+
+  private static void runZKServer(QuorumPeerConfig zkConfig)
+      throws UnknownHostException, IOException {
+    if (zkConfig.isDistributed()) {
+      QuorumPeerMain qp = new QuorumPeerMain();
+      qp.runFromConfig(zkConfig);
+    } else {
+      ZooKeeperServerMain zk = new ZooKeeperServerMain();
+      ServerConfig serverConfig = new ServerConfig();
+      serverConfig.readFrom(zkConfig);
+      zk.runFromConfig(serverConfig);
+    }
+  }
+
+  private static boolean addressIsLocalHost(String address) {
+    return address.equals("localhost") || address.equals("127.0.0.1");
+  }
+
+  static void writeMyID(Properties properties) throws IOException {
+    long myId = -1;
+
+    Configuration conf = HBaseConfiguration.create();
+    String myAddress = Strings.domainNamePointerToHostName(DNS.getDefaultHost(
+        conf.get("hbase.zookeeper.dns.interface","default"),
+        conf.get("hbase.zookeeper.dns.nameserver","default")));
+
+    List<String> ips = new ArrayList<String>();
+
+    // Add what could be the best (configured) match
+    ips.add(myAddress.contains(".") ?
+        myAddress :
+        StringUtils.simpleHostname(myAddress));
+
+    // For all nics get all hostnames and IPs
+    Enumeration<NetworkInterface> nics = NetworkInterface.getNetworkInterfaces();
+    while (nics.hasMoreElements()) {
+      Enumeration<InetAddress> rawAdrs = nics.nextElement().getInetAddresses();
+      while (rawAdrs.hasMoreElements()) {
+        InetAddress inet = rawAdrs.nextElement();
+        ips.add(StringUtils.simpleHostname(inet.getHostName()));
+        ips.add(inet.getHostAddress());
+      }
+    }
+
+    for (Entry<Object, Object> entry : properties.entrySet()) {
+      String key = entry.getKey().toString().trim();
+      String value = entry.getValue().toString().trim();
+      if (key.startsWith("server.")) {
+        int dot = key.indexOf('.');
+        long id = Long.parseLong(key.substring(dot + 1));
+        String[] parts = value.split(":");
+        String address = parts[0];
+        if (addressIsLocalHost(address) || ips.contains(address)) {
+          myId = id;
+          break;
+        }
+      }
+    }
+
+    // Set the max session timeout from the provided client-side timeout
+    properties.setProperty("maxSessionTimeout",
+      conf.get(ZK_SESSION_TIMEOUT, Integer.toString(DEFAULT_ZK_SESSION_TIMEOUT)));
+
+    if (myId == -1) {
+      throw new IOException("Could not find my address: " + myAddress +
+                            " in list of ZooKeeper quorum servers");
+    }
+
+    String dataDirStr = properties.get("dataDir").toString().trim();
+    File dataDir = new File(dataDirStr);
+    if (!dataDir.isDirectory()) {
+      if (!dataDir.mkdirs()) {
+        throw new IOException("Unable to create data dir " + dataDir);
+      }
+    }
+
+    File myIdFile = new File(dataDir, "myid");
+    PrintWriter w = new PrintWriter(myIdFile);
+    try {
+      w.println(myId);
+    } finally {
+      w.close();
+    }
+  }
+}
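
A hedged sketch of the writeMyID contract above: it scans server.N
entries, matches the host part of each value against this machine's
names and addresses, and writes the matching N into dataDir/myid. The
hostnames and path are illustrative, and since writeMyID is
package-private the sketch compiles only inside
org.apache.hadoop.hbase.zookeeper:

    package org.apache.hadoop.hbase.zookeeper;

    import java.util.Properties;

    public class WriteMyIdExample {
      public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty("dataDir", "/tmp/hbase-zk");          // illustrative path
        props.setProperty("server.1", "zkhost1:2888:3888");     // some other peer
        props.setProperty("server.2", "localhost:2888:3888");   // matches addressIsLocalHost
        // Creates /tmp/hbase-zk if needed and writes "2" to /tmp/hbase-zk/myid;
        // it also copies the client session timeout into maxSessionTimeout.
        HQuorumPeer.writeMyID(props);
      }
    }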

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MasterAddressTracker.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
+import org.apache.zookeeper.KeeperException;
+import org.apache.zookeeper.data.Stat;
+
+import java.io.IOException;
+
+/**
+ * Manages the location of the current active Master for the RegionServer.
+ * <p>
+ * Listens for ZooKeeper events related to the master address. The node
+ * <code>/master</code> will contain the address of the current master.
+ * This listener is interested in
+ * <code>NodeDeleted</code> and <code>NodeCreated</code> events on
+ * <code>/master</code>.
+ * <p>
+ * Utilizes {@link ZooKeeperNodeTracker} for zk interactions.
+ * <p>
+ * You can get the current master via {@link #getMasterAddress()} or via
+ * {@link #getMasterAddress(ZooKeeperWatcher)} if you do not have a running
+ * instance of this Tracker in your context.
+ * <p>
+ * This class also includes utility for interacting with the master znode, for
+ * writing and reading the znode content.
+ */
+@InterfaceAudience.Private
+public class MasterAddressTracker extends ZooKeeperNodeTracker {
+  /**
+   * Construct a master address listener with the specified
+   * <code>zookeeper</code> reference.
+   * <p>
+   * This constructor does not trigger any actions, you must call methods
+   * explicitly.  Normally you will just want to execute {@link #start()} to
+   * begin tracking of the master address.
+   *
+   * @param watcher zk reference and watcher
+   * @param abortable abortable in case of fatal error
+   */
+  public MasterAddressTracker(ZooKeeperWatcher watcher, Abortable abortable) {
+    super(watcher, watcher.getMasterAddressZNode(), abortable);
+  }
+
+  /**
+   * Get the address of the current master if one is available.  Returns null
+   * if no current master.
+   * @return Server name or null if no master is set or the data could not
+   * be parsed.
+   */
+  public ServerName getMasterAddress() {
+    return getMasterAddress(false);
+  }
+
+  /**
+   * Get the address of the current master if one is available.  Returns null
+   * if no current master. If refresh is set, try to load the data from ZK again,
+   * otherwise, cached data will be used.
+   *
+   * @param refresh whether to refresh the data by calling ZK directly.
+   * @return Server name or null if no master is set or the data could not
+   * be parsed.
+   */
+  public ServerName getMasterAddress(final boolean refresh) {
+    try {
+      return ServerName.parseFrom(super.getData(refresh));
+    } catch (DeserializationException e) {
+      LOG.warn("Failed parse", e);
+      return null;
+    }
+  }
+
+  /**
+   * Get master address.
+   * Use this instead of {@link #getMasterAddress()} if you do not have an
+   * instance of this tracker in your context.
+   * @param zkw ZooKeeperWatcher to use
+   * @return ServerName stored in the master address znode
+   * @throws KeeperException if the znode content cannot be deserialized
+   * @throws IOException if the znode is absent or holds no data
+   */
+  public static ServerName getMasterAddress(final ZooKeeperWatcher zkw)
+  throws KeeperException, IOException {
+    byte [] data = ZKUtil.getData(zkw, zkw.getMasterAddressZNode());
+    if (data == null) {
+      throw new IOException("Can't get master address from ZooKeeper; znode data == null");
+    }
+    try {
+      return ServerName.parseFrom(data);
+    } catch (DeserializationException e) {
+      KeeperException ke = new KeeperException.DataInconsistencyException();
+      ke.initCause(e);
+      throw ke;
+    }
+  }
+
+  /**
+   * Set master address into the <code>master</code> znode or into the backup
+   * subdirectory of backup masters, depending on the passed-in
+   * <code>znode</code> path.
+   * @param zkw The ZooKeeperWatcher to use.
+   * @param znode Where to create the znode; could be at the top level or it
+   * could be under backup masters
+   * @param master ServerName of the current master
+   * @return true if node created, false if not; a watch is set in both cases
+   * @throws KeeperException
+   */
+  public static boolean setMasterAddress(final ZooKeeperWatcher zkw,
+      final String znode, final ServerName master)
+  throws KeeperException {
+    return ZKUtil.createEphemeralNodeAndWatch(zkw, znode, toByteArray(master));
+  }
+
+  /**
+   * Check if there is a master available.
+   * @return true if there is a master set, false if not.
+   */
+  public boolean hasMaster() {
+    return super.getData(false) != null;
+  }
+
+  /**
+   * @param sn ServerName to serialize
+   * @return Content of the master znode as a serialized pb with the pb
+   * magic as prefix.
+   */
+  static byte [] toByteArray(final ServerName sn) {
+    ZooKeeperProtos.Master.Builder mbuilder = ZooKeeperProtos.Master.newBuilder();
+    HBaseProtos.ServerName.Builder snbuilder = HBaseProtos.ServerName.newBuilder();
+    snbuilder.setHostName(sn.getHostname());
+    snbuilder.setPort(sn.getPort());
+    snbuilder.setStartCode(sn.getStartcode());
+    mbuilder.setMaster(snbuilder.build());
+    return ProtobufUtil.prependPBMagic(mbuilder.build().toByteArray());
+  }
+
+  /**
+   * Delete the master znode if its content (the master's ServerName string)
+   * equals the passed <code>content</code>.
+   * @return true if the znode was deleted, false otherwise
+   */
+  public static boolean deleteIfEquals(ZooKeeperWatcher zkw, final String content) {
+    if (content == null) {
+      throw new IllegalArgumentException("Content must not be null");
+    }
+
+    try {
+      Stat stat = new Stat();
+      byte[] data = ZKUtil.getDataNoWatch(zkw, zkw.getMasterAddressZNode(), stat);
+      ServerName sn = ServerName.parseFrom(data);
+      if (sn != null && content.equals(sn.toString())) {
+        return (ZKUtil.deleteNode(zkw, zkw.getMasterAddressZNode(), stat.getVersion()));
+      }
+    } catch (KeeperException e) {
+      LOG.warn("Can't get or delete the master znode", e);
+    } catch (DeserializationException e) {
+      LOG.warn("Can't get or delete the master znode", e);
+    }
+
+    return false;
+  }
+}
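
A hedged usage sketch tying the pieces together. It assumes a reachable
cluster, the ZooKeeperWatcher constructor used elsewhere in this commit,
and a throwaway Abortable:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
    import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;

    public class MasterAddressExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Abortable abortable = new Abortable() {
          public void abort(String why, Throwable e) { throw new RuntimeException(why, e); }
          public boolean isAborted() { return false; }
        };
        ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "master-address-example", abortable);
        // Long-lived form: start() registers the listener and primes the cache.
        MasterAddressTracker tracker = new MasterAddressTracker(zkw, abortable);
        tracker.start();
        ServerName cached = tracker.getMasterAddress();      // cached read
        ServerName fresh = tracker.getMasterAddress(true);   // re-reads from ZK
        // One-shot form when no tracker instance is in scope.
        ServerName oneShot = MasterAddressTracker.getMasterAddress(zkw);
        System.out.println(cached + " / " + fresh + " / " + oneShot);
      }
    }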

Added: hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java
URL: http://svn.apache.org/viewvc/hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java?rev=1449950&view=auto
==============================================================================
--- hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java (added)
+++ hbase/trunk/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetaNodeTracker.java Mon Feb 25 22:50:17 2013
@@ -0,0 +1,48 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HRegionInfo;
+
+/**
+ * Tracks the unassigned zookeeper node used by the META table.
+ * <p>
+ * If META is already assigned when instantiating this class, you will not
+ * receive any notification for that assignment.  You will receive a
+ * notification after META has been successfully assigned to a new location.
+ */
+@InterfaceAudience.Private
+public class MetaNodeTracker extends ZooKeeperNodeTracker {
+  /**
+   * Creates a meta node tracker.
+   * @param watcher zk reference and watcher
+   * @param abortable abortable in case of fatal error
+   */
+  public MetaNodeTracker(final ZooKeeperWatcher watcher, final Abortable abortable) {
+    super(watcher, ZKUtil.joinZNode(watcher.assignmentZNode,
+        HRegionInfo.FIRST_META_REGIONINFO.getEncodedName()), abortable);
+  }
+
+  @Override
+  public void nodeDeleted(String path) {
+    super.nodeDeleted(path);
+  }
+}
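
Per the class javadoc, the event of interest is the deletion of the
unassigned znode, which signals that META finished assignment. A hedged
fragment hooking that callback, reusing the zkw and abortable from the
MasterAddressTracker sketch above:

    MetaNodeTracker metaTracker = new MetaNodeTracker(zkw, abortable) {
      @Override
      public void nodeDeleted(String path) {
        super.nodeDeleted(path);
        // Fires after META has been successfully assigned to a new location.
        System.out.println("META unassigned znode deleted: " + path);
      }
    };
    metaTracker.start();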