Posted to commits@hbase.apache.org by ap...@apache.org on 2017/11/17 21:20:42 UTC

[10/13] hbase git commit: HBASE-19114 Split out o.a.h.h.zookeeper from hbase-server and hbase-client

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java
deleted file mode 100644
index a5b084b..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperNodeTracker.java
+++ /dev/null
@@ -1,251 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.zookeeper.KeeperException;
-
-/**
- * Tracks the availability and value of a single ZooKeeper node.
- *
- * <p>Utilizes the {@link ZooKeeperListener} interface to get the necessary
- * ZooKeeper events related to the node.
- *
- * <p>This is the base class used by trackers in both the Master and
- * RegionServers.
- */
-@InterfaceAudience.Private
-public abstract class ZooKeeperNodeTracker extends ZooKeeperListener {
-  // LOG is being used in subclasses, hence keeping it protected
-  protected static final Log LOG = LogFactory.getLog(ZooKeeperNodeTracker.class);
-  /** Path of node being tracked */
-  protected final String node;
-
-  /** Data of the node being tracked */
-  private byte [] data;
-
-  /** Used to abort if a fatal error occurs */
-  protected final Abortable abortable;
-
-  private boolean stopped = false;
-
-  /**
-   * Constructs a new ZK node tracker.
-   *
-   * <p>After construction, use {@link #start} to kick off tracking.
-   *
-   * @param watcher
-   * @param node
-   * @param abortable
-   */
-  public ZooKeeperNodeTracker(ZooKeeperWatcher watcher, String node,
-      Abortable abortable) {
-    super(watcher);
-    this.node = node;
-    this.abortable = abortable;
-    this.data = null;
-  }
-
-  /**
-   * Starts the tracking of the node in ZooKeeper.
-   *
-   * <p>Use {@link #blockUntilAvailable()} to block until the node is available
-   * or {@link #getData(boolean)} to get the data of the node if it is available.
-   */
-  public synchronized void start() {
-    this.watcher.registerListener(this);
-    try {
-      if(ZKUtil.watchAndCheckExists(watcher, node)) {
-        byte [] data = ZKUtil.getDataAndWatch(watcher, node);
-        if(data != null) {
-          this.data = data;
-        } else {
-          // It existed but now does not, try again to ensure a watch is set
-          LOG.debug("Try starting again because there is no data from " + node);
-          start();
-        }
-      }
-    } catch (KeeperException e) {
-      abortable.abort("Unexpected exception during initialization, aborting", e);
-    }
-  }
-
-  public synchronized void stop() {
-    this.stopped = true;
-    notifyAll();
-  }
-
-  /**
-   * Gets the data of the node, blocking until the node is available.
-   *
-   * @return data of the node
-   * @throws InterruptedException if the waiting thread is interrupted
-   */
-  public synchronized byte [] blockUntilAvailable()
-  throws InterruptedException {
-    return blockUntilAvailable(0, false);
-  }
-
-  /**
-   * Gets the data of the node, blocking until the node is available or the
-   * specified timeout has elapsed.
-   *
-   * @param timeout maximum time to wait for the node data to be available,
-   * in milliseconds.  Pass 0 for no timeout.
-   * @return data of the node
-   * @throws InterruptedException if the waiting thread is interrupted
-   */
-  public synchronized byte [] blockUntilAvailable(long timeout, boolean refresh)
-  throws InterruptedException {
-    if (timeout < 0) throw new IllegalArgumentException();
-    boolean notimeout = timeout == 0;
-    long startTime = System.currentTimeMillis();
-    long remaining = timeout;
-    if (refresh) {
-      try {
-        // This does not create a watch if the node does not exist
-        this.data = ZKUtil.getDataAndWatch(watcher, node);
-      } catch(KeeperException e) {
-        // We used to abort here, but in some cases the abort is ignored
-        // (empty Abortable), so it's better to log...
-        LOG.warn("Unexpected exception handling blockUntilAvailable", e);
-        abortable.abort("Unexpected exception handling blockUntilAvailable", e);
-      }
-    }
-    boolean nodeExistsChecked = (!refresh || data != null);
-    while (!this.stopped && (notimeout || remaining > 0) && this.data == null) {
-      if (!nodeExistsChecked) {
-        try {
-          nodeExistsChecked = (ZKUtil.checkExists(watcher, node) != -1);
-        } catch (KeeperException e) {
-          LOG.warn(
-            "Got exception while trying to check existence in  ZooKeeper" +
-            " of the node: "+node+", retrying if timeout not reached",e );
-        }
-
-        // It did not exist, but now it does.
-        if (nodeExistsChecked){
-          LOG.debug("Node " + node + " now exists, resetting a watcher");
-          try {
-            // This does not create a watch if the node does not exist
-            this.data = ZKUtil.getDataAndWatch(watcher, node);
-          } catch (KeeperException e) {
-            LOG.warn("Unexpected exception handling blockUntilAvailable", e);
-            abortable.abort("Unexpected exception handling blockUntilAvailable", e);
-          }
-        }
-      }
-      // We expect a notification, but we wait with
-      // a timeout to lower the impact of a race condition, if any.
-      wait(100);
-      remaining = timeout - (System.currentTimeMillis() - startTime);
-    }
-    return this.data;
-  }
-
-  /**
-   * Gets the data of the node.
-   *
-   * <p>If the node is currently available, the most up-to-date known version of
-   * the data is returned.  If the node is not currently available, null is
-   * returned.
-   * @param refresh whether to refresh the data by calling ZK directly.
-   * @return data of the node, null if unavailable
-   */
-  public synchronized byte [] getData(boolean refresh) {
-    if (refresh) {
-      try {
-        this.data = ZKUtil.getDataAndWatch(watcher, node);
-      } catch(KeeperException e) {
-        abortable.abort("Unexpected exception handling getData", e);
-      }
-    }
-    return this.data;
-  }
-
-  public String getNode() {
-    return this.node;
-  }
-
-  @Override
-  public synchronized void nodeCreated(String path) {
-    if (!path.equals(node)) return;
-    try {
-      byte [] data = ZKUtil.getDataAndWatch(watcher, node);
-      if (data != null) {
-        this.data = data;
-        notifyAll();
-      } else {
-        nodeDeleted(path);
-      }
-    } catch(KeeperException e) {
-      abortable.abort("Unexpected exception handling nodeCreated event", e);
-    }
-  }
-
-  @Override
-  public synchronized void nodeDeleted(String path) {
-    if(path.equals(node)) {
-      try {
-        if(ZKUtil.watchAndCheckExists(watcher, node)) {
-          nodeCreated(path);
-        } else {
-          this.data = null;
-        }
-      } catch(KeeperException e) {
-        abortable.abort("Unexpected exception handling nodeDeleted event", e);
-      }
-    }
-  }
-
-  @Override
-  public synchronized void nodeDataChanged(String path) {
-    if(path.equals(node)) {
-      nodeCreated(path);
-    }
-  }
-  
-  /**
-   * Checks if the base znode set via the property 'zookeeper.znode.parent'
-   * exists.
-   * @return true if the base znode exists,
-   *         false if it does not exist.
-   */
-  public boolean checkIfBaseNodeAvailable() {
-    try {
-      if (ZKUtil.checkExists(watcher, watcher.znodePaths.baseZNode) == -1) {
-        return false;
-      }
-    } catch (KeeperException e) {
-      abortable.abort("Exception while checking if basenode (" + watcher.znodePaths.baseZNode
-          + ") exists in ZooKeeper.",
-        e);
-    }
-    return true;
-  }
-
-  @Override
-  public String toString() {
-    return "ZooKeeperNodeTracker{" +
-        "node='" + node + ", stopped=" + stopped + '}';
-  }
-}
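
For context on the class removed above: ZooKeeperNodeTracker is used by subclassing it and
driving it through start() / blockUntilAvailable(), as its javadoc describes. A minimal
sketch of that pattern (the subclass name, znode path and timeout below are illustrative,
not part of this patch; exception handling is omitted):

    // Hypothetical subclass; the base class is abstract but adds no abstract methods,
    // so a constructor is all that is strictly required.
    public class ExampleTracker extends ZooKeeperNodeTracker {
      public ExampleTracker(ZooKeeperWatcher watcher, String node, Abortable abortable) {
        super(watcher, node, abortable);
      }
    }

    // Typical call site:
    ExampleTracker tracker = new ExampleTracker(watcher, "/hbase/example", abortable);
    tracker.start();                                          // register listener, set the watch
    byte[] data = tracker.blockUntilAvailable(30000, false);  // wait up to 30s for node data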

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
deleted file mode 100644
index 5d10cdf..0000000
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ /dev/null
@@ -1,638 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.CountDownLatch;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.AuthUtil;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.security.Superusers;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.ZooDefs.Perms;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Id;
-import org.apache.zookeeper.data.Stat;
-
-/**
- * Acts as the single ZooKeeper Watcher.  One instance of this is instantiated
- * for each Master, RegionServer, and client process.
- *
- * <p>This is the only class that implements {@link Watcher}.  Other internal
- * classes which need to be notified of ZooKeeper events must register with
- * the local instance of this watcher via {@link #registerListener}.
- *
- * <p>This class also holds and manages the connection to ZooKeeper.  Code to
- * deal with connection related events and exceptions are handled here.
- */
-@InterfaceAudience.Private
-public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
-  private static final Log LOG = LogFactory.getLog(ZooKeeperWatcher.class);
-
-  // Identifier for this watcher (for logging only).  It is made of the prefix
-  // passed on construction and the zookeeper sessionid.
-  private String prefix;
-  private String identifier;
-
-  // zookeeper quorum
-  private String quorum;
-
-  // zookeeper connection
-  private final RecoverableZooKeeper recoverableZooKeeper;
-
-  // abortable in case of zk failure
-  protected Abortable abortable;
-  // Used if abortable is null
-  private boolean aborted = false;
-
-  public final ZNodePaths znodePaths;
-
-  // listeners to be notified
-  private final List<ZooKeeperListener> listeners = new CopyOnWriteArrayList<>();
-
-  // Used by ZKUtil:waitForZKConnectionIfAuthenticating to wait for SASL
-  // negotiation to complete
-  public CountDownLatch saslLatch = new CountDownLatch(1);
-
-
-
-  private final Configuration conf;
-
-  /* A pattern that matches a Kerberos name, borrowed from Hadoop's KerberosName */
-  private static final Pattern NAME_PATTERN = Pattern.compile("([^/@]*)(/([^/@]*))?@([^/@]*)");
-
-  /**
-   * Instantiate a ZooKeeper connection and watcher.
-   * @param identifier string that is passed to RecoverableZookeeper to be used as
-   * identifier for this instance. Use null for default.
-   * @throws IOException
-   * @throws ZooKeeperConnectionException
-   */
-  public ZooKeeperWatcher(Configuration conf, String identifier,
-      Abortable abortable) throws ZooKeeperConnectionException, IOException {
-    this(conf, identifier, abortable, false);
-  }
-
-  /**
-   * Instantiate a ZooKeeper connection and watcher.
-   * @param conf
-   * @param identifier string that is passed to RecoverableZookeeper to be used as identifier for
-   *          this instance. Use null for default.
-   * @param abortable Can be null if, on error, there is no host to abort: e.g. client
-   *          context.
-   * @param canCreateBaseZNode
-   * @throws IOException
-   * @throws ZooKeeperConnectionException
-   */
-  public ZooKeeperWatcher(Configuration conf, String identifier,
-      Abortable abortable, boolean canCreateBaseZNode)
-  throws IOException, ZooKeeperConnectionException {
-    this.conf = conf;
-    this.quorum = ZKConfig.getZKQuorumServersString(conf);
-    this.prefix = identifier;
-    // Identifier will get the sessionid appended later below when we
-    // handle the syncconnect event.
-    this.identifier = identifier + "0x0";
-    this.abortable = abortable;
-    this.znodePaths = new ZNodePaths(conf);
-    PendingWatcher pendingWatcher = new PendingWatcher();
-    this.recoverableZooKeeper = ZKUtil.connect(conf, quorum, pendingWatcher, identifier);
-    pendingWatcher.prepare(this);
-    if (canCreateBaseZNode) {
-      try {
-        createBaseZNodes();
-      } catch (ZooKeeperConnectionException zce) {
-        try {
-          this.recoverableZooKeeper.close();
-        } catch (InterruptedException ie) {
-          LOG.debug("Encountered InterruptedException when closing " + this.recoverableZooKeeper);
-          Thread.currentThread().interrupt();
-        }
-        throw zce;
-      }
-    }
-  }
-
-  private void createBaseZNodes() throws ZooKeeperConnectionException {
-    try {
-      // Create all the necessary "directories" of znodes
-      ZKUtil.createWithParents(this, znodePaths.baseZNode);
-      ZKUtil.createAndFailSilent(this, znodePaths.rsZNode);
-      ZKUtil.createAndFailSilent(this, znodePaths.drainingZNode);
-      ZKUtil.createAndFailSilent(this, znodePaths.tableZNode);
-      ZKUtil.createAndFailSilent(this, znodePaths.splitLogZNode);
-      ZKUtil.createAndFailSilent(this, znodePaths.backupMasterAddressesZNode);
-      ZKUtil.createAndFailSilent(this, znodePaths.tableLockZNode);
-      ZKUtil.createAndFailSilent(this, znodePaths.masterMaintZNode);
-    } catch (KeeperException e) {
-      throw new ZooKeeperConnectionException(
-          prefix("Unexpected KeeperException creating base node"), e);
-    }
-  }
-
-  /** Returns whether the znode is supposed to be readable by the client
-   * and DOES NOT contain sensitive information (world readable).*/
-  public boolean isClientReadable(String node) {
-    // Developer notice: These znodes are world readable. DO NOT add more znodes here UNLESS
-    // all clients need to access this data to work. Using zk for sharing data to clients (other
-    // than the service lookup case) is not a recommended design pattern.
-    return
-        node.equals(znodePaths.baseZNode) ||
-        znodePaths.isAnyMetaReplicaZNode(node) ||
-        node.equals(znodePaths.masterAddressZNode) ||
-        node.equals(znodePaths.clusterIdZNode)||
-        node.equals(znodePaths.rsZNode) ||
-        // /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not
-        node.equals(znodePaths.tableZNode) ||
-        node.startsWith(znodePaths.tableZNode + "/");
-  }
-
-  /**
-   * On master start, we check the znode ACLs under the root directory and set the ACLs properly
-   * if needed. If the cluster goes from an insecure setup to a secure setup, this step is needed
-   * so that the existing znodes created with open permissions are now changed with restrictive
-   * perms.
-   */
-  public void checkAndSetZNodeAcls() {
-    if (!ZKUtil.isSecureZooKeeper(getConfiguration())) {
-      LOG.info("not a secure deployment, proceeding");
-      return;
-    }
-
-    // Check the base znodes permission first. Only do the recursion if base znode's perms are not
-    // correct.
-    try {
-      List<ACL> actualAcls = recoverableZooKeeper.getAcl(znodePaths.baseZNode, new Stat());
-
-      if (!isBaseZnodeAclSetup(actualAcls)) {
-        LOG.info("setting znode ACLs");
-        setZnodeAclsRecursive(znodePaths.baseZNode);
-      }
-    } catch(KeeperException.NoNodeException nne) {
-      return;
-    } catch(InterruptedException ie) {
-      interruptedExceptionNoThrow(ie, false);
-    } catch (IOException|KeeperException e) {
-      LOG.warn("Received exception while checking and setting zookeeper ACLs", e);
-    }
-  }
-
-  /**
-   * Set the znode perms recursively. This will do post-order recursion, so that baseZnode ACLs
-   * will be set last in case the master fails in between.
-   * @param znode
-   */
-  private void setZnodeAclsRecursive(String znode) throws KeeperException, InterruptedException {
-    List<String> children = recoverableZooKeeper.getChildren(znode, false);
-
-    for (String child : children) {
-      setZnodeAclsRecursive(ZNodePaths.joinZNode(znode, child));
-    }
-    List<ACL> acls = ZKUtil.createACL(this, znode, true);
-    LOG.info("Setting ACLs for znode:" + znode + " , acl:" + acls);
-    recoverableZooKeeper.setAcl(znode, acls, -1);
-  }
-
-  /**
-   * Checks whether the ACLs returned from the base znode (/hbase) are set for a secure setup.
-   * @param acls acls from zookeeper
-   * @return whether ACLs are set for the base znode
-   * @throws IOException
-   */
-  private boolean isBaseZnodeAclSetup(List<ACL> acls) throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Checking znode ACLs");
-    }
-    String[] superUsers = conf.getStrings(Superusers.SUPERUSER_CONF_KEY);
-    // Check whether ACL set for all superusers
-    if (superUsers != null && !checkACLForSuperUsers(superUsers, acls)) {
-      return false;
-    }
-
-    // this assumes that current authenticated user is the same as zookeeper client user
-    // configured via JAAS
-    String hbaseUser = UserGroupInformation.getCurrentUser().getShortUserName();
-
-    if (acls.isEmpty()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("ACL is empty");
-      }
-      return false;
-    }
-
-    for (ACL acl : acls) {
-      int perms = acl.getPerms();
-      Id id = acl.getId();
-      // We should only set at most 3 possible ACLs for 3 Ids. One for everyone, one for superuser
-      // and one for the hbase user
-      if (Ids.ANYONE_ID_UNSAFE.equals(id)) {
-        if (perms != Perms.READ) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x",
-              id, perms, Perms.READ));
-          }
-          return false;
-        }
-      } else if (superUsers != null && isSuperUserId(superUsers, id)) {
-        if (perms != Perms.ALL) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x",
-              id, perms, Perms.ALL));
-          }
-          return false;
-        }
-      } else if ("sasl".equals(id.getScheme())) {
-        String name = id.getId();
-        // If ZooKeeper recorded the Kerberos full name in the ACL, use only the shortname
-        Matcher match = NAME_PATTERN.matcher(name);
-        if (match.matches()) {
-          name = match.group(1);
-        }
-        if (name.equals(hbaseUser)) {
-          if (perms != Perms.ALL) {
-            if (LOG.isDebugEnabled()) {
-              LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x",
-                id, perms, Perms.ALL));
-            }
-            return false;
-          }
-        } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Unexpected shortname in SASL ACL: " + id);
-          }
-          return false;
-        }
-      } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("unexpected ACL id '" + id + "'");
-        }
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /*
-   * Validate whether ACL set for all superusers.
-   */
-  private boolean checkACLForSuperUsers(String[] superUsers, List<ACL> acls) {
-    for (String user : superUsers) {
-      boolean hasAccess = false;
-      // TODO: Validate super group members also when ZK supports setting node ACL for groups.
-      if (!AuthUtil.isGroupPrincipal(user)) {
-        for (ACL acl : acls) {
-          if (user.equals(acl.getId().getId())) {
-            if (acl.getPerms() == Perms.ALL) {
-              hasAccess = true;
-            } else {
-              if (LOG.isDebugEnabled()) {
-                LOG.debug(String.format(
-                  "superuser '%s' does not have correct permissions: have 0x%x, want 0x%x",
-                  acl.getId().getId(), acl.getPerms(), Perms.ALL));
-              }
-            }
-            break;
-          }
-        }
-        if (!hasAccess) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  /*
-   * Validate whether ACL ID is superuser.
-   */
-  public static boolean isSuperUserId(String[] superUsers, Id id) {
-    for (String user : superUsers) {
-      // TODO: Validate super group members also when ZK supports setting node ACL for groups.
-      if (!AuthUtil.isGroupPrincipal(user) && new Id("sasl", user).equals(id)) {
-        return true;
-      }
-    }
-    return false;
-  }
-
-  @Override
-  public String toString() {
-    return this.identifier + ", quorum=" + quorum + ", baseZNode=" + znodePaths.baseZNode;
-  }
-
-  /**
-   * Adds this instance's identifier as a prefix to the passed <code>str</code>
-   * @param str String to amend.
-   * @return A new string with this instance's identifier as prefix: e.g. if passed
-   * 'hello world', the returned string is this instance's identifier followed by ' hello world'.
-   */
-  public String prefix(final String str) {
-    return this.toString() + " " + str;
-  }
-
-  /**
-   * Get the znodes corresponding to the meta replicas from ZK
-   * @return list of znodes
-   * @throws KeeperException
-   */
-  public List<String> getMetaReplicaNodes() throws KeeperException {
-    List<String> childrenOfBaseNode = ZKUtil.listChildrenNoWatch(this, znodePaths.baseZNode);
-    List<String> metaReplicaNodes = new ArrayList<>(2);
-    if (childrenOfBaseNode != null) {
-      String pattern = conf.get("zookeeper.znode.metaserver","meta-region-server");
-      for (String child : childrenOfBaseNode) {
-        if (child.startsWith(pattern)) metaReplicaNodes.add(child);
-      }
-    }
-    return metaReplicaNodes;
-  }
-
-  /**
-   * Register the specified listener to receive ZooKeeper events.
-   * @param listener
-   */
-  public void registerListener(ZooKeeperListener listener) {
-    listeners.add(listener);
-  }
-
-  /**
-   * Register the specified listener to receive ZooKeeper events and add it as
-   * the first in the list of current listeners.
-   * @param listener
-   */
-  public void registerListenerFirst(ZooKeeperListener listener) {
-    listeners.add(0, listener);
-  }
-
-  public void unregisterListener(ZooKeeperListener listener) {
-    listeners.remove(listener);
-  }
-
-  /**
-   * Clean all existing listeners
-   */
-  public void unregisterAllListeners() {
-    listeners.clear();
-  }
-
-  /**
-   * Get a copy of current registered listeners
-   */
-  public List<ZooKeeperListener> getListeners() {
-    return new ArrayList<>(listeners);
-  }
-
-  /**
-   * @return The number of currently registered listeners
-   */
-  public int getNumberOfListeners() {
-    return listeners.size();
-  }
-
-  /**
-   * Get the connection to ZooKeeper.
-   * @return connection reference to zookeeper
-   */
-  public RecoverableZooKeeper getRecoverableZooKeeper() {
-    return recoverableZooKeeper;
-  }
-
-  public void reconnectAfterExpiration() throws IOException, KeeperException, InterruptedException {
-    recoverableZooKeeper.reconnectAfterExpiration();
-  }
-
-  /**
-   * Get the quorum address of this instance.
-   * @return quorum string of this zookeeper connection instance
-   */
-  public String getQuorum() {
-    return quorum;
-  }
-
-  /**
-   * Get the znodePaths.
-   * <p>
-   * Mainly used for mocking as mockito cannot mock a field access.
-   */
-  public ZNodePaths getZNodePaths() {
-    return znodePaths;
-  }
-
-  /**
-   * Method called from ZooKeeper for events and connection status.
-   * <p>
-   * Valid events are passed along to listeners.  Connection status changes
-   * are dealt with locally.
-   */
-  @Override
-  public void process(WatchedEvent event) {
-    LOG.debug(prefix("Received ZooKeeper Event, " +
-        "type=" + event.getType() + ", " +
-        "state=" + event.getState() + ", " +
-        "path=" + event.getPath()));
-
-    switch(event.getType()) {
-
-      // If event type is NONE, this is a connection status change
-      case None: {
-        connectionEvent(event);
-        break;
-      }
-
-      // Otherwise pass along to the listeners
-
-      case NodeCreated: {
-        for(ZooKeeperListener listener : listeners) {
-          listener.nodeCreated(event.getPath());
-        }
-        break;
-      }
-
-      case NodeDeleted: {
-        for(ZooKeeperListener listener : listeners) {
-          listener.nodeDeleted(event.getPath());
-        }
-        break;
-      }
-
-      case NodeDataChanged: {
-        for(ZooKeeperListener listener : listeners) {
-          listener.nodeDataChanged(event.getPath());
-        }
-        break;
-      }
-
-      case NodeChildrenChanged: {
-        for(ZooKeeperListener listener : listeners) {
-          listener.nodeChildrenChanged(event.getPath());
-        }
-        break;
-      }
-    }
-  }
-
-  // Connection management
-
-  /**
-   * Called when there is a connection-related event via the Watcher callback.
-   * <p>
-   * If Disconnected or Expired, this should shut down the cluster. But, since
-   * we send a KeeperException.SessionExpiredException along with the abort
-   * call, it's possible for the Abortable to catch it and try to create a new
-   * session with ZooKeeper. This is what the client does in HCM.
-   * <p>
-   * @param event
-   */
-  private void connectionEvent(WatchedEvent event) {
-    switch(event.getState()) {
-      case SyncConnected:
-        this.identifier = this.prefix + "-0x" +
-          Long.toHexString(this.recoverableZooKeeper.getSessionId());
-        // Update our identifier.  Otherwise ignore.
-        LOG.debug(this.identifier + " connected");
-        break;
-
-      // Abort the server if Disconnected or Expired
-      case Disconnected:
-        LOG.debug(prefix("Received Disconnected from ZooKeeper, ignoring"));
-        break;
-
-      case Expired:
-        String msg = prefix(this.identifier + " received expired from " +
-          "ZooKeeper, aborting");
-        // TODO: One thought is to add call to ZooKeeperListener so say,
-        // ZooKeeperNodeTracker can zero out its data values.
-        if (this.abortable != null) {
-          this.abortable.abort(msg, new KeeperException.SessionExpiredException());
-        }
-        break;
-
-      case ConnectedReadOnly:
-      case SaslAuthenticated:
-      case AuthFailed:
-        break;
-
-      default:
-        throw new IllegalStateException("Received event is not valid: " + event.getState());
-    }
-  }
-
-  /**
-   * Forces a synchronization of this ZooKeeper client connection.
-   * <p>
-   * Executing this method before running other methods will ensure that the
-   * subsequent operations are up-to-date and consistent as of the time that
-   * the sync is complete.
-   * <p>
-   * This is used for compareAndSwap type operations where we need to read the
-   * data of an existing node and delete or transition that node, utilizing the
-   * previously read version and data.  We want to ensure that the version read
-   * is up-to-date from when we begin the operation.
-   */
-  public void sync(String path) throws KeeperException {
-    this.recoverableZooKeeper.sync(path, null, null);
-  }
-
-  /**
-   * Handles KeeperExceptions in client calls.
-   * <p>
-   * This may be temporary but for now this gives one place to deal with these.
-   * <p>
-   * TODO: Currently this method rethrows the exception to let the caller handle it
-   * <p>
-   * @param ke
-   * @throws KeeperException
-   */
-  public void keeperException(KeeperException ke)
-  throws KeeperException {
-    LOG.error(prefix("Received unexpected KeeperException, re-throwing exception"), ke);
-    throw ke;
-  }
-
-  /**
-   * Handles InterruptedExceptions in client calls.
-   * @param ie the InterruptedException instance thrown
-   * @throws KeeperException the exception to throw, transformed from the InterruptedException
-   */
-  public void interruptedException(InterruptedException ie) throws KeeperException {
-    interruptedExceptionNoThrow(ie, true);
-    // Throw a system error exception to let upper level handle it
-    throw new KeeperException.SystemErrorException();
-  }
-
-  /**
-   * Log the InterruptedException and interrupt current thread
-   * @param ie The InterruptedException to log
-   * @param throwLater Whether we will throw the exception later
-   */
-  public void interruptedExceptionNoThrow(InterruptedException ie, boolean throwLater) {
-    LOG.debug(prefix("Received InterruptedException, will interrupt current thread"
-        + (throwLater ? " and rethrow a SystemErrorException" : "")),
-      ie);
-    // At least preserve interrupt.
-    Thread.currentThread().interrupt();
-  }
-
-  /**
-   * Close the connection to ZooKeeper.
-   *
-   */
-  @Override
-  public void close() {
-    try {
-      recoverableZooKeeper.close();
-    } catch (InterruptedException e) {
-      Thread.currentThread().interrupt();
-    }
-  }
-
-  public Configuration getConfiguration() {
-    return conf;
-  }
-
-  @Override
-  public void abort(String why, Throwable e) {
-    if (this.abortable != null) this.abortable.abort(why, e);
-    else this.aborted = true;
-  }
-
-  @Override
-  public boolean isAborted() {
-    return this.abortable == null? this.aborted: this.abortable.isAborted();
-  }
-}
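
As the class comment above notes, ZooKeeperWatcher is the single Watcher per process;
other components receive ZooKeeper events by registering a ZooKeeperListener with it via
registerListener() rather than implementing Watcher themselves. A rough sketch of that
pattern (the listener class and its behaviour are made up for illustration, and exception
handling is omitted):

    // Hypothetical listener; override only the node callbacks this component needs.
    public class ExampleListener extends ZooKeeperListener {
      public ExampleListener(ZooKeeperWatcher watcher) {
        super(watcher);
      }
      @Override
      public void nodeDataChanged(String path) {
        // react to data changes on the znodes this component cares about
      }
    }

    ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, "example", abortable);
    watcher.registerListener(new ExampleListener(watcher));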

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java
deleted file mode 100644
index e67c9fd..0000000
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestInstancePending.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.zookeeper;
-
-import java.util.concurrent.atomic.AtomicReference;
-
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category(SmallTests.class)
-public class TestInstancePending {
-  @Test(timeout = 1000)
-  public void test() throws Exception {
-    final InstancePending<String> pending = new InstancePending<>();
-    final AtomicReference<String> getResultRef = new AtomicReference<>();
-
-    new Thread() {
-      @Override
-      public void run() {
-        getResultRef.set(pending.get());
-      }
-    }.start();
-
-    Thread.sleep(100);
-    Assert.assertNull(getResultRef.get());
-
-    pending.prepare("abc");
-    Thread.sleep(100);
-    Assert.assertEquals("abc", getResultRef.get());
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeper.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeper.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeper.java
deleted file mode 100644
index a987bec..0000000
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeper.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import static org.mockito.Mockito.*;
-
-@Category(SmallTests.class)
-public class TestMetricsZooKeeper {
-
-  @Test
-  public void testRegisterExceptions() {
-    MetricsZooKeeperSource zkSource = mock(MetricsZooKeeperSourceImpl.class);
-    MetricsZooKeeper metricsZK = new MetricsZooKeeper(zkSource);
-    metricsZK.registerAuthFailedException();
-    metricsZK.registerConnectionLossException();
-    metricsZK.registerConnectionLossException();
-    metricsZK.registerDataInconsistencyException();
-    metricsZK.registerInvalidACLException();
-    metricsZK.registerNoAuthException();
-    metricsZK.registerOperationTimeoutException();
-    metricsZK.registerOperationTimeoutException();
-    metricsZK.registerRuntimeInconsistencyException();
-    metricsZK.registerSessionExpiredException();
-    metricsZK.registerSystemErrorException();
-    metricsZK.registerSystemErrorException();
-    metricsZK.registerFailedZKCall();
-
-    verify(zkSource, times(1)).incrementAuthFailedCount();
-    // ConnectionLoss Exception was registered twice.
-    verify(zkSource, times(2)).incrementConnectionLossCount();
-    verify(zkSource, times(1)).incrementDataInconsistencyCount();
-    verify(zkSource, times(1)).incrementInvalidACLCount();
-    verify(zkSource, times(1)).incrementNoAuthCount();
-    // OperationTimeout Exception was registered twice.
-    verify(zkSource, times(2)).incrementOperationTimeoutCount();
-    verify(zkSource, times(1)).incrementRuntimeInconsistencyCount();
-    verify(zkSource, times(1)).incrementSessionExpiredCount();
-    // SystemError Exception was registered twice.
-    verify(zkSource, times(2)).incrementSystemErrorCount();
-    verify(zkSource, times(1)).incrementTotalFailedZKCalls();
-  }
-
-  @Test
-  public void testLatencyHistogramUpdates() {
-    MetricsZooKeeperSource zkSource = mock(MetricsZooKeeperSourceImpl.class);
-    MetricsZooKeeper metricsZK = new MetricsZooKeeper(zkSource);
-    long latency = 100;
-
-    metricsZK.registerReadOperationLatency(latency);
-    metricsZK.registerReadOperationLatency(latency);
-    metricsZK.registerWriteOperationLatency(latency);
-    metricsZK.registerSyncOperationLatency(latency);
-    // Read Operation Latency update was registered twice.
-    verify(zkSource, times(2)).recordReadOperationLatency(latency);
-    verify(zkSource, times(1)).recordWriteOperationLatency(latency);
-    verify(zkSource, times(1)).recordSyncOperationLatency(latency);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java
deleted file mode 100644
index 6b1e1f0..0000000
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.ZooKeeperConnectionException;
-import org.apache.hadoop.hbase.security.Superusers;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.ZooDefs.Perms;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.data.Id;
-import org.junit.Assert;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-import org.mockito.Mockito;
-
-/**
- * Tests for ZKUtil ACL creation and error handling.
- */
-@Category({SmallTests.class})
-public class TestZKUtil {
-
-  @Test
-  public void testUnsecure() throws ZooKeeperConnectionException, IOException {
-    Configuration conf = HBaseConfiguration.create();
-    conf.set(Superusers.SUPERUSER_CONF_KEY, "user1");
-    String node = "/hbase/testUnsecure";
-    ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false);
-    List<ACL> aclList = ZKUtil.createACL(watcher, node, false);
-    Assert.assertEquals(aclList.size(), 1);
-    Assert.assertTrue(aclList.contains(Ids.OPEN_ACL_UNSAFE.iterator().next()));
-  }
-
-  @Test
-  public void testSecuritySingleSuperuser() throws ZooKeeperConnectionException, IOException {
-    Configuration conf = HBaseConfiguration.create();
-    conf.set(Superusers.SUPERUSER_CONF_KEY, "user1");
-    String node = "/hbase/testSecuritySingleSuperuser";
-    ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false);
-    List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
-    Assert.assertEquals(aclList.size(), 2); // 1+1, since ACL will be set for the creator by default
-    Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user1"))));
-    Assert.assertTrue(aclList.contains(Ids.CREATOR_ALL_ACL.iterator().next()));
-  }
-
-  @Test
-  public void testCreateACL() throws ZooKeeperConnectionException, IOException {
-    Configuration conf = HBaseConfiguration.create();
-    conf.set(Superusers.SUPERUSER_CONF_KEY, "user1,@group1,user2,@group2,user3");
-    String node = "/hbase/testCreateACL";
-    ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false);
-    List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
-    Assert.assertEquals(aclList.size(), 4); // 3+1, since ACL will be set for the creator by default
-    Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group1"))));
-    Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group2"))));
-    Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user1"))));
-    Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user2"))));
-    Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user3"))));
-  }
-
-  @Test
-  public void testCreateACLWithSameUser() throws ZooKeeperConnectionException, IOException {
-    Configuration conf = HBaseConfiguration.create();
-    conf.set(Superusers.SUPERUSER_CONF_KEY, "user4,@group1,user5,user6");
-    UserGroupInformation.setLoginUser(UserGroupInformation.createRemoteUser("user4"));
-    String node = "/hbase/testCreateACL";
-    ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, node, null, false);
-    List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
-    Assert.assertEquals(aclList.size(), 3); // 3, since the service user is the same as one of the superusers
-    Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group1"))));
-    Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("auth", ""))));
-    Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user5"))));
-    Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user6"))));
-  }
-
-  @Test(expected = KeeperException.SystemErrorException.class)
-  public void testInterruptedDuringAction()
-      throws ZooKeeperConnectionException, IOException, KeeperException, InterruptedException {
-    final RecoverableZooKeeper recoverableZk = Mockito.mock(RecoverableZooKeeper.class);
-    ZooKeeperWatcher zkw = new ZooKeeperWatcher(HBaseConfiguration.create(), "unittest", null) {
-      @Override
-      public RecoverableZooKeeper getRecoverableZooKeeper() {
-        return recoverableZk;
-      }
-    };
-    Mockito.doThrow(new InterruptedException()).when(recoverableZk)
-        .getChildren(zkw.znodePaths.baseZNode, null);
-    ZKUtil.listChildrenNoWatch(zkw, zkw.znodePaths.baseZNode);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java
deleted file mode 100644
index 3c99175..0000000
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZooKeeperWatcher.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.zookeeper;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.testclassification.SmallTests;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-@Category({ SmallTests.class })
-public class TestZooKeeperWatcher {
-
-  @Test
-  public void testIsClientReadable() throws IOException {
-    ZooKeeperWatcher watcher =
-      new ZooKeeperWatcher(HBaseConfiguration.create(), "testIsClientReadable", null, false);
-
-    assertTrue(watcher.isClientReadable(watcher.znodePaths.baseZNode));
-    assertTrue(watcher.isClientReadable(watcher.znodePaths.getZNodeForReplica(0)));
-    assertTrue(watcher.isClientReadable(watcher.znodePaths.masterAddressZNode));
-    assertTrue(watcher.isClientReadable(watcher.znodePaths.clusterIdZNode));
-    assertTrue(watcher.isClientReadable(watcher.znodePaths.tableZNode));
-    assertTrue(
-      watcher.isClientReadable(ZNodePaths.joinZNode(watcher.znodePaths.tableZNode, "foo")));
-    assertTrue(watcher.isClientReadable(watcher.znodePaths.rsZNode));
-
-    assertFalse(watcher.isClientReadable(watcher.znodePaths.tableLockZNode));
-    assertFalse(watcher.isClientReadable(watcher.znodePaths.balancerZNode));
-    assertFalse(watcher.isClientReadable(watcher.znodePaths.regionNormalizerZNode));
-    assertFalse(watcher.isClientReadable(watcher.znodePaths.clusterStateZNode));
-    assertFalse(watcher.isClientReadable(watcher.znodePaths.drainingZNode));
-    assertFalse(watcher.isClientReadable(watcher.znodePaths.splitLogZNode));
-    assertFalse(watcher.isClientReadable(watcher.znodePaths.backupMasterAddressesZNode));
-
-    watcher.close();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
index eba8518..c44be52 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestMetaReplicas.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.StorefileRefresherChore;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -37,7 +37,7 @@ import org.junit.experimental.categories.Category;
 /**
  * An integration test that starts the cluster with three replicas for the meta
  * It then creates a table, flushes the meta, kills the server holding the primary.
- * After that a client issues put/get requests on the created table - the other 
+ * After that a client issues put/get requests on the created table - the other
  * replicas of the meta would be used to get the location of the region of the created
  * table.
  */
@@ -60,7 +60,7 @@ public class IntegrationTestMetaReplicas {
         StorefileRefresherChore.REGIONSERVER_STOREFILE_REFRESH_PERIOD, 1000);
     // Make sure there are three servers.
     util.initializeCluster(3);
-    ZooKeeperWatcher zkw = util.getZooKeeperWatcher();
+    ZKWatcher zkw = util.getZooKeeperWatcher();
     Configuration conf = util.getConfiguration();
     String baseZNode = conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT,
         HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT);
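
Most of the remaining hunks in this patch follow the same mechanical pattern as the one
above: ZooKeeperWatcher moves into the new hbase-zookeeper module as ZKWatcher, and callers
swap the import and the constructor call while keeping the same arguments. Roughly (the
identifier string is illustrative):

    // before this patch
    ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "some-identifier", abortable);
    // after this patch
    ZKWatcher zkw = new ZKWatcher(conf, "some-identifier", abortable);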

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java
index 92bad7f..09cbda3 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestZKAndFSPermissions.java
@@ -39,8 +39,8 @@ import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.Code;
@@ -139,7 +139,7 @@ public class IntegrationTestZKAndFSPermissions extends AbstractHBaseTool {
 
   private void testZNodeACLs() throws IOException, KeeperException, InterruptedException {
 
-    ZooKeeperWatcher watcher = new ZooKeeperWatcher(conf, "IntegrationTestZnodeACLs", null);
+    ZKWatcher watcher = new ZKWatcher(conf, "IntegrationTestZnodeACLs", null);
     RecoverableZooKeeper zk = ZKUtil.connect(this.conf, watcher);
 
     String baseZNode = watcher.znodePaths.baseZNode;
@@ -155,7 +155,7 @@ public class IntegrationTestZKAndFSPermissions extends AbstractHBaseTool {
     LOG.info("Checking ZK permissions: SUCCESS");
   }
 
-  private void checkZnodePermsRecursive(ZooKeeperWatcher watcher,
+  private void checkZnodePermsRecursive(ZKWatcher watcher,
       RecoverableZooKeeper zk, String znode) throws KeeperException, InterruptedException {
 
     boolean expectedWorldReadable = watcher.isClientReadable(znode);
@@ -201,7 +201,7 @@ public class IntegrationTestZKAndFSPermissions extends AbstractHBaseTool {
         assertTrue(expectedWorldReadable);
         // assert that anyone can only read
         assertEquals(perms, Perms.READ);
-      } else if (superUsers != null && ZooKeeperWatcher.isSuperUserId(superUsers, id)) {
+      } else if (superUsers != null && ZKWatcher.isSuperUserId(superUsers, id)) {
         // assert that super user has all the permissions
         assertEquals(perms, Perms.ALL);
       } else if (new Id("sasl", masterPrincipal).equals(id)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
index d25d497..bf0081b 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -64,7 +65,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.MapReduceCell;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.io.RawComparator;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableComparator;
@@ -406,10 +406,10 @@ public class Import extends Configured implements Tool {
         LOG.info("setting WAL durability to default.");
       }
       // TODO: This is kind of ugly doing setup of ZKW just to read the clusterid.
-      ZooKeeperWatcher zkw = null;
+      ZKWatcher zkw = null;
       Exception ex = null;
       try {
-        zkw = new ZooKeeperWatcher(conf, context.getTaskAttemptID().toString(), null);
+        zkw = new ZKWatcher(conf, context.getTaskAttemptID().toString(), null);
         clusterIds = Collections.singletonList(ZKClusterId.getUUIDForCluster(zkw));
       } catch (ZooKeeperConnectionException e) {
         ex = e;

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
index acf6ff8..b8de9ec 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java
@@ -57,7 +57,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.mapreduce.InputSplit;
 import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -330,10 +330,10 @@ public class VerifyReplication extends Configured implements Tool {
 
   private static Pair<ReplicationPeerConfig, Configuration> getPeerQuorumConfig(
       final Configuration conf, String peerId) throws IOException {
-    ZooKeeperWatcher localZKW = null;
+    ZKWatcher localZKW = null;
     ReplicationPeerZKImpl peer = null;
     try {
-      localZKW = new ZooKeeperWatcher(conf, "VerifyReplication",
+      localZKW = new ZKWatcher(conf, "VerifyReplication",
           new Abortable() {
             @Override public void abort(String why, Throwable e) {}
             @Override public boolean isAborted() {return false;}

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
index 942fd8c..039f8ac 100644
--- a/hbase-replication/pom.xml
+++ b/hbase-replication/pom.xml
@@ -130,6 +130,10 @@
       <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-client</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-zookeeper</artifactId>
+    </dependency>
     <!-- General dependencies -->
     <dependency>
       <groupId>org.apache.commons</groupId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
index 9d12211..3ff6914 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java
@@ -23,7 +23,7 @@ import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 
 /**
  * A factory class for instantiating replication objects that deal with replication state.
@@ -48,17 +48,17 @@ public class ReplicationFactory {
     return (ReplicationQueuesClient) ConstructorUtils.invokeConstructor(classToBuild, args);
   }
 
-  public static ReplicationPeers getReplicationPeers(final ZooKeeperWatcher zk, Configuration conf,
-      Abortable abortable) {
+  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, Configuration conf,
+                                                     Abortable abortable) {
     return getReplicationPeers(zk, conf, null, abortable);
   }
 
-  public static ReplicationPeers getReplicationPeers(final ZooKeeperWatcher zk, Configuration conf,
-      final ReplicationQueuesClient queuesClient, Abortable abortable) {
+  public static ReplicationPeers getReplicationPeers(final ZKWatcher zk, Configuration conf,
+                                                     final ReplicationQueuesClient queuesClient, Abortable abortable) {
     return new ReplicationPeersZKImpl(zk, conf, queuesClient, abortable);
   }
 
-  public static ReplicationTracker getReplicationTracker(ZooKeeperWatcher zookeeper,
+  public static ReplicationTracker getReplicationTracker(ZKWatcher zookeeper,
       final ReplicationPeers replicationPeers, Configuration conf, Abortable abortable,
       Stoppable stopper) {
     return new ReplicationTrackerZKImpl(zookeeper, replicationPeers, conf, abortable, stopper);

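[Editor's note, not part of the patch] The factory signatures above change only in the watcher parameter type. A sketch of driving the two-argument getReplicationPeers overload with the renamed ZKWatcher follows; the no-op Abortable, the identifier string, and the printed output are assumptions made for the example, not part of this patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Abortable;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.replication.ReplicationFactory;
    import org.apache.hadoop.hbase.replication.ReplicationPeers;
    import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

    public final class ReplicationPeersExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // No-op Abortable stub, mirroring the inline Abortable used elsewhere in this patch.
        Abortable abortable = new Abortable() {
          @Override public void abort(String why, Throwable e) {}
          @Override public boolean isAborted() { return false; }
        };
        try (ZKWatcher zkw = new ZKWatcher(conf, "replication-peers-example", abortable)) {
          // Two-argument form defers the queues client; see the overload patched above.
          ReplicationPeers peers = ReplicationFactory.getReplicationPeers(zkw, conf, abortable);
          System.out.println("ReplicationPeers implementation: " + peers.getClass().getName());
        }
      }
    }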
http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
index 8f09479..214a313 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java
@@ -34,9 +34,9 @@ import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+import org.apache.hadoop.hbase.zookeeper.ZKNodeTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperNodeTracker;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.KeeperException.NodeExistsException;
@@ -62,7 +62,7 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase
    * @param id string representation of this peer's identifier
    * @param peerConfig configuration for the replication peer
    */
-  public ReplicationPeerZKImpl(ZooKeeperWatcher zkWatcher, Configuration conf,
+  public ReplicationPeerZKImpl(ZKWatcher zkWatcher, Configuration conf,
                                String id, ReplicationPeerConfig peerConfig,
                                Abortable abortable)
       throws ReplicationException {
@@ -258,9 +258,9 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase
   /**
    * Tracker for state of this peer
    */
-  public class PeerStateTracker extends ZooKeeperNodeTracker {
+  public class PeerStateTracker extends ZKNodeTracker {
 
-    public PeerStateTracker(String peerStateZNode, ZooKeeperWatcher watcher,
+    public PeerStateTracker(String peerStateZNode, ZKWatcher watcher,
         Abortable abortable) {
       super(watcher, peerStateZNode, abortable);
     }
@@ -281,11 +281,11 @@ public class ReplicationPeerZKImpl extends ReplicationStateZKBase
   /**
    * Tracker for PeerConfigNode of this peer
    */
-  public class PeerConfigTracker extends ZooKeeperNodeTracker {
+  public class PeerConfigTracker extends ZKNodeTracker {
 
     ReplicationPeerConfigListener listener;
 
-    public PeerConfigTracker(String peerConfigNode, ZooKeeperWatcher watcher,
+    public PeerConfigTracker(String peerConfigNode, ZKWatcher watcher,
         Abortable abortable) {
       super(watcher, peerConfigNode, abortable);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index cc84c1d..9c4e3fe 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -43,8 +43,8 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
@@ -84,8 +84,8 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
 
   private static final Log LOG = LogFactory.getLog(ReplicationPeersZKImpl.class);
 
-  public ReplicationPeersZKImpl(final ZooKeeperWatcher zk, final Configuration conf,
-      final ReplicationQueuesClient queuesClient, Abortable abortable) {
+  public ReplicationPeersZKImpl(final ZKWatcher zk, final Configuration conf,
+                                final ReplicationQueuesClient queuesClient, Abortable abortable) {
     super(zk, conf, abortable);
     this.abortable = abortable;
     this.peerClusters = new ConcurrentHashMap<>();

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
index c8328bd..c2a5df3 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesArguments.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.replication;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 
 /**
  * Wrapper around common arguments used to construct ReplicationQueues. Used to construct various
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 @InterfaceAudience.Private
 public class ReplicationQueuesArguments {
 
-  private ZooKeeperWatcher zk;
+  private ZKWatcher zk;
   private Configuration conf;
   private Abortable abort;
 
@@ -39,16 +39,16 @@ public class ReplicationQueuesArguments {
     this.abort = abort;
   }
 
-  public ReplicationQueuesArguments(Configuration conf, Abortable abort, ZooKeeperWatcher zk) {
+  public ReplicationQueuesArguments(Configuration conf, Abortable abort, ZKWatcher zk) {
     this(conf, abort);
     setZk(zk);
   }
 
-  public ZooKeeperWatcher getZk() {
+  public ZKWatcher getZk() {
     return zk;
   }
 
-  public void setZk(ZooKeeperWatcher zk) {
+  public void setZk(ZKWatcher zk) {
     this.zk = zk;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientArguments.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientArguments.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientArguments.java
index 67258c7..9b79294 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientArguments.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientArguments.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hbase.replication;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 
 /**
  * Wrapper around common arguments used to construct ReplicationQueuesClient. Used to construct
@@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 @InterfaceAudience.Private
 public class ReplicationQueuesClientArguments extends ReplicationQueuesArguments {
   public ReplicationQueuesClientArguments(Configuration conf, Abortable abort,
-     ZooKeeperWatcher zk) {
+     ZKWatcher zk) {
     super(conf, abort, zk);
   }
   public ReplicationQueuesClientArguments(Configuration conf, Abortable abort) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
index 49e55ef..b998f15 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesClientZKImpl.java
@@ -21,16 +21,16 @@ package org.apache.hadoop.hbase.replication;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableSet;
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.ImmutableSet;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.apache.zookeeper.data.Stat;
 
@@ -44,8 +44,8 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem
     this(args.getZk(), args.getConf(), args.getAbortable());
   }
 
-  public ReplicationQueuesClientZKImpl(final ZooKeeperWatcher zk, Configuration conf,
-      Abortable abortable) {
+  public ReplicationQueuesClientZKImpl(final ZKWatcher zk, Configuration conf,
+                                       Abortable abortable) {
     super(zk, conf, abortable);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index 7b1d5c2..95fd294 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -25,20 +25,18 @@ import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
 /**
@@ -75,8 +73,8 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
     this(args.getZk(), args.getConf(), args.getAbortable());
   }
 
-  public ReplicationQueuesZKImpl(final ZooKeeperWatcher zk, Configuration conf,
-      Abortable abortable) {
+  public ReplicationQueuesZKImpl(final ZKWatcher zk, Configuration conf,
+                                 Abortable abortable) {
     super(zk, conf, abortable);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
index e96401e..ad970c6 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStateZKBase.java
@@ -22,21 +22,19 @@ import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
 import org.apache.hadoop.hbase.zookeeper.ZKConfig;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
-
 /**
  * This is a base class for maintaining replication state in zookeeper.
  */
@@ -61,7 +59,7 @@ public abstract class ReplicationStateZKBase {
   /** The name of the znode that contains tableCFs */
   protected final String tableCFsNodeName;
 
-  protected final ZooKeeperWatcher zookeeper;
+  protected final ZKWatcher zookeeper;
   protected final Configuration conf;
   protected final Abortable abortable;
 
@@ -74,8 +72,8 @@ public abstract class ReplicationStateZKBase {
       "zookeeper.znode.replication.hfile.refs";
   public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT = "hfile-refs";
 
-  public ReplicationStateZKBase(ZooKeeperWatcher zookeeper, Configuration conf,
-      Abortable abortable) {
+  public ReplicationStateZKBase(ZKWatcher zookeeper, Configuration conf,
+                                Abortable abortable) {
     this.zookeeper = zookeeper;
     this.conf = conf;
     this.abortable = abortable;

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
index ade1c4d..aa72fc5 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java
@@ -24,13 +24,13 @@ import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.zookeeper.ZKListener;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.Stoppable;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
 /**
@@ -50,7 +50,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements
   private final ArrayList<String> otherRegionServers = new ArrayList<>();
   private final ReplicationPeers replicationPeers;
 
-  public ReplicationTrackerZKImpl(ZooKeeperWatcher zookeeper,
+  public ReplicationTrackerZKImpl(ZKWatcher zookeeper,
       final ReplicationPeers replicationPeers, Configuration conf, Abortable abortable,
       Stoppable stopper) {
     super(zookeeper, conf, abortable);
@@ -88,12 +88,12 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements
    * Watcher used to be notified of the other region server's death in the local cluster. It
    * initiates the process to transfer the queues if it is able to grab the lock.
    */
-  public class OtherRegionServerWatcher extends ZooKeeperListener {
+  public class OtherRegionServerWatcher extends ZKListener {
 
     /**
      * Construct a ZooKeeper event listener.
      */
-    public OtherRegionServerWatcher(ZooKeeperWatcher watcher) {
+    public OtherRegionServerWatcher(ZKWatcher watcher) {
       super(watcher);
     }
 
@@ -145,12 +145,12 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements
   /**
    * Watcher used to follow the creation and deletion of peer clusters.
    */
-  public class PeersWatcher extends ZooKeeperListener {
+  public class PeersWatcher extends ZKListener {
 
     /**
      * Construct a ZooKeeper event listener.
      */
-    public PeersWatcher(ZooKeeperWatcher watcher) {
+    public PeersWatcher(ZKWatcher watcher) {
       super(watcher);
     }
 

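[Editor's note, not part of the patch] OtherRegionServerWatcher and PeersWatcher above keep their behavior; only the base class name moves to ZKListener. A minimal sketch of the renamed listener shape follows; the class name, znode handling, and console output are placeholders, while the super(watcher) constructor and registerListener pattern match the trackers in this patch.

    import org.apache.hadoop.hbase.zookeeper.ZKListener;
    import org.apache.hadoop.hbase.zookeeper.ZKWatcher;

    public class LoggingPeerWatcher extends ZKListener {

      public LoggingPeerWatcher(ZKWatcher watcher) {
        super(watcher);
        // Same registration pattern the trackers use: the watcher fans
        // ZooKeeper events out to every registered listener.
        watcher.registerListener(this);
      }

      @Override
      public void nodeCreated(String path) {
        System.out.println("znode created: " + path);
      }

      @Override
      public void nodeDeleted(String path) {
        System.out.println("znode deleted: " + path);
      }

      @Override
      public void nodeChildrenChanged(String path) {
        System.out.println("children changed under: " + path);
      }
    }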
http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java
index cf1ff20..1f239f8 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/TableBasedReplicationQueuesImpl.java
@@ -37,11 +37,10 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.filter.CompareFilter;
 import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.zookeeper.KeeperException;
 
 import java.io.IOException;
@@ -77,7 +76,7 @@ public class TableBasedReplicationQueuesImpl extends ReplicationTableBase
     this(args.getConf(), args.getAbortable(), args.getZk());
   }
 
-  public TableBasedReplicationQueuesImpl(Configuration conf, Abortable abort, ZooKeeperWatcher zkw)
+  public TableBasedReplicationQueuesImpl(Configuration conf, Abortable abort, ZKWatcher zkw)
     throws IOException {
     super(conf, abort);
     replicationState = new ReplicationStateZKBase(zkw, conf, abort) {};

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
index 3d152bb..5fca659 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupInfoManagerImpl.java
@@ -77,8 +77,8 @@ import org.apache.hadoop.hbase.regionserver.DisabledRegionSplitPolicy;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 
@@ -141,7 +141,7 @@ class RSGroupInfoManagerImpl implements RSGroupInfoManager {
   private final MasterServices masterServices;
   private Table rsGroupTable;
   private final ClusterConnection conn;
-  private final ZooKeeperWatcher watcher;
+  private final ZKWatcher watcher;
   private final RSGroupStartupWorker rsGroupStartupWorker = new RSGroupStartupWorker();
   // contains list of groups that were last flushed to persistent store
   private Set<String> prevRSGroups = new HashSet<>();

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
index 9077f15..ba3534d 100644
--- a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
+++ b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdminClient.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Result;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupProtos;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 import org.junit.Assert;
 
@@ -45,14 +45,14 @@ import java.util.Set;
 @InterfaceAudience.Private
 public class VerifyingRSGroupAdminClient implements RSGroupAdmin {
   private Table table;
-  private ZooKeeperWatcher zkw;
+  private ZKWatcher zkw;
   private RSGroupAdmin wrapped;
 
   public VerifyingRSGroupAdminClient(RSGroupAdmin RSGroupAdmin, Configuration conf)
       throws IOException {
     wrapped = RSGroupAdmin;
     table = ConnectionFactory.createConnection(conf).getTable(RSGroupInfoManager.RSGROUP_TABLE_NAME);
-    zkw = new ZooKeeperWatcher(conf, this.getClass().getSimpleName(), null);
+    zkw = new ZKWatcher(conf, this.getClass().getSimpleName(), null);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-server/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 1e5a1f3..daa34f8 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -390,6 +390,10 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-zookeeper</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-replication</artifactId>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
index a6b39f3..0d9eba8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/Server.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import java.io.IOException;
@@ -43,7 +43,7 @@ public interface Server extends Abortable, Stoppable {
   /**
    * Gets the ZooKeeper instance for this server.
    */
-  ZooKeeperWatcher getZooKeeper();
+  ZKWatcher getZooKeeper();
 
   /**
    * Returns a reference to the servers' connection.

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
index c4fad44..160d058 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
@@ -27,8 +27,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZKListener;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.zookeeper.KeeperException;
 
 import java.io.IOException;
@@ -48,12 +48,12 @@ import java.util.concurrent.ConcurrentSkipListMap;
  *
  */
 @InterfaceAudience.Private
-public class ZKNamespaceManager extends ZooKeeperListener {
+public class ZKNamespaceManager extends ZKListener {
   private static final Log LOG = LogFactory.getLog(ZKNamespaceManager.class);
   private final String nsZNode;
   private volatile NavigableMap<String,NamespaceDescriptor> cache;
 
-  public ZKNamespaceManager(ZooKeeperWatcher zkw) throws IOException {
+  public ZKNamespaceManager(ZKWatcher zkw) throws IOException {
     super(zkw);
     nsZNode = zkw.znodePaths.namespaceZNode;
     cache = new ConcurrentSkipListMap<>();

http://git-wip-us.apache.org/repos/asf/hbase/blob/330b0d05/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
index d7fdeb7..cda5aff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZNodeClearer.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
 
 /**
@@ -132,7 +132,7 @@ public class ZNodeClearer {
    * @param rsZnodePath from HBASE_ZNODE_FILE
    * @return String representation of ServerName or null if fails
    */
-  
+
   public static String parseMasterServerName(String rsZnodePath) {
     String masterServerName = null;
     try {
@@ -141,12 +141,12 @@ public class ZNodeClearer {
     } catch (IndexOutOfBoundsException e) {
       LOG.warn("String " + rsZnodePath + " has wrong format", e);
     }
-    return masterServerName; 
+    return masterServerName;
   }
-  
+
   /**
-   * 
-   * @return true if cluster is configured with master-rs collocation 
+   *
+   * @return true if cluster is configured with master-rs collocation
    */
   private static boolean tablesOnMaster(Configuration conf) {
     boolean tablesOnMaster = true;
@@ -167,9 +167,9 @@ public class ZNodeClearer {
     Configuration tempConf = new Configuration(conf);
     tempConf.setInt("zookeeper.recovery.retry", 0);
 
-    ZooKeeperWatcher zkw;
+    ZKWatcher zkw;
     try {
-      zkw = new ZooKeeperWatcher(tempConf, "clean znode for master",
+      zkw = new ZKWatcher(tempConf, "clean znode for master",
           new Abortable() {
             @Override public void abort(String why, Throwable e) {}
             @Override public boolean isAborted() { return false; }