Posted to commits@hbase.apache.org by jx...@apache.org on 2014/08/07 01:22:50 UTC

[06/10] HBASE-11611 Clean up ZK-based region assignment

http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index f8e99f1..cb301b9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -258,12 +258,11 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
    * {@link #finishActiveMasterInitialization(MonitoredTask)} after
    * the master becomes the active one.
    *
-   * @throws InterruptedException
    * @throws KeeperException
    * @throws IOException
    */
   public HMaster(final Configuration conf, CoordinatedStateManager csm)
-      throws IOException, KeeperException, InterruptedException {
+      throws IOException, KeeperException {
     super(conf, csm);
     this.rsFatals = new MemoryBoundedLogMessageBuffer(
       conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));
@@ -413,7 +412,6 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     this.assignmentManager = new AssignmentManager(this, serverManager,
       this.balancer, this.service, this.metricsMaster,
       this.tableLockManager);
-    zooKeeper.registerListenerFirst(assignmentManager);
 
     this.regionServerTracker = new RegionServerTracker(zooKeeper, this,
         this.serverManager);
@@ -674,34 +672,29 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
 
     RegionStates regionStates = assignmentManager.getRegionStates();
     regionStates.createRegionState(HRegionInfo.FIRST_META_REGIONINFO);
-    boolean rit = this.assignmentManager
-      .processRegionInTransitionAndBlockUntilAssigned(HRegionInfo.FIRST_META_REGIONINFO);
     boolean metaRegionLocation = metaTableLocator.verifyMetaRegionLocation(
       this.getShortCircuitConnection(), this.getZooKeeper(), timeout);
     ServerName currentMetaServer = metaTableLocator.getMetaRegionLocation(this.getZooKeeper());
     if (!metaRegionLocation) {
       // Meta location is not verified. It should be in transition, or offline.
       // We will wait for it to be assigned in enableSSHandWaitForMeta below.
-      assigned++;
-      if (!rit) {
-        // Assign meta since not already in transition
-        if (currentMetaServer != null) {
-          // If the meta server is not known to be dead or online,
-          // just split the meta log, and don't expire it since this
-          // could be a full cluster restart. Otherwise, we will think
-          // this is a failover and lose previous region locations.
-          // If it is really a failover case, AM will find out in rebuilding
-          // user regions. Otherwise, we are good since all logs are split
-          // or known to be replayed before user regions are assigned.
-          if (serverManager.isServerOnline(currentMetaServer)) {
-            LOG.info("Forcing expire of " + currentMetaServer);
-            serverManager.expireServer(currentMetaServer);
-          }
-          splitMetaLogBeforeAssignment(currentMetaServer);
-          previouslyFailedMetaRSs.add(currentMetaServer);
+      if (currentMetaServer != null) {
+        // If the meta server is not known to be dead or online,
+        // just split the meta log, and don't expire it since this
+        // could be a full cluster restart. Otherwise, we will think
+        // this is a failover and lose previous region locations.
+        // If it is really a failover case, AM will find out in rebuilding
+        // user regions. Otherwise, we are good since all logs are split
+        // or known to be replayed before user regions are assigned.
+        if (serverManager.isServerOnline(currentMetaServer)) {
+          LOG.info("Forcing expire of " + currentMetaServer);
+          serverManager.expireServer(currentMetaServer);
         }
-        assignmentManager.assignMeta();
+        splitMetaLogBeforeAssignment(currentMetaServer);
+        previouslyFailedMetaRSs.add(currentMetaServer);
       }
+      assignmentManager.assignMeta();
+      assigned++;
     } else {
       // Region already assigned. We didn't assign it. Add to in-memory state.
       regionStates.updateRegionState(
@@ -725,8 +718,8 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
     // No need to wait for meta is assigned = 0 when meta is just verified.
     enableServerShutdownHandler(assigned != 0);
 
-    LOG.info("hbase:meta assigned=" + assigned + ", rit=" + rit +
-      ", location=" + metaTableLocator.getMetaRegionLocation(this.getZooKeeper()));
+    LOG.info("hbase:meta assigned=" + assigned + ", location="
+      + metaTableLocator.getMetaRegionLocation(this.getZooKeeper()));
     status.setStatus("META assigned.");
   }
 
@@ -1736,7 +1729,7 @@ public class HMaster extends HRegionServer implements MasterServices, Server {
   }
 
   public void assignRegion(HRegionInfo hri) {
-    assignmentManager.assign(hri, true);
+    assignmentManager.assign(hri);
   }
 
   /**

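As an illustrative aside on the hunk above: with the processRegionInTransitionAndBlockUntilAssigned() call gone, the master simply verifies the hbase:meta location and, if that fails, splits the old meta server's log (forcing expiry first only if the server still looks online) and then always calls assignmentManager.assignMeta(). The following minimal, self-contained Java sketch mirrors that decision; MetaLocator, Servers and Assigner are hypothetical stand-ins for MetaTableLocator, ServerManager and AssignmentManager, and the server name in main() is made up.

import java.util.HashSet;
import java.util.Set;

// Hypothetical stand-ins for MetaTableLocator, ServerManager and AssignmentManager.
interface MetaLocator { boolean verifyMetaLocation(); String currentMetaServer(); }
interface Servers { boolean isOnline(String server); void expire(String server); }
interface Assigner { void assignMeta(); }

public class MetaAssignSketch {
  /** Returns how many assignments were triggered (0 if meta was already in place). */
  static int assignMetaIfNeeded(MetaLocator locator, Servers servers, Assigner assigner,
      Set<String> previouslyFailedMetaServers) {
    if (locator.verifyMetaLocation()) {
      return 0;                               // meta already assigned and reachable
    }
    String current = locator.currentMetaServer();
    if (current != null) {
      if (servers.isOnline(current)) {
        servers.expire(current);              // force expiry only if it claims to be online
      }
      // split the old meta log before reassigning (splitMetaLogBeforeAssignment in the patch)
      previouslyFailedMetaServers.add(current);
    }
    assigner.assignMeta();                    // always assign; no ZK RIT check any more
    return 1;
  }

  public static void main(String[] args) {
    Set<String> failed = new HashSet<String>();
    int assigned = assignMetaIfNeeded(
        new MetaLocator() {
          public boolean verifyMetaLocation() { return false; }
          public String currentMetaServer() { return "rs1,16020,1"; }
        },
        new Servers() {
          public boolean isOnline(String s) { return false; }
          public void expire(String s) { }
        },
        () -> System.out.println("assignMeta() called"),
        failed);
    System.out.println("assigned=" + assigned + ", previously failed=" + failed);
  }
}
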
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index d4f3d6d..d6f825b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -359,7 +359,7 @@ public class MasterRpcServices extends RSRpcServices
       }
       LOG.info(master.getClientIdAuditPrefix()
         + " assign " + regionInfo.getRegionNameAsString());
-      master.assignmentManager.assign(regionInfo, true, true);
+      master.assignmentManager.assign(regionInfo, true);
       if (master.cpHost != null) {
         master.cpHost.postAssign(regionInfo);
       }
@@ -1074,6 +1074,7 @@ public class MasterRpcServices extends RSRpcServices
    *
    */
   @Override
+  @SuppressWarnings("deprecation")
   public OfflineRegionResponse offlineRegion(RpcController controller,
       OfflineRegionRequest request) throws ServiceException {
     final byte [] regionName = request.getRegion().getValue().toByteArray();
@@ -1203,6 +1204,7 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
+  @SuppressWarnings("deprecation")
   public UnassignRegionResponse unassignRegion(RpcController controller,
       UnassignRegionRequest req) throws ServiceException {
     try {

http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/master/OfflineCallback.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/OfflineCallback.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/OfflineCallback.java
deleted file mode 100644
index c93dbe1..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/OfflineCallback.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.AsyncCallback.StringCallback;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.data.Stat;
-
-/**
- * Callback handler for creating unassigned offline znodes
- * used during bulk assign, async setting region to offline.
- */
-@InterfaceAudience.Private
-public class OfflineCallback implements StringCallback {
-  private static final Log LOG = LogFactory.getLog(OfflineCallback.class);
-  private final ExistCallback callBack;
-  private final ZooKeeperWatcher zkw;
-  private final ServerName destination;
-  private final AtomicInteger counter;
-
-  OfflineCallback(final ZooKeeperWatcher zkw,
-      final ServerName destination, final AtomicInteger counter,
-      final Map<String, Integer> offlineNodesVersions) {
-    this.callBack = new ExistCallback(
-      destination, counter, offlineNodesVersions);
-    this.destination = destination;
-    this.counter = counter;
-    this.zkw = zkw;
-  }
-
-  @Override
-  public void processResult(int rc, String path, Object ctx, String name) {
-    if (rc == KeeperException.Code.NODEEXISTS.intValue()) {
-      LOG.warn("Node for " + path + " already exists");
-    } else if (rc != 0) {
-      // This is result code.  If non-zero, need to resubmit.
-      LOG.warn("rc != 0 for " + path + " -- retryable connectionloss -- " +
-        "FIX see http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A2");
-      this.counter.addAndGet(1);
-      return;
-    }
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("rs=" + ctx + ", server=" + destination);
-    }
-    // Async exists to set a watcher so we'll get triggered when
-    // unassigned node changes.
-    ZooKeeper zk = this.zkw.getRecoverableZooKeeper().getZooKeeper();
-    zk.exists(path, this.zkw, callBack, ctx);
-  }
-
-  /**
-   * Callback handler for the exists call that sets watcher on unassigned znodes.
-   * Used during bulk assign on startup.
-   */
-  static class ExistCallback implements StatCallback {
-    private static final Log LOG = LogFactory.getLog(ExistCallback.class);
-    private final Map<String, Integer> offlineNodesVersions;
-    private final AtomicInteger counter;
-    private ServerName destination;
-
-    ExistCallback(final ServerName destination,
-        final AtomicInteger counter,
-        final Map<String, Integer> offlineNodesVersions) {
-      this.offlineNodesVersions = offlineNodesVersions;
-      this.destination = destination;
-      this.counter = counter;
-    }
-
-    @Override
-    public void processResult(int rc, String path, Object ctx, Stat stat) {
-      if (rc != 0) {
-        // This is result code.  If non-zero, need to resubmit.
-        LOG.warn("rc != 0 for " + path + " -- retryable connectionloss -- " +
-          "FIX see http://wiki.apache.org/hadoop/ZooKeeper/FAQ#A2");
-        this.counter.addAndGet(1);
-        return;
-      }
-
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("rs=" + ctx + ", server=" + destination);
-      }
-      HRegionInfo region = ((RegionState)ctx).getRegion();
-      offlineNodesVersions.put(
-        region.getEncodedName(), Integer.valueOf(stat.getVersion()));
-      this.counter.addAndGet(1);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
index 8e1e040..0e6e69e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStateStore.java
@@ -22,16 +22,15 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Put;
@@ -40,7 +39,6 @@ import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ConfigUtil;
 
 import com.google.common.base.Preconditions;
 
@@ -59,7 +57,6 @@ public class RegionStateStore {
   private volatile HTableInterface metaTable;
   private volatile boolean initialized;
 
-  private final boolean noPersistence;
   private final Server server;
 
   /**
@@ -131,25 +128,19 @@ public class RegionStateStore {
   }
 
   RegionStateStore(final Server server) {
-    Configuration conf = server.getConfiguration();
-    // No need to persist if using ZK but not migrating
-    noPersistence = ConfigUtil.useZKForAssignment(conf)
-      && !conf.getBoolean("hbase.assignment.usezk.migrating", false);
     this.server = server;
     initialized = false;
   }
 
   @SuppressWarnings("deprecation")
   void start() throws IOException {
-    if (!noPersistence) {
-      if (server instanceof RegionServerServices) {
-        metaRegion = ((RegionServerServices)server).getFromOnlineRegions(
-          HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
-      }
-      if (metaRegion == null) {
-        metaTable = new HTable(TableName.META_TABLE_NAME,
-          server.getShortCircuitConnection());
-      }
+    if (server instanceof RegionServerServices) {
+      metaRegion = ((RegionServerServices)server).getFromOnlineRegions(
+        HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
+    }
+    if (metaRegion == null) {
+      metaTable = new HTable(TableName.META_TABLE_NAME,
+        server.getShortCircuitConnection());
     }
     initialized = true;
   }
@@ -170,7 +161,7 @@ public class RegionStateStore {
   @SuppressWarnings("deprecation")
   void updateRegionState(long openSeqNum,
       RegionState newState, RegionState oldState) {
-    if (noPersistence || !initialized) {
+    if (!initialized) {
       return;
     }
 

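A short sketch of the RegionStateStore behavior after the hunks above: the noPersistence flag and the hbase.assignment.usezk checks are gone, so region states are always persisted to hbase:meta, either through the locally hosted meta region or through a meta table client. MetaRegionHost and MetaClientFactory below are hypothetical stand-ins, not HBase types; the sketch only shows the selection of the write path and the remaining initialized guard.

import java.util.Optional;

interface MetaRegionHost { Optional<Object> localMetaRegion(); }
interface MetaClientFactory { Object openMetaTable(); }

public class RegionStateStoreSketch {
  private Object metaRegion;   // direct, in-process writes when meta is hosted locally
  private Object metaTable;    // remote writes through a client otherwise
  private boolean initialized;

  void start(MetaRegionHost host, MetaClientFactory clients) {
    // No hbase.assignment.usezk check any more: the store is always initialized.
    metaRegion = host.localMetaRegion().orElse(null);
    if (metaRegion == null) {
      metaTable = clients.openMetaTable();
    }
    initialized = true;
  }

  void updateRegionState(String encodedName, String newState) {
    if (!initialized) {
      return;                  // only guard left; the noPersistence short-circuit is gone
    }
    Object target = metaRegion != null ? metaRegion : metaTable;
    System.out.println("persist " + encodedName + " -> " + newState + " via " + target);
  }

  public static void main(String[] args) {
    RegionStateStoreSketch store = new RegionStateStoreSketch();
    store.start(Optional::empty, () -> "meta-table-client");
    store.updateRegionState("abc123", "OPEN");
  }
}
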
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
index 67eda4a..f111107 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/RegionStates.java
@@ -34,21 +34,17 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.RegionTransition;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableStateManager;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.zookeeper.KeeperException;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -351,28 +347,6 @@ public class RegionStates {
 
   /**
    * Update a region state. It will be put in transition if not already there.
-   *
-   * If we can't find the region info based on the region name in
-   * the transition, log a warning and return null.
-   */
-  public RegionState updateRegionState(
-      final RegionTransition transition, final State state) {
-    byte [] regionName = transition.getRegionName();
-    HRegionInfo regionInfo = getRegionInfo(regionName);
-    if (regionInfo == null) {
-      String prettyRegionName = HRegionInfo.prettyPrint(
-        HRegionInfo.encodeRegionName(regionName));
-      LOG.warn("Failed to find region " + prettyRegionName
-        + " in updating its state to " + state
-        + " based on region transition " + transition);
-      return null;
-    }
-    return updateRegionState(regionInfo, state,
-      transition.getServerName());
-  }
-
-  /**
-   * Update a region state. It will be put in transition if not already there.
    */
   public RegionState updateRegionState(
       final HRegionInfo hri, final State state, final ServerName serverName) {
@@ -548,8 +522,7 @@ public class RegionStates {
   /**
    * A server is offline, all regions on it are dead.
    */
-  public synchronized List<HRegionInfo> serverOffline(
-      final ZooKeeperWatcher watcher, final ServerName sn) {
+  public synchronized List<HRegionInfo> serverOffline(final ServerName sn) {
     // Offline all regions on this server not already in transition.
     List<HRegionInfo> rits = new ArrayList<HRegionInfo>();
     Set<HRegionInfo> assignedRegions = serverHoldings.get(sn);
@@ -565,13 +538,7 @@ public class RegionStates {
         regionsToOffline.add(region);
       } else if (isRegionInState(region, State.SPLITTING, State.MERGING)) {
         LOG.debug("Offline splitting/merging region " + getRegionState(region));
-        try {
-          // Delete the ZNode if exists
-          ZKAssign.deleteNodeFailSilent(watcher, region);
-          regionsToOffline.add(region);
-        } catch (KeeperException ke) {
-          server.abort("Unexpected ZK exception deleting node " + region, ke);
-        }
+        regionsToOffline.add(region);
       }
     }
 

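For the serverOffline() change above, here is a minimal stand-alone sketch of the slimmed-down contract: the method now takes only the dead server (no ZooKeeperWatcher), and splitting or merging regions are offlined directly with no ZKAssign.deleteNodeFailSilent() call and no abort on KeeperException. The enum, entry class and names below are illustrative stand-ins, not the HBase classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ServerOfflineSketch {
  enum State { OPEN, SPLITTING, MERGING, OFFLINE }

  static class RegionStateEntry {
    final String region; final State state;
    RegionStateEntry(String region, State state) { this.region = region; this.state = state; }
  }

  /** Formerly serverOffline(ZooKeeperWatcher, ServerName); the watcher parameter is gone. */
  static List<String> serverOffline(String deadServer, List<RegionStateEntry> hostedRegions) {
    List<String> regionsToOffline = new ArrayList<String>();
    for (RegionStateEntry e : hostedRegions) {
      if (e.state == State.OPEN) {
        regionsToOffline.add(e.region);
      } else if (e.state == State.SPLITTING || e.state == State.MERGING) {
        // No znode to delete any more; just offline it like any other region.
        regionsToOffline.add(e.region);
      }
    }
    return regionsToOffline;
  }

  public static void main(String[] args) {
    List<RegionStateEntry> hosted = Arrays.asList(
        new RegionStateEntry("r1", State.OPEN),
        new RegionStateEntry("r2", State.SPLITTING),
        new RegionStateEntry("r3", State.OFFLINE));
    System.out.println(serverOffline("rs1,16020,1", hosted));
  }
}
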
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 6204206..9390eba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.R
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Triple;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.zookeeper.KeeperException;
@@ -159,7 +159,7 @@ public class ServerManager {
    * handler is not enabled, is queued up.
    * <p>
    * So this is a set of region servers known to be dead but not submitted to
-   * ServerShutdownHander for processing yet.
+   * ServerShutdownHandler for processing yet.
    */
   private Set<ServerName> queuedDeadServers = new HashSet<ServerName>();
 
@@ -310,7 +310,7 @@ public class ServerManager {
    * Check is a server of same host and port already exists,
    * if not, or the existed one got a smaller start code, record it.
    *
-   * @param sn the server to check and record
+   * @param serverName the server to check and record
    * @param sl the server load on the server
    * @return true if the server is recorded, otherwise, false
    */
@@ -717,12 +717,10 @@ public class ServerManager {
    * <p>
    * @param server server to open a region
    * @param region region to open
-   * @param versionOfOfflineNode that needs to be present in the offline node
-   * when RS tries to change the state from OFFLINE to other states.
    * @param favoredNodes
    */
   public RegionOpeningState sendRegionOpen(final ServerName server,
-      HRegionInfo region, int versionOfOfflineNode, List<ServerName> favoredNodes)
+      HRegionInfo region, List<ServerName> favoredNodes)
   throws IOException {
     AdminService.BlockingInterface admin = getRsAdmin(server);
     if (admin == null) {
@@ -730,8 +728,8 @@ public class ServerManager {
         " failed because no RPC connection found to this server");
       return RegionOpeningState.FAILED_OPENING;
     }
-    OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, 
-      region, versionOfOfflineNode, favoredNodes, 
+    OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server,
+      region, favoredNodes,
       (RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
     try {
       OpenRegionResponse response = admin.openRegion(null, request);
@@ -751,7 +749,7 @@ public class ServerManager {
    * @return a list of region opening states
    */
   public List<RegionOpeningState> sendRegionOpen(ServerName server,
-      List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos)
+      List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos)
   throws IOException {
     AdminService.BlockingInterface admin = getRsAdmin(server);
     if (admin == null) {
@@ -760,7 +758,7 @@ public class ServerManager {
       return null;
     }
 
-    OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(regionOpenInfos, 
+    OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(regionOpenInfos,
       (RecoveryMode.LOG_REPLAY == this.services.getMasterFileSystem().getLogRecoveryMode()));
     try {
       OpenRegionResponse response = admin.openRegion(null, request);
@@ -777,15 +775,11 @@ public class ServerManager {
    * have the specified region or the region is being split.
    * @param server server to open a region
    * @param region region to open
-   * @param versionOfClosingNode
-   *   the version of znode to compare when RS transitions the znode from
-   *   CLOSING state.
    * @param dest - if the region is moved to another server, the destination server. null otherwise.
-   * @return true if server acknowledged close, false if not
    * @throws IOException
    */
   public boolean sendRegionClose(ServerName server, HRegionInfo region,
-    int versionOfClosingNode, ServerName dest, boolean transitionInZK) throws IOException {
+      ServerName dest) throws IOException {
     if (server == null) throw new NullPointerException("Passed server is null");
     AdminService.BlockingInterface admin = getRsAdmin(server);
     if (admin == null) {
@@ -795,12 +789,12 @@ public class ServerManager {
         " failed because no RPC connection found to this server");
     }
     return ProtobufUtil.closeRegion(admin, server, region.getRegionName(),
-      versionOfClosingNode, dest, transitionInZK);
+      dest);
   }
 
   public boolean sendRegionClose(ServerName server,
-      HRegionInfo region, int versionOfClosingNode) throws IOException {
-    return sendRegionClose(server, region, versionOfClosingNode, null, true);
+      HRegionInfo region) throws IOException {
+    return sendRegionClose(server, region, null);
   }
 
   /**

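Since the bulk sendRegionOpen() signature above changes from Triple<HRegionInfo, Integer, List<ServerName>> to Pair<HRegionInfo, List<ServerName>>, callers no longer carry an offline-znode version per region. The sketch below shows the shape of the payload a caller now assembles; the Pair class here is a local stand-in rather than org.apache.hadoop.hbase.util.Pair, and the region and server names are invented examples.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class RegionOpenInfoSketch {
  static final class Pair<A, B> {
    final A first; final B second;
    Pair(A first, B second) { this.first = first; this.second = second; }
    @Override public String toString() { return "(" + first + ", " + second + ")"; }
  }

  public static void main(String[] args) {
    // Before: List<Triple<regionInfo, versionOfOfflineNode, favoredNodes>>
    // After:  List<Pair<regionInfo, favoredNodes>>
    List<Pair<String, List<String>>> regionOpenInfos = new ArrayList<Pair<String, List<String>>>();
    regionOpenInfos.add(new Pair<String, List<String>>("table,,1407372000000.abc123.",
        Arrays.asList("rs1,16020,1", "rs2,16020,1")));
    regionOpenInfos.add(new Pair<String, List<String>>("table,row500,1407372000000.def456.",
        Arrays.asList("rs3,16020,1")));
    System.out.println("open request payload: " + regionOpenInfos);
  }
}
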
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
deleted file mode 100644
index b01434e..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ClosedRegionHandler.java
+++ /dev/null
@@ -1,108 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.handler;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.executor.EventHandler;
-import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-import org.apache.hadoop.hbase.master.RegionState;
-import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
-
-/**
- * Handles CLOSED region event on Master.
- * <p>
- * If table is being disabled, deletes ZK unassigned node and removes from
- * regions in transition.
- * <p>
- * Otherwise, assigns the region to another server.
- */
-@InterfaceAudience.Private
-public class ClosedRegionHandler extends EventHandler implements TotesHRegionInfo {
-  private static final Log LOG = LogFactory.getLog(ClosedRegionHandler.class);
-  private final AssignmentManager assignmentManager;
-  private final HRegionInfo regionInfo;
-  private final ClosedPriority priority;
-
-  private enum ClosedPriority {
-    META (1),
-    USER (2);
-
-    private final int value;
-    ClosedPriority(int value) {
-      this.value = value;
-    }
-    public int getValue() {
-      return value;
-    }
-  };
-
-  public ClosedRegionHandler(Server server, AssignmentManager assignmentManager,
-      HRegionInfo regionInfo) {
-    super(server, EventType.RS_ZK_REGION_CLOSED);
-    this.assignmentManager = assignmentManager;
-    this.regionInfo = regionInfo;
-    if(regionInfo.isMetaRegion()) {
-      priority = ClosedPriority.META;
-    } else {
-      priority = ClosedPriority.USER;
-    }
-  }
-
-  @Override
-  public int getPriority() {
-    return priority.getValue();
-  }
-
-  @Override
-  public HRegionInfo getHRegionInfo() {
-    return this.regionInfo;
-  }
-
-  @Override
-  public String toString() {
-    String name = "UnknownServerName";
-    if(server != null && server.getServerName() != null) {
-      name = server.getServerName().toString();
-    }
-    return getClass().getSimpleName() + "-" + name + "-" + getSeqid();
-  }
-
-  @Override
-  public void process() {
-    LOG.debug("Handling CLOSED event for " + regionInfo.getEncodedName());
-    // Check if this table is being disabled or not
-    if (this.assignmentManager.getTableStateManager().isTableState(this.regionInfo.getTable(),
-        ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING) ||
-        assignmentManager.getReplicasToClose().contains(regionInfo)) {
-      assignmentManager.offlineDisabledRegion(regionInfo);
-      return;
-    }
-    // ZK Node is in CLOSED state, assign it.
-    assignmentManager.getRegionStates().updateRegionState(
-      regionInfo, RegionState.State.CLOSED);
-    // This below has to do w/ online enable/disable of a table
-    assignmentManager.removeClosedRegion(regionInfo);
-    assignmentManager.assign(regionInfo, true);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java
deleted file mode 100644
index a2dc41b..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/OpenedRegionHandler.java
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.handler;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.Server;
-import org.apache.hadoop.hbase.coordination.OpenRegionCoordination;
-import org.apache.hadoop.hbase.executor.EventHandler;
-import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.master.AssignmentManager;
-
-/**
- * Handles OPENED region event on Master.
- */
-@InterfaceAudience.Private
-public class OpenedRegionHandler extends EventHandler implements TotesHRegionInfo {
-  private static final Log LOG = LogFactory.getLog(OpenedRegionHandler.class);
-  private final AssignmentManager assignmentManager;
-  private final HRegionInfo regionInfo;
-  private final OpenedPriority priority;
-
-  private OpenRegionCoordination coordination;
-  private OpenRegionCoordination.OpenRegionDetails ord;
-
-  private enum OpenedPriority {
-    META (1),
-    SYSTEM (2),
-    USER (3);
-
-    private final int value;
-    OpenedPriority(int value) {
-      this.value = value;
-    }
-    public int getValue() {
-      return value;
-    }
-  };
-
-  public OpenedRegionHandler(Server server,
-      AssignmentManager assignmentManager, HRegionInfo regionInfo,
-      OpenRegionCoordination coordination,
-      OpenRegionCoordination.OpenRegionDetails ord) {
-    super(server, EventType.RS_ZK_REGION_OPENED);
-    this.assignmentManager = assignmentManager;
-    this.regionInfo = regionInfo;
-    this.coordination = coordination;
-    this.ord = ord;
-    if(regionInfo.isMetaRegion()) {
-      priority = OpenedPriority.META;
-    } else if(regionInfo.getTable()
-        .getNamespaceAsString().equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
-      priority = OpenedPriority.SYSTEM;
-    } else {
-      priority = OpenedPriority.USER;
-    }
-  }
-
-  @Override
-  public int getPriority() {
-    return priority.getValue();
-  }
-
-  @Override
-  public HRegionInfo getHRegionInfo() {
-    return this.regionInfo;
-  }
-
-  @Override
-  public String toString() {
-    String name = "UnknownServerName";
-    if(server != null && server.getServerName() != null) {
-      name = server.getServerName().toString();
-    }
-    return getClass().getSimpleName() + "-" + name + "-" + getSeqid();
-  }
-
-  @Override
-  public void process() {
-    if (!coordination.commitOpenOnMasterSide(assignmentManager,regionInfo, ord)) {
-        assignmentManager.unassign(regionInfo);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
index f6d798a..5d26ac8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/ServerShutdownHandler.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.master.AssignmentManager;
@@ -45,9 +44,6 @@ import org.apache.hadoop.hbase.master.RegionStates;
 import org.apache.hadoop.hbase.master.ServerManager;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
-import org.apache.hadoop.hbase.util.ConfigUtil;
-import org.apache.hadoop.hbase.zookeeper.ZKAssign;
-import org.apache.zookeeper.KeeperException;
 
 /**
  * Process server shutdown.
@@ -162,24 +158,15 @@ public class ServerShutdownHandler extends EventHandler {
           server.getMetaTableLocator().waitMetaRegionLocation(server.getZooKeeper());
           // Skip getting user regions if the server is stopped.
           if (!this.server.isStopped()) {
-            if (ConfigUtil.useZKForAssignment(server.getConfiguration())) {
-              hris = MetaTableAccessor.getServerUserRegions(this.server.getShortCircuitConnection(),
-                this.serverName).keySet();
-            } else {
-              // Not using ZK for assignment, regionStates has everything we want
-              hris = am.getRegionStates().getServerRegions(serverName);
-              if (hris != null) {
-                hris.remove(HRegionInfo.FIRST_META_REGIONINFO);
-              }
+            hris = am.getRegionStates().getServerRegions(serverName);
+            if (hris != null) {
+              hris.remove(HRegionInfo.FIRST_META_REGIONINFO);
             }
           }
           break;
         } catch (InterruptedException e) {
           Thread.currentThread().interrupt();
           throw (InterruptedIOException)new InterruptedIOException().initCause(e);
-        } catch (IOException ioe) {
-          LOG.info("Received exception accessing hbase:meta during server shutdown of " +
-            serverName + ", retrying hbase:meta read", ioe);
         }
       }
       if (this.server.isStopped()) {
@@ -249,15 +236,8 @@ public class ServerShutdownHandler extends EventHandler {
                   LOG.info("Skip assigning region in transition on other server" + rit);
                   continue;
                 }
-                try{
-                  //clean zk node
-                  LOG.info("Reassigning region with rs = " + rit + " and deleting zk node if exists");
-                  ZKAssign.deleteNodeFailSilent(services.getZooKeeper(), hri);
-                  regionStates.updateRegionState(hri, State.OFFLINE);
-                } catch (KeeperException ke) {
-                  this.server.abort("Unexpected ZK exception deleting unassigned node " + hri, ke);
-                  return;
-                }
+                LOG.info("Reassigning region with rs = " + rit);
+                regionStates.updateRegionState(hri, State.OFFLINE);
               } else if (regionStates.isRegionInState(
                   hri, State.SPLITTING_NEW, State.MERGING_NEW)) {
                 regionStates.updateRegionState(hri, State.OFFLINE);
@@ -274,7 +254,6 @@ public class ServerShutdownHandler extends EventHandler {
                 // but though we did assign we will not be clearing the znode in CLOSING state.
                 // Doing this will have no harm. See HBASE-5927
                 regionStates.updateRegionState(hri, State.OFFLINE);
-                am.deleteClosingOrClosedNode(hri, rit.getServerName());
                 am.offlineDisabledRegion(hri);
               } else {
                 LOG.warn("THIS SHOULD NOT HAPPEN: unexpected region in transition "

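To make the ServerShutdownHandler change above concrete: the dead server's user regions are now taken from the master's in-memory RegionStates (with hbase:meta filtered out) instead of a MetaTableAccessor scan guarded by a useZKForAssignment check and an IOException retry. The sketch below is illustrative only; RegionStatesView is a hypothetical interface, though 1588230740 is the well-known encoded name of hbase:meta.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

interface RegionStatesView {
  Set<String> serverRegions(String serverName);   // encoded names of regions on that server
}

public class ShutdownRegionsSketch {
  static final String META_ENCODED_NAME = "1588230740"; // hbase:meta's encoded region name

  static Set<String> userRegionsToRecover(RegionStatesView regionStates, String deadServer) {
    Set<String> regions = regionStates.serverRegions(deadServer);
    Set<String> result = regions == null ? new HashSet<String>() : new HashSet<String>(regions);
    result.remove(META_ENCODED_NAME);   // meta is recovered separately, before user regions
    return result;
  }

  public static void main(String[] args) {
    RegionStatesView view = server -> new HashSet<String>(
        Arrays.asList(META_ENCODED_NAME, "abc123", "def456"));
    System.out.println(userRegionsToRecover(view, "rs1,16020,1"));
  }
}
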
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/UpgradeTo96.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/UpgradeTo96.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/UpgradeTo96.java
deleted file mode 100644
index 6df2eab..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/migration/UpgradeTo96.java
+++ /dev/null
@@ -1,259 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.migration;
-
-import java.io.IOException;
-import java.util.List;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.HelpFormatter;
-import org.apache.commons.cli.Option;
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.regionserver.wal.HLogSplitter;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HFileV1Detector;
-import org.apache.hadoop.hbase.util.ZKDataMigrator;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-public class UpgradeTo96 extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(UpgradeTo96.class);
-
-  private Options options = new Options();
-  /**
-   * whether to do overall upgrade (namespace and znodes)
-   */
-  private boolean upgrade;
-  /**
-   * whether to check for HFileV1
-   */
-  private boolean checkForHFileV1;
-  /**
-   * Path of directory to check for HFileV1
-   */
-  private String dirToCheckForHFileV1;
-
-  UpgradeTo96() {
-    setOptions();
-  }
-
-  private void setOptions() {
-    options.addOption("h", "help", false, "Help");
-    options.addOption(new Option("check", false, "Run upgrade check; looks for HFileV1 "
-        + " under ${hbase.rootdir} or provided 'dir' directory."));
-    options.addOption(new Option("execute", false, "Run upgrade; zk and hdfs must be up, hbase down"));
-    Option pathOption = new Option("dir", true,
-        "Relative path of dir to check for HFileV1s.");
-    pathOption.setRequired(false);
-    options.addOption(pathOption);
-  }
-
-  private boolean parseOption(String[] args) throws ParseException {
-    if (args.length == 0) return false; // no args shows help.
-
-    CommandLineParser parser = new GnuParser();
-    CommandLine cmd = parser.parse(options, args);
-    if (cmd.hasOption("h")) {
-      return false;
-    }
-    if (cmd.hasOption("execute")) upgrade = true;
-    if (cmd.hasOption("check")) checkForHFileV1 = true;
-    if (checkForHFileV1 && cmd.hasOption("dir")) {
-      this.dirToCheckForHFileV1 = cmd.getOptionValue("dir");
-    }
-    return true;
-  }
-
-  private void printUsage() {
-    HelpFormatter formatter = new HelpFormatter();
-    formatter.printHelp("$bin/hbase upgrade -check [-dir DIR]|-execute", options);
-    System.out.println("Read http://hbase.apache.org/book.html#upgrade0.96 before attempting upgrade");
-    System.out.println();
-    System.out.println("Example usage:");
-    System.out.println();
-    System.out.println("Run upgrade check; looks for HFileV1s under ${hbase.rootdir}:");
-    System.out.println(" $ bin/hbase upgrade -check");
-    System.out.println();
-    System.out.println("Run the upgrade: ");
-    System.out.println(" $ bin/hbase upgrade -execute");
-  }
-
-  @Override
-  public int run(String[] args) throws Exception {
-    if (!parseOption(args)) {
-      printUsage();
-      return -1;
-    }
-    if (checkForHFileV1) {
-      int res = doHFileV1Check();
-      if (res == 0) LOG.info("No HFileV1 found.");
-      else {
-        LOG.warn("There are some HFileV1, or corrupt files (files with incorrect major version).");
-      }
-      return res;
-    }
-    // if the user wants to upgrade, check for any HBase live process.
-    // If yes, prompt the user to stop them
-    else if (upgrade) {
-      if (isAnyHBaseProcessAlive()) {
-        LOG.error("Some HBase processes are still alive, or znodes not expired yet. "
-            + "Please stop them before upgrade or try after some time.");
-        throw new IOException("Some HBase processes are still alive, or znodes not expired yet");
-      }
-      return executeUpgrade();
-    }
-    return -1;
-  }
-
-  private boolean isAnyHBaseProcessAlive() throws IOException {
-    ZooKeeperWatcher zkw = null;
-    try {
-      zkw = new ZooKeeperWatcher(getConf(), "Check Live Processes.", new Abortable() {
-        private boolean aborted = false;
-
-        @Override
-        public void abort(String why, Throwable e) {
-          LOG.warn("Got aborted with reason: " + why + ", and error: " + e);
-          this.aborted = true;
-        }
-
-        @Override
-        public boolean isAborted() {
-          return this.aborted;
-        }
-
-      });
-      boolean liveProcessesExists = false;
-      if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) {
-        return false;
-      }
-      if (ZKUtil.checkExists(zkw, zkw.backupMasterAddressesZNode) != -1) {
-        List<String> backupMasters = ZKUtil
-            .listChildrenNoWatch(zkw, zkw.backupMasterAddressesZNode);
-        if (!backupMasters.isEmpty()) {
-          LOG.warn("Backup master(s) " + backupMasters
-              + " are alive or backup-master znodes not expired.");
-          liveProcessesExists = true;
-        }
-      }
-      if (ZKUtil.checkExists(zkw, zkw.rsZNode) != -1) {
-        List<String> regionServers = ZKUtil.listChildrenNoWatch(zkw, zkw.rsZNode);
-        if (!regionServers.isEmpty()) {
-          LOG.warn("Region server(s) " + regionServers + " are alive or rs znodes not expired.");
-          liveProcessesExists = true;
-        }
-      }
-      if (ZKUtil.checkExists(zkw, zkw.getMasterAddressZNode()) != -1) {
-        byte[] data = ZKUtil.getData(zkw, zkw.getMasterAddressZNode());
-        if (data != null && !Bytes.equals(data, HConstants.EMPTY_BYTE_ARRAY)) {
-          LOG.warn("Active master at address " + Bytes.toString(data)
-              + " is still alive or master znode not expired.");
-          liveProcessesExists = true;
-        }
-      }
-      return liveProcessesExists;
-    } catch (Exception e) {
-      LOG.error("Got exception while checking live hbase processes", e);
-      throw new IOException(e);
-    } finally {
-      if (zkw != null) {
-        zkw.close();
-      }
-    }
-  }
-
-  private int doHFileV1Check() throws Exception {
-    String[] args = null;
-    if (dirToCheckForHFileV1 != null) args = new String[] { "-p" + dirToCheckForHFileV1 };
-    return ToolRunner.run(getConf(), new HFileV1Detector(), args);
-  }
-
-  /**
-   * Executes the upgrade process. It involves:
-   * <ul>
-   * <li> Upgrading Namespace
-   * <li> Upgrading Znodes
-   * <li> Log splitting
-   * </ul>
-   * @throws Exception
-   */
-  private int executeUpgrade() throws Exception {
-    executeTool("Namespace upgrade", new NamespaceUpgrade(),
-      new String[] { "--upgrade" }, 0);
-    executeTool("Znode upgrade", new ZKDataMigrator(), null, 0);
-    doOfflineLogSplitting();
-    return 0;
-  }
-
-  private void executeTool(String toolMessage, Tool tool, String[] args, int expectedResult)
-      throws Exception {
-    LOG.info("Starting " + toolMessage);
-    int res = ToolRunner.run(getConf(), tool, new String[] { "--upgrade" });
-    if (res != expectedResult) {
-      LOG.error(toolMessage + "returned " + res + ", expected " + expectedResult);
-      throw new Exception("Unexpected return code from " + toolMessage);
-    }
-    LOG.info("Successfully completed " + toolMessage);
-  }
-
-  /**
-   * Performs log splitting for all regionserver directories.
-   * @throws Exception
-   */
-  private void doOfflineLogSplitting() throws Exception {
-    LOG.info("Starting Log splitting");
-    final Path rootDir = FSUtils.getRootDir(getConf());
-    final Path oldLogDir = new Path(rootDir, HConstants.HREGION_OLDLOGDIR_NAME);
-    FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
-    Path logDir = new Path(rootDir, HConstants.HREGION_LOGDIR_NAME);
-    FileStatus[] regionServerLogDirs = FSUtils.listStatus(fs, logDir);
-    if (regionServerLogDirs == null || regionServerLogDirs.length == 0) {
-      LOG.info("No log directories to split, returning");
-      return;
-    }
-    try {
-      for (FileStatus regionServerLogDir : regionServerLogDirs) {
-        // split its log dir, if exists
-        HLogSplitter.split(rootDir, regionServerLogDir.getPath(), oldLogDir, fs, getConf());
-      }
-      LOG.info("Successfully completed Log splitting");
-    } catch (Exception e) {
-      LOG.error("Got exception while doing Log splitting ", e);
-      throw e;
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    System.exit(ToolRunner.run(HBaseConfiguration.create(), new UpgradeTo96(), args));
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 9c28bfc..46a1e51 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -76,7 +76,6 @@ import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
-import org.apache.hadoop.hbase.coordination.CloseRegionCoordination;
 import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
 import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
@@ -123,7 +122,6 @@ import org.apache.hadoop.hbase.trace.SpanReceiverHost;
 import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
-import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
@@ -149,6 +147,7 @@ import org.apache.zookeeper.KeeperException.NoNodeException;
 import org.apache.zookeeper.data.Stat;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.protobuf.BlockingRpcChannel;
 import com.google.protobuf.ServiceException;
 
@@ -163,6 +162,12 @@ public class HRegionServer extends HasThread implements
 
   public static final Log LOG = LogFactory.getLog(HRegionServer.class);
 
+  /**
+   * For testing only!  Set to true to skip notifying region assignment to master.
+   */
+  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL")
+  public static boolean TEST_SKIP_REPORTING_TRANSITION = false;
+
   /*
    * Strings to be used in forming the exception message for
    * RegionsAlreadyInTransitionException.
@@ -410,8 +415,6 @@ public class HRegionServer extends HasThread implements
 
   protected BaseCoordinatedStateManager csm;
 
-  private final boolean useZKForAssignment;
-
   /**
    * Starts a HRegionServer at the default location.
    * @param conf
@@ -427,10 +430,9 @@ public class HRegionServer extends HasThread implements
    * @param conf
    * @param csm implementation of CoordinatedStateManager to be used
    * @throws IOException
-   * @throws InterruptedException
    */
   public HRegionServer(Configuration conf, CoordinatedStateManager csm)
-      throws IOException, InterruptedException {
+      throws IOException {
     this.fsOk = true;
     this.conf = conf;
     checkCodecs(this.conf);
@@ -479,8 +481,6 @@ public class HRegionServer extends HasThread implements
       }
     };
 
-    useZKForAssignment = ConfigUtil.useZKForAssignment(conf);
-
     // Set 'fs.defaultFS' to match the filesystem on hbase.rootdir else
     // underlying hadoop hdfs accessors will be going against wrong filesystem
     // (unless all is set to defaults).
@@ -1719,14 +1719,12 @@ public class HRegionServer extends HasThread implements
     // Update flushed sequence id of a recovering region in ZK
     updateRecoveringRegionLastFlushedSequenceId(r);
 
-    // Update ZK, or META
     if (r.getRegionInfo().isMetaRegion()) {
       MetaTableLocator.setMetaLocation(getZooKeeper(), serverName);
-    } else if (useZKForAssignment) {
-      MetaTableAccessor.updateRegionLocation(shortCircuitConnection, r.getRegionInfo(),
-        this.serverName, openSeqNum);
     }
-    if (!useZKForAssignment && !reportRegionStateTransition(
+
+    // Notify master
+    if (!reportRegionStateTransition(
         TransitionCode.OPENED, openSeqNum, r.getRegionInfo())) {
       throw new IOException("Failed to report opened region to master: "
         + r.getRegionNameAsString());
@@ -1743,6 +1741,22 @@ public class HRegionServer extends HasThread implements
   @Override
   public boolean reportRegionStateTransition(
       TransitionCode code, long openSeqNum, HRegionInfo... hris) {
+    if (TEST_SKIP_REPORTING_TRANSITION) {
+      // This is for testing only in case there is no master
+      // to handle the region transition report at all.
+      if (code == TransitionCode.OPENED) {
+        Preconditions.checkArgument(hris != null && hris.length == 1);
+        try {
+          MetaTableAccessor.updateRegionLocation(shortCircuitConnection,
+            hris[0], serverName, openSeqNum);
+          return true;
+        } catch (IOException e) {
+          LOG.info("Failed to update meta", e);
+          return false;
+        }
+      }
+    }
+
     ReportRegionStateTransitionRequest.Builder builder =
       ReportRegionStateTransitionRequest.newBuilder();
     builder.setServer(ProtobufUtil.toServerName(serverName));
@@ -2428,9 +2442,7 @@ public class HRegionServer extends HasThread implements
    */
   private void closeRegionIgnoreErrors(HRegionInfo region, final boolean abort) {
     try {
-      CloseRegionCoordination.CloseRegionDetails details =
-        csm.getCloseRegionCoordination().getDetaultDetails();
-      if (!closeRegion(region.getEncodedName(), abort, details, null)) {
+      if (!closeRegion(region.getEncodedName(), abort, null)) {
         LOG.warn("Failed to close " + region.getRegionNameAsString() +
             " - ignoring and continuing");
       }
@@ -2455,13 +2467,11 @@ public class HRegionServer extends HasThread implements
    *
    * @param encodedName Region to close
    * @param abort True if we are aborting
-   * @param crd details about closing region coordination-coordinated task
    * @return True if closed a region.
    * @throws NotServingRegionException if the region is not online
    * @throws RegionAlreadyInTransitionException if the region is already closing
    */
-  protected boolean closeRegion(String encodedName, final boolean abort,
-      CloseRegionCoordination.CloseRegionDetails crd, final ServerName sn)
+  protected boolean closeRegion(String encodedName, final boolean abort, final ServerName sn)
       throws NotServingRegionException, RegionAlreadyInTransitionException {
     //Check for permissions to close.
     HRegion actualRegion = this.getFromOnlineRegions(encodedName);
@@ -2485,7 +2495,7 @@ public class HRegionServer extends HasThread implements
         // We're going to try to do a standard close then.
         LOG.warn("The opening for region " + encodedName + " was done before we could cancel it." +
             " Doing a standard close now");
-        return closeRegion(encodedName, abort, crd, sn);
+        return closeRegion(encodedName, abort, sn);
       }
       // Let's get the region from the online region list again
       actualRegion = this.getFromOnlineRegions(encodedName);
@@ -2519,11 +2529,9 @@ public class HRegionServer extends HasThread implements
     CloseRegionHandler crh;
     final HRegionInfo hri = actualRegion.getRegionInfo();
     if (hri.isMetaRegion()) {
-      crh = new CloseMetaHandler(this, this, hri, abort,
-        csm.getCloseRegionCoordination(), crd);
+      crh = new CloseMetaHandler(this, this, hri, abort);
     } else {
-      crh = new CloseRegionHandler(this, this, hri, abort,
-        csm.getCloseRegionCoordination(), crd, sn);
+      crh = new CloseRegionHandler(this, this, hri, abort, sn);
     }
     this.service.submit(crh);
     return true;

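The HRegionServer hunks above replace the ZK/meta update in postOpenDeployTasks() with an unconditional reportRegionStateTransition() to the master, plus a TEST_SKIP_REPORTING_TRANSITION escape hatch for tests that run without a master. The sketch below mirrors that flow in a self-contained way; MasterReporter is a hypothetical stand-in for the transition-report RPC and the region name is made up.

import java.io.IOException;

interface MasterReporter {
  boolean reportOpened(String regionName, long openSeqNum);
}

public class PostOpenSketch {
  /** For testing only (mirrors TEST_SKIP_REPORTING_TRANSITION): skip the master report. */
  static boolean testSkipReportingTransition = false;

  static void postOpenDeployTasks(MasterReporter master, String regionName, long openSeqNum)
      throws IOException {
    if (testSkipReportingTransition) {
      // In the real patch this path writes the location straight into hbase:meta,
      // so tests can run without a master handling transition reports.
      return;
    }
    if (!master.reportOpened(regionName, openSeqNum)) {
      throw new IOException("Failed to report opened region to master: " + regionName);
    }
  }

  public static void main(String[] args) throws IOException {
    postOpenDeployTasks((region, seq) -> {
      System.out.println("master told: OPENED " + region + " @ seq " + seq);
      return true;
    }, "table,,1407372000000.abc123.", 42L);
  }
}
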
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 04e7995..10da06d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -50,11 +50,11 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValueUtil;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotServingRegionException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.UnknownScannerException;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.Delete;
@@ -66,7 +66,6 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.coordination.CloseRegionCoordination;
 import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
 import org.apache.hadoop.hbase.exceptions.OperationConflictException;
 import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException;
@@ -146,7 +145,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
 import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
 import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-import org.apache.hadoop.hbase.coordination.OpenRegionCoordination;
 import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
 import org.apache.hadoop.hbase.regionserver.wal.HLog;
@@ -927,10 +925,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
 
       requestCount.increment();
       LOG.info("Close " + encodedRegionName + ", moving to " + sn);
-      CloseRegionCoordination.CloseRegionDetails crd = regionServer.getCoordinatedStateManager()
-        .getCloseRegionCoordination().parseFromProtoRequest(request);
-
-      boolean closed = regionServer.closeRegion(encodedRegionName, false, crd, sn);
+      boolean closed = regionServer.closeRegion(encodedRegionName, false, sn);
       CloseRegionResponse.Builder builder = CloseRegionResponse.newBuilder().setClosed(closed);
       return builder.build();
     } catch (IOException ie) {
@@ -1236,11 +1231,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
     for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
       final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion());
-      OpenRegionCoordination coordination = regionServer.getCoordinatedStateManager().
-        getOpenRegionCoordination();
-      OpenRegionCoordination.OpenRegionDetails ord =
-        coordination.parseFromProtoRequest(regionOpenInfo);
-
       HTableDescriptor htd;
       try {
         final HRegion onlineRegion = regionServer.getFromOnlineRegions(region.getEncodedName());
@@ -1284,10 +1274,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           region.getEncodedNameAsBytes(), Boolean.TRUE);
 
         if (Boolean.FALSE.equals(previous)) {
-          // There is a close in progress. We need to mark this open as failed in ZK.
-
-          coordination.tryTransitionFromOfflineToFailedOpen(regionServer, region, ord);
-
+          // There is a close in progress. This should not happen any more.
           throw new RegionAlreadyInTransitionException("Received OPEN for the region:"
             + region.getRegionNameAsString() + " , which we are already trying to CLOSE ");
         }
@@ -1324,12 +1311,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
           // Need to pass the expected version in the constructor.
           if (region.isMetaRegion()) {
             regionServer.service.submit(new OpenMetaHandler(
-              regionServer, regionServer, region, htd, coordination, ord));
+              regionServer, regionServer, region, htd));
           } else {
             regionServer.updateRegionFavoredNodesMapping(region.getEncodedName(),
               regionOpenInfo.getFavoredNodesList());
             regionServer.service.submit(new OpenRegionHandler(
-              regionServer, regionServer, region, htd, coordination, ord));
+              regionServer, regionServer, region, htd));
           }
         }
 

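The Boolean.FALSE check above is now the only guard against racing a close: the server tracks in-flight regions in a map of encoded name to TRUE (opening) or FALSE (closing), and an OPEN that finds FALSE fails straight away with RegionAlreadyInTransitionException instead of flipping a znode to FAILED_OPEN. A stand-alone sketch of that guard pattern follows; the class, map and exception used here are invented for illustration and are not the region server's own fields.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

// Illustrative guard: TRUE = opening in progress, FALSE = closing in progress.
// The real map on the region server is keyed by the encoded name bytes.
final class RegionsInTransitionGuard {
  private final ConcurrentMap<String, Boolean> regionsInTransition = new ConcurrentHashMap<>();

  /** Marks a region as opening; fails fast if a close is already running. */
  boolean markOpening(String encodedName) {
    Boolean previous = regionsInTransition.putIfAbsent(encodedName, Boolean.TRUE);
    if (Boolean.FALSE.equals(previous)) {
      // With ZK-based assignment gone there is no znode to move to FAILED_OPEN;
      // the open simply fails here and the master is expected to retry.
      throw new IllegalStateException("Region " + encodedName + " is already closing");
    }
    // previous == TRUE means an open is already queued; null means we own this open.
    return previous == null;
  }
}
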
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
index 2db8d7e..cb28c9a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionMergeTransaction.java
@@ -31,22 +31,17 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
+import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.client.Delete;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
-import org.apache.hadoop.hbase.coordination.RegionMergeCoordination.RegionMergeDetails;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.SplitTransaction.LoggingProgressable;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.zookeeper.KeeperException;
 
 /**
  * Executes region merge as a "transaction". It is similar with
@@ -89,7 +84,6 @@ public class RegionMergeTransaction {
   private final Path mergesdir;
   // We only merge adjacent regions if forcible is false
   private final boolean forcible;
-  private boolean useCoordinationForAssignment;
 
   /**
    * Types to add to the transaction journal. Each enum is a step in the merge
@@ -141,8 +135,6 @@ public class RegionMergeTransaction {
 
   private RegionServerCoprocessorHost rsCoprocessorHost = null;
 
-  private RegionMergeDetails rmd;
-
   /**
    * Constructor
    * @param a region a to merge
@@ -231,14 +223,6 @@ public class RegionMergeTransaction {
    */
   public HRegion execute(final Server server,
  final RegionServerServices services) throws IOException {
-    useCoordinationForAssignment =
-        server == null ? true : ConfigUtil.useZKForAssignment(server.getConfiguration());
-    if (rmd == null) {
-      rmd =
-          server != null && server.getCoordinatedStateManager() != null ? ((BaseCoordinatedStateManager) server
-              .getCoordinatedStateManager()).getRegionMergeCoordination().getDefaultDetails()
-              : null;
-    }
     if (rsCoprocessorHost == null) {
       rsCoprocessorHost = server != null ?
         ((HRegionServer) server).getRegionServerCoprocessorHost() : null;
@@ -253,11 +237,6 @@ public class RegionMergeTransaction {
   public HRegion stepsAfterPONR(final Server server, final RegionServerServices services,
       HRegion mergedRegion) throws IOException {
     openMergedRegion(server, services, mergedRegion);
-    if (useCoordination(server)) {
-      ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-          .getRegionMergeCoordination().completeRegionMergeTransaction(services, mergedRegionInfo,
-            region_a, region_b, rmd, mergedRegion);
-    }
     if (rsCoprocessorHost != null) {
       rsCoprocessorHost.postMerge(this.region_a, this.region_b, mergedRegion);
     }
@@ -322,35 +301,16 @@ public class RegionMergeTransaction {
     // will determine whether the region is merged or not in case of failures.
     // If it is successful, master will roll-forward, if not, master will
     // rollback
-    if (!testing && useCoordinationForAssignment) {
-      if (metaEntries.isEmpty()) {
-        MetaTableAccessor.mergeRegions(server.getShortCircuitConnection(),
-          mergedRegion.getRegionInfo(), region_a.getRegionInfo(), region_b.getRegionInfo(),
-          server.getServerName());
-      } else {
-        mergeRegionsAndPutMetaEntries(server.getShortCircuitConnection(),
-          mergedRegion.getRegionInfo(), region_a.getRegionInfo(), region_b.getRegionInfo(),
-          server.getServerName(), metaEntries);
-      }
-    } else if (services != null && !useCoordinationForAssignment) {
-      if (!services.reportRegionStateTransition(TransitionCode.MERGE_PONR,
-          mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
-        // Passed PONR, let SSH clean it up
-        throw new IOException("Failed to notify master that merge passed PONR: "
-          + region_a.getRegionInfo().getRegionNameAsString() + " and "
-          + region_b.getRegionInfo().getRegionNameAsString());
-      }
+    if (services != null && !services.reportRegionStateTransition(TransitionCode.MERGE_PONR,
+        mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
+      // Passed PONR, let SSH clean it up
+      throw new IOException("Failed to notify master that merge passed PONR: "
+        + region_a.getRegionInfo().getRegionNameAsString() + " and "
+        + region_b.getRegionInfo().getRegionNameAsString());
     }
     return mergedRegion;
   }
 
-  private void mergeRegionsAndPutMetaEntries(HConnection hConnection,
-      HRegionInfo mergedRegion, HRegionInfo regionA, HRegionInfo regionB,
-      ServerName serverName, List<Mutation> metaEntries) throws IOException {
-    prepareMutationsForMerge(mergedRegion, regionA, regionB, serverName, metaEntries);
-    MetaTableAccessor.mutateMetaTable(hConnection, metaEntries);
-  }
-
   public void prepareMutationsForMerge(HRegionInfo mergedRegion, HRegionInfo regionA,
       HRegionInfo regionB, ServerName serverName, List<Mutation> mutations) throws IOException {
     HRegionInfo copyOfMerged = new HRegionInfo(mergedRegion);
@@ -380,40 +340,13 @@ public class RegionMergeTransaction {
 
   public HRegion stepsBeforePONR(final Server server, final RegionServerServices services,
       boolean testing) throws IOException {
-    if (rmd == null) {
-      rmd =
-          server != null && server.getCoordinatedStateManager() != null ? ((BaseCoordinatedStateManager) server
-              .getCoordinatedStateManager()).getRegionMergeCoordination().getDefaultDetails()
-              : null;
-    }
-
-    // If server doesn't have a coordination state manager, don't do coordination actions.
-    if (useCoordination(server)) {
-      try {
-        ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-            .getRegionMergeCoordination().startRegionMergeTransaction(mergedRegionInfo,
-              server.getServerName(), region_a.getRegionInfo(), region_b.getRegionInfo());
-      } catch (IOException e) {
-        throw new IOException("Failed to start region merge transaction for "
-            + this.mergedRegionInfo.getRegionNameAsString(), e);
-      }
-    } else if (services != null && !useCoordinationForAssignment) {
-      if (!services.reportRegionStateTransition(TransitionCode.READY_TO_MERGE,
-          mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
-        throw new IOException("Failed to get ok from master to merge "
-          + region_a.getRegionInfo().getRegionNameAsString() + " and "
-          + region_b.getRegionInfo().getRegionNameAsString());
-      }
+    if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_MERGE,
+        mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
+      throw new IOException("Failed to get ok from master to merge "
+        + region_a.getRegionInfo().getRegionNameAsString() + " and "
+        + region_b.getRegionInfo().getRegionNameAsString());
     }
     this.journal.add(JournalEntry.SET_MERGING);
-    if (useCoordination(server)) {
-      // After creating the merge node, wait for master to transition it
-      // from PENDING_MERGE to MERGING so that we can move on. We want master
-      // knows about it and won't transition any region which is merging.
-      ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-          .getRegionMergeCoordination().waitForRegionMergeTransaction(services, mergedRegionInfo,
-            region_a, region_b, rmd);
-    }
 
     this.region_a.getRegionFileSystem().createMergesDir();
     this.journal.add(JournalEntry.CREATED_MERGE_DIR);
@@ -432,19 +365,6 @@ public class RegionMergeTransaction {
     // clean this up.
     mergeStoreFiles(hstoreFilesOfRegionA, hstoreFilesOfRegionB);
 
-    if (useCoordination(server)) {
-      try {
-        // Do the final check in case any merging region is moved somehow. If so, the transition
-        // will fail.
-        ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-            .getRegionMergeCoordination().confirmRegionMergeTransaction(this.mergedRegionInfo,
-              region_a.getRegionInfo(), region_b.getRegionInfo(), server.getServerName(), rmd);
-      } catch (IOException e) {
-        throw new IOException("Failed setting MERGING on "
-            + this.mergedRegionInfo.getRegionNameAsString(), e);
-      }
-    }
-
     // Log to the journal that we are creating merged region. We could fail
     // halfway through. If we do, we could have left
     // stuff in fs that needs cleanup -- a storefile or two. Thats why we
@@ -578,20 +498,13 @@ public class RegionMergeTransaction {
     merged.openHRegion(reporter);
 
     if (services != null) {
-      try {
-        if (useCoordinationForAssignment) {
-          services.postOpenDeployTasks(merged);
-        } else if (!services.reportRegionStateTransition(TransitionCode.MERGED,
-            mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
-          throw new IOException("Failed to report merged region to master: "
-            + mergedRegionInfo.getShortNameToLog());
-        }
-        services.addToOnlineRegions(merged);
-      } catch (KeeperException ke) {
-        throw new IOException(ke);
+      if (!services.reportRegionStateTransition(TransitionCode.MERGED,
+          mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
+        throw new IOException("Failed to report merged region to master: "
+          + mergedRegionInfo.getShortNameToLog());
       }
+      services.addToOnlineRegions(merged);
     }
-
   }
 
   /**
@@ -652,10 +565,7 @@ public class RegionMergeTransaction {
       switch (je) {
 
         case SET_MERGING:
-        if (useCoordination(server)) {
-          ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-              .getRegionMergeCoordination().clean(this.mergedRegionInfo);
-          } else if (services != null && !useCoordinationForAssignment
+          if (services != null
               && !services.reportRegionStateTransition(TransitionCode.MERGE_REVERTED,
                   mergedRegionInfo, region_a.getRegionInfo(), region_b.getRegionInfo())) {
             return false;
@@ -734,13 +644,6 @@ public class RegionMergeTransaction {
     return this.mergesdir;
   }
 
-  private boolean useCoordination(final Server server) {
-    return server != null && useCoordinationForAssignment
-        && server.getCoordinatedStateManager() != null;
-  }
-
-
-
   /**
    * Checks if the given region has merge qualifier in hbase:meta
    * @param services

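With the merge znode removed, the entire master handshake is four reportRegionStateTransition calls: READY_TO_MERGE before any files are touched, MERGE_PONR at the point of no return, MERGED once the merged region has been opened, and MERGE_REVERTED from rollback if the PONR was never passed. A small sketch of that protocol, using only the RegionServerServices method visible in the hunks above; the helper class and its error handling are illustrative, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;

// Illustrative helper: one report per merge milestone, failing the transaction
// if the master rejects it. services may be null in unit tests, in which case
// the report is skipped, matching the guards in the transaction itself.
final class MergeReports {
  static void report(RegionServerServices services, TransitionCode code,
      HRegionInfo merged, HRegionInfo a, HRegionInfo b) throws IOException {
    if (services != null && !services.reportRegionStateTransition(code, merged, a, b)) {
      throw new IOException("Master rejected " + code + " for " + merged.getShortNameToLog());
    }
  }
}

// Order of the reports over the life of one merge (sketch):
//   READY_TO_MERGE  - in stepsBeforePONR, before the merges dir is created
//   MERGE_PONR      - right before the point of no return
//   MERGED          - once the merged region has been opened
//   MERGE_REVERTED  - from rollback of SET_MERGING, only if the PONR was not reached
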
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
index c5b29e6..30b55dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/SplitTransaction.java
@@ -39,16 +39,11 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
-import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
-import org.apache.hadoop.hbase.coordination.SplitTransactionCoordination;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CancelableProgressable;
-import org.apache.hadoop.hbase.util.ConfigUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.PairOfSameType;
@@ -90,8 +85,6 @@ public class SplitTransaction {
   private HRegionInfo hri_a;
   private HRegionInfo hri_b;
   private long fileSplitTimeout = 30000;
-  public SplitTransactionCoordination.SplitTransactionDetails std;
-  boolean useZKForAssignment;
 
   /*
    * Row to split around
@@ -275,52 +268,23 @@ public class SplitTransaction {
     // will determine whether the region is split or not in case of failures.
     // If it is successful, master will roll-forward, if not, master will rollback
     // and assign the parent region.
-    if (!testing && useZKForAssignment) {
-      if (metaEntries == null || metaEntries.isEmpty()) {
-        MetaTableAccessor.splitRegion(server.getShortCircuitConnection(),
-          parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(),
-          daughterRegions.getSecond().getRegionInfo(), server.getServerName());
-      } else {
-        offlineParentInMetaAndputMetaEntries(server.getShortCircuitConnection(),
-          parent.getRegionInfo(), daughterRegions.getFirst().getRegionInfo(), daughterRegions
-              .getSecond().getRegionInfo(), server.getServerName(), metaEntries);
-      }
-    } else if (services != null && !useZKForAssignment) {
-      if (!services.reportRegionStateTransition(TransitionCode.SPLIT_PONR,
-          parent.getRegionInfo(), hri_a, hri_b)) {
-        // Passed PONR, let SSH clean it up
-        throw new IOException("Failed to notify master that split passed PONR: "
-          + parent.getRegionInfo().getRegionNameAsString());
-      }
+    if (services != null && !services.reportRegionStateTransition(TransitionCode.SPLIT_PONR,
+        parent.getRegionInfo(), hri_a, hri_b)) {
+      // Passed PONR, let SSH clean it up
+      throw new IOException("Failed to notify master that split passed PONR: "
+        + parent.getRegionInfo().getRegionNameAsString());
     }
     return daughterRegions;
   }
 
   public PairOfSameType<HRegion> stepsBeforePONR(final Server server,
       final RegionServerServices services, boolean testing) throws IOException {
-
-    if (useCoordinatedStateManager(server)) {
-      if (std == null) {
-        std =
-            ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-                .getSplitTransactionCoordination().getDefaultDetails();
-      }
-      ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-          .getSplitTransactionCoordination().startSplitTransaction(parent, server.getServerName(),
-            hri_a, hri_b);
-    } else if (services != null && !useZKForAssignment) {
-      if (!services.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT,
-          parent.getRegionInfo(), hri_a, hri_b)) {
-        throw new IOException("Failed to get ok from master to split "
-          + parent.getRegionNameAsString());
-      }
+    if (services != null && !services.reportRegionStateTransition(TransitionCode.READY_TO_SPLIT,
+        parent.getRegionInfo(), hri_a, hri_b)) {
+      throw new IOException("Failed to get ok from master to split "
+        + parent.getRegionNameAsString());
     }
     this.journal.add(JournalEntry.SET_SPLITTING);
-    if (useCoordinatedStateManager(server)) {
-      ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-          .getSplitTransactionCoordination().waitForSplitTransaction(services, parent, hri_a,
-            hri_b, std);
-    }
 
     this.parent.getRegionFileSystem().createSplitsDir();
     this.journal.add(JournalEntry.CREATE_SPLIT_DIR);
@@ -415,24 +379,14 @@ public class SplitTransaction {
           bOpener.getName(), bOpener.getException());
       }
       if (services != null) {
-        try {
-          if (useZKForAssignment) {
-            // add 2nd daughter first (see HBASE-4335)
-            services.postOpenDeployTasks(b);
-          } else if (!services.reportRegionStateTransition(TransitionCode.SPLIT,
-              parent.getRegionInfo(), hri_a, hri_b)) {
-            throw new IOException("Failed to report split region to master: "
-              + parent.getRegionInfo().getShortNameToLog());
-          }
-          // Should add it to OnlineRegions
-          services.addToOnlineRegions(b);
-          if (useZKForAssignment) {
-            services.postOpenDeployTasks(a);
-          }
-          services.addToOnlineRegions(a);
-        } catch (KeeperException ke) {
-          throw new IOException(ke);
+        if (!services.reportRegionStateTransition(TransitionCode.SPLIT,
+            parent.getRegionInfo(), hri_a, hri_b)) {
+          throw new IOException("Failed to report split region to master: "
+            + parent.getRegionInfo().getShortNameToLog());
         }
+        // Should add it to OnlineRegions
+        services.addToOnlineRegions(b);
+        services.addToOnlineRegions(a);
       }
     }
   }
@@ -450,13 +404,6 @@ public class SplitTransaction {
   public PairOfSameType<HRegion> execute(final Server server,
       final RegionServerServices services)
   throws IOException {
-    useZKForAssignment = server == null ? true :
-      ConfigUtil.useZKForAssignment(server.getConfiguration());
-    if (useCoordinatedStateManager(server)) {
-      std =
-          ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-              .getSplitTransactionCoordination().getDefaultDetails();
-    }
     PairOfSameType<HRegion> regions = createDaughters(server, services);
     if (this.parent.getCoprocessorHost() != null) {
       this.parent.getCoprocessorHost().preSplitAfterPONR();
@@ -468,44 +415,13 @@ public class SplitTransaction {
       final RegionServerServices services, PairOfSameType<HRegion> regions)
       throws IOException {
     openDaughters(server, services, regions.getFirst(), regions.getSecond());
-    if (useCoordinatedStateManager(server)) {
-      ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-          .getSplitTransactionCoordination().completeSplitTransaction(services, regions.getFirst(),
-            regions.getSecond(), std, parent);
-    }
     // Coprocessor callback
     if (parent.getCoprocessorHost() != null) {
       parent.getCoprocessorHost().postSplit(regions.getFirst(), regions.getSecond());
     }
-
-
     return regions;
   }
 
-  private void offlineParentInMetaAndputMetaEntries(HConnection hConnection,
-      HRegionInfo parent, HRegionInfo splitA, HRegionInfo splitB,
-      ServerName serverName, List<Mutation> metaEntries) throws IOException {
-    List<Mutation> mutations = metaEntries;
-    HRegionInfo copyOfParent = new HRegionInfo(parent);
-    copyOfParent.setOffline(true);
-    copyOfParent.setSplit(true);
-
-    //Put for parent
-    Put putParent = MetaTableAccessor.makePutFromRegionInfo(copyOfParent);
-    MetaTableAccessor.addDaughtersToPut(putParent, splitA, splitB);
-    mutations.add(putParent);
-    
-    //Puts for daughters
-    Put putA = MetaTableAccessor.makePutFromRegionInfo(splitA);
-    Put putB = MetaTableAccessor.makePutFromRegionInfo(splitB);
-
-    addLocation(putA, serverName, 1); //these are new regions, openSeqNum = 1 is fine.
-    addLocation(putB, serverName, 1);
-    mutations.add(putA);
-    mutations.add(putB);
-    MetaTableAccessor.mutateMetaTable(hConnection, mutations);
-  }
-
   public Put addLocation(final Put p, final ServerName sn, long openSeqNum) {
     p.addImmutable(HConstants.CATALOG_FAMILY, HConstants.SERVER_QUALIFIER,
       Bytes.toBytes(sn.getHostAndPort()));
@@ -588,10 +504,6 @@ public class SplitTransaction {
     }
   }
 
-  private boolean useCoordinatedStateManager(final Server server) {
-    return server != null && useZKForAssignment && server.getCoordinatedStateManager() != null;
-  }
-
   private void splitStoreFiles(final Map<byte[], List<StoreFile>> hstoreFilesToSplit)
       throws IOException {
     if (hstoreFilesToSplit == null) {
@@ -707,10 +619,7 @@ public class SplitTransaction {
       switch(je) {
 
       case SET_SPLITTING:
-        if (useCoordinatedStateManager(server) && server instanceof HRegionServer) {
-          ((BaseCoordinatedStateManager) server.getCoordinatedStateManager())
-              .getSplitTransactionCoordination().clean(this.parent.getRegionInfo());
-        } else if (services != null && !useZKForAssignment
+        if (services != null
             && !services.reportRegionStateTransition(TransitionCode.SPLIT_REVERTED,
                 parent.getRegionInfo(), hri_a, hri_b)) {
           return false;

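Split follows the same shape: READY_TO_SPLIT, SPLIT_PONR, SPLIT and SPLIT_REVERTED reports replace the split znode, the wait for the master to move it to SPLITTING, and the per-daughter postOpenDeployTasks calls. From a caller's point of view the entry point is unchanged; a minimal driver sketch is below, assuming the two-argument SplitTransaction constructor plus the prepare() and rollback(server, services) methods from this branch, none of which appear in the hunks above.

import java.io.IOException;

import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.hadoop.hbase.regionserver.SplitTransaction;
import org.apache.hadoop.hbase.util.PairOfSameType;

// Illustrative driver: split 'parent' at 'splitRow', rolling back on failure.
// No ZK nodes are created, watched or cleaned up anywhere in this path.
final class SplitDriver {
  static PairOfSameType<HRegion> splitOrRollback(Server server, RegionServerServices services,
      HRegion parent, byte[] splitRow) throws IOException {
    SplitTransaction st = new SplitTransaction(parent, splitRow);
    if (!st.prepare()) {
      return null;  // not splittable (e.g. the region has references); nothing to undo
    }
    try {
      // execute() drives the READY_TO_SPLIT, SPLIT_PONR and SPLIT reports.
      return st.execute(server, services);
    } catch (IOException ioe) {
      // Before the PONR the transaction can be undone; rollback() reports
      // SPLIT_REVERTED to the master as part of undoing SET_SPLITTING.
      st.rollback(server, services);
      throw ioe;
    }
  }
}
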
http://git-wip-us.apache.org/repos/asf/hbase/blob/17dff681/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java
index df8ae23..14d9b17 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/CloseMetaHandler.java
@@ -23,10 +23,9 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
-import org.apache.hadoop.hbase.coordination.CloseRegionCoordination;
 
 /**
- * Handles closing of the root region on a region server.
+ * Handles closing of the meta region on a region server.
  */
 @InterfaceAudience.Private
 public class CloseMetaHandler extends CloseRegionHandler {
@@ -35,9 +34,7 @@ public class CloseMetaHandler extends CloseRegionHandler {
   public CloseMetaHandler(final Server server,
       final RegionServerServices rsServices,
       final HRegionInfo regionInfo,
-      final boolean abort, CloseRegionCoordination closeRegionCoordination,
-      CloseRegionCoordination.CloseRegionDetails crd) {
-    super(server, rsServices, regionInfo, abort, closeRegionCoordination,
-      crd, EventType.M_RS_CLOSE_META);
+      final boolean abort) {
+    super(server, rsServices, regionInfo, abort, EventType.M_RS_CLOSE_META, null);
   }
 }