You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by el...@apache.org on 2017/01/23 23:01:52 UTC

[19/50] [abbrv] hbase git commit: HBASE-16786 Procedure V2 - Move ZK-lock's uses to Procedure framework locks (LockProcedure) - Matteo Bertozzi Locks are no longer hosted up in zookeeper but instead by the Master.

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 5259961..ceed050 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -48,7 +48,6 @@ import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.management.MalformedObjectNameException;
@@ -62,6 +61,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ClockOutOfSyncException;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -87,6 +87,8 @@ import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.NonceGenerator;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
+import org.apache.hadoop.hbase.client.locking.EntityLock;
+import org.apache.hadoop.hbase.client.locking.LockServiceClient;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
 import org.apache.hadoop.hbase.conf.ConfigurationObserver;
 import org.apache.hadoop.hbase.coordination.BaseCoordinatedStateManager;
@@ -111,7 +113,6 @@ import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.RegionState.State;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
 import org.apache.hadoop.hbase.mob.MobCacheConfig;
 import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
@@ -147,6 +148,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringP
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionServerInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
@@ -214,6 +216,9 @@ import sun.misc.SignalHandler;
 public class HRegionServer extends HasThread implements
     RegionServerServices, LastSequenceId, ConfigurationObserver {
 
+  public static final String REGION_LOCK_AWAIT_TIME_SEC =
+      "hbase.regionserver.region.lock.await.time.sec";
+  public static final int DEFAULT_REGION_LOCK_AWAIT_TIME_SEC = 300;  // 5 min
   private static final Log LOG = LogFactory.getLog(HRegionServer.class);
 
   /**
@@ -338,6 +343,7 @@ public class HRegionServer extends HasThread implements
 
   // Stub to do region server status calls against the master.
   private volatile RegionServerStatusService.BlockingInterface rssStub;
+  private volatile LockService.BlockingInterface lockStub;
   // RPC client. Used to make the stub above that does region server status checking.
   RpcClient rpcClient;
 
@@ -464,9 +470,6 @@ public class HRegionServer extends HasThread implements
 
   private RegionServerQuotaManager rsQuotaManager;
 
-  // Table level lock manager for locking for region operations
-  protected TableLockManager tableLockManager;
-
   /**
    * Nonce manager. Nonces are used to make operations like increment and append idempotent
    * in the case where client doesn't receive the response from a successful operation and
@@ -604,9 +607,6 @@ public class HRegionServer extends HasThread implements
       this.csm.initialize(this);
       this.csm.start();
 
-      tableLockManager = TableLockManager.createTableLockManager(
-        conf, zooKeeper, serverName);
-
       masterAddressTracker = new MasterAddressTracker(getZooKeeper(), this);
       masterAddressTracker.start();
 
@@ -1134,6 +1134,9 @@ public class HRegionServer extends HasThread implements
     if (this.rssStub != null) {
       this.rssStub = null;
     }
+    if (this.lockStub != null) {
+      this.lockStub = null;
+    }
     if (this.rpcClient != null) {
       this.rpcClient.close();
     }
@@ -1529,11 +1532,6 @@ public class HRegionServer extends HasThread implements
     return regionServerAccounting;
   }
 
-  @Override
-  public TableLockManager getTableLockManager() {
-    return tableLockManager;
-  }
-
   /*
    * @param r Region to get RegionLoad for.
    * @param regionLoadBldr the RegionLoad.Builder, can be null
@@ -2385,7 +2383,8 @@ public class HRegionServer extends HasThread implements
     }
     ServerName sn = null;
     long previousLogTime = 0;
-    RegionServerStatusService.BlockingInterface intf = null;
+    RegionServerStatusService.BlockingInterface intRssStub = null;
+    LockService.BlockingInterface intLockStub = null;
     boolean interrupted = false;
     try {
       while (keepLooping()) {
@@ -2409,14 +2408,16 @@ public class HRegionServer extends HasThread implements
 
         // If we are on the active master, use the shortcut
         if (this instanceof HMaster && sn.equals(getServerName())) {
-          intf = ((HMaster)this).getMasterRpcServices();
+          intRssStub = ((HMaster)this).getMasterRpcServices();
+          intLockStub = ((HMaster)this).getMasterRpcServices();
           break;
         }
         try {
           BlockingRpcChannel channel =
             this.rpcClient.createBlockingRpcChannel(sn, userProvider.getCurrent(),
               shortOperationTimeout);
-          intf = RegionServerStatusService.newBlockingStub(channel);
+          intRssStub = RegionServerStatusService.newBlockingStub(channel);
+          intLockStub = LockService.newBlockingStub(channel);
           break;
         } catch (IOException e) {
           if (System.currentTimeMillis() > (previousLogTime + 1000)) {
@@ -2439,7 +2440,8 @@ public class HRegionServer extends HasThread implements
         Thread.currentThread().interrupt();
       }
     }
-    rssStub = intf;
+    this.rssStub = intRssStub;
+    this.lockStub = intLockStub;
     return sn;
   }
 
@@ -3616,4 +3618,11 @@ public class HRegionServer extends HasThread implements
   public SecureBulkLoadManager getSecureBulkLoadManager() {
     return this.secureBulkLoadManager;
   }
-}
+
+  @Override
+  public EntityLock regionLock(List<HRegionInfo> regionInfos, String description,
+      Abortable abort) throws IOException {
+    return new LockServiceClient(conf, lockStub, clusterConnection.getNonceGenerator())
+      .regionLock(regionInfos, description, abort);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 2a93b70..808cd20 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1800,7 +1800,6 @@ public class HStore implements Store {
   @VisibleForTesting
   protected void completeCompaction(final Collection<StoreFile> compactedFiles)
     throws IOException {
-    LOG.debug("Completing compaction...");
     this.storeSize = 0L;
     this.totalUncompressedBytes = 0L;
     for (StoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
index 5a6c7ed..c92124c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerServices.java
@@ -25,14 +25,15 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
@@ -77,11 +78,6 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
   RegionServerAccounting getRegionServerAccounting();
 
   /**
-   * @return RegionServer's instance of {@link TableLockManager}
-   */
-  TableLockManager getTableLockManager();
-
-  /**
    * @return RegionServer's instance of {@link RegionServerQuotaManager}
    */
   RegionServerQuotaManager getRegionServerQuotaManager();
@@ -271,4 +267,10 @@ public interface RegionServerServices extends OnlineRegions, FavoredNodesForRegi
    * @return the metrics tracker for the region server
    */
   MetricsRegionServer getMetrics();
+
+  /**
+   * Master based locks on namespaces/tables/regions.
+   */
+  EntityLock regionLock(List<HRegionInfo> regionInfos, String description,
+      Abortable abort) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index defffe3..553f756 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -129,7 +129,6 @@ import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker;
 import org.apache.hadoop.hbase.util.hbck.ReplicationChecker;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandler;
 import org.apache.hadoop.hbase.util.hbck.TableIntegrityErrorHandlerImpl;
-import org.apache.hadoop.hbase.util.hbck.TableLockChecker;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALFactory;
 import org.apache.hadoop.hbase.wal.WALSplitter;
@@ -249,7 +248,6 @@ public class HBaseFsck extends Configured implements Closeable {
   private boolean fixSplitParents = false; // fix lingering split parents
   private boolean fixReferenceFiles = false; // fix lingering reference store file
   private boolean fixEmptyMetaCells = false; // fix (remove) empty REGIONINFO_QUALIFIER rows
-  private boolean fixTableLocks = false; // fix table locks which are expired
   private boolean fixReplication = false; // fix undeleted replication queues for removed peer
   private boolean fixAny = false; // Set to true if any of the fix is required.
 
@@ -768,8 +766,6 @@ public class HBaseFsck extends Configured implements Closeable {
       checkRegionBoundaries();
     }
 
-    checkAndFixTableLocks();
-
     checkAndFixReplication();
 
     // Remove the hbck znode
@@ -1537,7 +1533,7 @@ public class HBaseFsck extends Configured implements Closeable {
 
   /**
    * Removes the empty Meta recovery WAL directory.
-   * @param walFactoryID A unique identifier for WAL factory which was used by Filesystem to make a
+   * @param walFactoryId A unique identifier for WAL factory which was used by Filesystem to make a
    *          Meta recovery WAL directory inside WAL directory path.
    */
   private void removeHBCKMetaRecoveryWALDir(String walFactoryId) throws IOException {
@@ -3342,15 +3338,6 @@ public class HBaseFsck extends Configured implements Closeable {
     return hbi;
   }
 
-  private void checkAndFixTableLocks() throws IOException {
-    TableLockChecker checker = new TableLockChecker(zkw, errors);
-    checker.checkTableLocks();
-
-    if (this.fixTableLocks) {
-      checker.fixExpiredTableLocks();
-    }
-  }
-
   private void checkAndFixReplication() throws IOException {
     ReplicationChecker checker = new ReplicationChecker(getConf(), zkw, connection, errors);
     checker.checkUnDeletedQueues();
@@ -4316,15 +4303,6 @@ public class HBaseFsck extends Configured implements Closeable {
   }
 
   /**
-   * Set table locks fix mode.
-   * Delete table locks held for a long time
-   */
-  public void setFixTableLocks(boolean shouldFix) {
-    fixTableLocks = shouldFix;
-    fixAny |= shouldFix;
-  }
-
-  /**
    * Set replication fix mode.
    */
   public void setFixReplication(boolean shouldFix) {
@@ -4583,14 +4561,10 @@ public class HBaseFsck extends Configured implements Closeable {
     out.println("");
     out.println("  Metadata Repair shortcuts");
     out.println("   -repair           Shortcut for -fixAssignments -fixMeta -fixHdfsHoles " +
-        "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles -fixTableLocks");
+        "-fixHdfsOrphans -fixHdfsOverlaps -fixVersionFile -sidelineBigOverlaps -fixReferenceFiles");
     out.println("   -repairHoles      Shortcut for -fixAssignments -fixMeta -fixHdfsHoles");
 
     out.println("");
-    out.println("  Table lock options");
-    out.println("   -fixTableLocks    Deletes table locks held for a long time (hbase.table.lock.expire.ms, 10min by default)");
-
-    out.println("");
     out.println(" Replication options");
     out.println("   -fixReplication   Deletes replication queues for removed peers");
 
@@ -4728,7 +4702,6 @@ public class HBaseFsck extends Configured implements Closeable {
         setFixSplitParents(false);
         setCheckHdfs(true);
         setFixReferenceFiles(true);
-        setFixTableLocks(true);
       } else if (cmd.equals("-repairHoles")) {
         // this will make all missing hdfs regions available but may lose data
         setFixHdfsHoles(true);
@@ -4775,8 +4748,6 @@ public class HBaseFsck extends Configured implements Closeable {
         setCheckMetaOnly();
       } else if (cmd.equals("-boundaries")) {
         setRegionBoundariesCheck();
-      } else if (cmd.equals("-fixTableLocks")) {
-        setFixTableLocks(true);
       } else if (cmd.equals("-fixReplication")) {
         setFixReplication(true);
       } else if (cmd.startsWith("-")) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
deleted file mode 100644
index 6777546..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/TableLockChecker.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.util.hbck;
-
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.InterProcessLock.MetadataHandler;
-import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.HBaseFsck;
-import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-
-/**
- * Utility to check and fix table locks. Need zookeeper connection.
- */
-public class TableLockChecker {
-
-  private ZooKeeperWatcher zkWatcher;
-  private ErrorReporter errorReporter;
-  long expireTimeout;
-
-  public TableLockChecker(ZooKeeperWatcher zkWatcher, ErrorReporter errorReporter) {
-    this.zkWatcher = zkWatcher;
-    this.errorReporter = errorReporter;
-    expireTimeout = zkWatcher.getConfiguration().getLong(
-        TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT,
-        TableLockManager.DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS);
-  }
-
-  public void checkTableLocks() throws IOException {
-    TableLockManager tableLockManager
-      = TableLockManager.createTableLockManager(zkWatcher.getConfiguration(), zkWatcher, null);
-    final long expireDate = EnvironmentEdgeManager.currentTime() - expireTimeout;
-
-    MetadataHandler handler = new MetadataHandler() {
-      @Override
-      public void handleMetadata(byte[] ownerMetadata) {
-        ZooKeeperProtos.TableLock data = TableLockManager.fromBytes(ownerMetadata);
-        String msg = "Table lock acquire attempt found:";
-        if (data != null) {
-           msg = msg +
-              String.format("[tableName=%s:%s, lockOwner=%s, threadId=%s, " +
-              "purpose=%s, isShared=%s, createTime=%s]",
-              data.getTableName().getNamespace().toStringUtf8(),
-              data.getTableName().getQualifier().toStringUtf8(),
-              ProtobufUtil.toServerName(data.getLockOwner()), data.getThreadId(),
-              data.getPurpose(), data.getIsShared(), data.getCreateTime());
-        }
-
-        if (data != null && data.hasCreateTime() && data.getCreateTime() < expireDate) {
-          errorReporter.reportError(HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK, msg);
-        } else {
-          errorReporter.print(msg);
-        }
-      }
-    };
-
-    tableLockManager.visitAllLocks(handler);
-  }
-
-  public void fixExpiredTableLocks() throws IOException {
-    TableLockManager tableLockManager
-      = TableLockManager.createTableLockManager(zkWatcher.getConfiguration(), zkWatcher, null);
-
-    tableLockManager.reapAllExpiredLocks();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
index 404c9ae..5e2a70f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MockRegionServerServices.java
@@ -32,11 +32,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-import org.apache.hadoop.hbase.master.TableLockManager;
-import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
@@ -190,11 +189,6 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
-  public TableLockManager getTableLockManager() {
-    return new NullTableLockManager();
-  }
-
-  @Override
   public RegionServerQuotaManager getRegionServerQuotaManager() {
     return null;
   }
@@ -353,6 +347,12 @@ public class MockRegionServerServices implements RegionServerServices {
   }
 
   @Override
+  public EntityLock regionLock(List<HRegionInfo> regionInfos, String description, Abortable abort)
+      throws IOException {
+    return null;
+  }
+
+  @Override
   public SecureBulkLoadManager getSecureBulkLoadManager() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index 65eca6c..66963fd 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -2173,7 +2172,7 @@ public class TestMasterObserver {
 
     final TableName tableName = TableName.valueOf("testLockedTable");
     long procId = master.getLockManager().remoteLocks().requestTableLock(tableName,
-          LockProcedure.LockType.EXCLUSIVE, "desc", HConstants.NO_NONCE, HConstants.NO_NONCE);
+          LockProcedure.LockType.EXCLUSIVE, "desc", null);
     master.getLockManager().remoteLocks().lockHeartbeat(procId, false);
 
     assertTrue(cp.preAndPostForQueueLockAndHeartbeatLockCalled());

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 48cf8a5..683e9b3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -310,11 +310,6 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
-  public TableLockManager getTableLockManager() {
-    return null;
-  }
-
-  @Override
   public TableStateManager getTableStateManager() {
     return null;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index c5f294a..467d4a5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentSkipListMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ChoreService;
@@ -47,7 +48,6 @@ import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;
-import org.apache.hadoop.hbase.master.TableLockManager.NullTableLockManager;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest;
@@ -334,11 +334,6 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
-  public TableLockManager getTableLockManager() {
-    return new NullTableLockManager();
-  }
-
-  @Override
   public RegionServerQuotaManager getRegionServerQuotaManager() {
     return null;
   }
@@ -703,6 +698,12 @@ ClientProtos.ClientService.BlockingInterface, RegionServerServices {
   }
 
   @Override
+  public EntityLock regionLock(List<HRegionInfo> regionInfos, String description, Abortable abort)
+      throws IOException {
+    return null;
+  }
+
+  @Override
   public PrepareBulkLoadResponse prepareBulkLoad(RpcController controller,
       PrepareBulkLoadRequest request) throws ServiceException {
     return null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
deleted file mode 100644
index 94b2bc1..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestTableLockManager.java
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
- * Copyright The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Random;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.ChoreService;
-import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.InterProcessLock;
-import org.apache.hadoop.hbase.NotServingRegionException;
-import org.apache.hadoop.hbase.ScheduledChore;
-import org.apache.hadoop.hbase.ServerName;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.TableNotDisabledException;
-import org.apache.hadoop.hbase.Waiter;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
-import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
-import org.apache.hadoop.hbase.coprocessor.ObserverContext;
-import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.testclassification.LargeTests;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.LoadTestTool;
-import org.apache.hadoop.hbase.util.StoppableImplementation;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-/**
- * Tests the default table lock manager
- */
-@Category({MasterTests.class, LargeTests.class})
-public class TestTableLockManager {
-
-  private static final Log LOG =
-    LogFactory.getLog(TestTableLockManager.class);
-
-  private static final TableName TABLE_NAME =
-      TableName.valueOf("TestTableLevelLocks");
-
-  private static final byte[] FAMILY = Bytes.toBytes("f1");
-
-  private static final byte[] NEW_FAMILY = Bytes.toBytes("f2");
-
-  private final HBaseTestingUtility TEST_UTIL =
-    new HBaseTestingUtility();
-
-  private static final CountDownLatch deleteColumn = new CountDownLatch(1);
-  private static final CountDownLatch addColumn = new CountDownLatch(1);
-
-  public void prepareMiniCluster() throws Exception {
-    TEST_UTIL.startMiniCluster(2);
-    TEST_UTIL.createTable(TABLE_NAME, FAMILY);
-  }
-
-  public void prepareMiniZkCluster() throws Exception {
-    TEST_UTIL.startMiniZKCluster(1);
-  }
-
-  @Before
-  public void setUp() throws IOException {
-    TEST_UTIL.getConfiguration().set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
-        String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    TEST_UTIL.shutdownMiniCluster();
-  }
-
-  public static class TestLockTimeoutExceptionMasterObserver extends BaseMasterObserver {
-    @Override
-    public void preDeleteColumnFamilyAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
-        TableName tableName, byte[] columnFamily) throws IOException {
-      deleteColumn.countDown();
-    }
-    @Override
-    public void postCompletedDeleteColumnFamilyAction(
-        ObserverContext<MasterCoprocessorEnvironment> ctx,
-        TableName tableName, byte[] columnFamily) throws IOException {
-      Threads.sleep(10000);
-    }
-
-    @Override
-    public void preAddColumnFamilyAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
-        TableName tableName, HColumnDescriptor columnFamily) throws IOException {
-      fail("Add column should have timeouted out for acquiring the table lock");
-    }
-  }
-
-  @Test(timeout = 600000)
-  public void testAlterAndDisable() throws Exception {
-    prepareMiniCluster();
-    // Send a request to alter a table, then sleep during
-    // the alteration phase. In the mean time, from another
-    // thread, send a request to disable, and then delete a table.
-
-    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
-    master.getMasterCoprocessorHost().load(TestAlterAndDisableMasterObserver.class,
-            0, TEST_UTIL.getConfiguration());
-
-    ExecutorService executor = Executors.newFixedThreadPool(2);
-    Future<Object> alterTableFuture = executor.submit(new Callable<Object>() {
-      @Override
-      public Object call() throws Exception {
-        Admin admin = TEST_UTIL.getHBaseAdmin();
-        admin.addColumnFamily(TABLE_NAME, new HColumnDescriptor(NEW_FAMILY));
-        LOG.info("Added new column family");
-        HTableDescriptor tableDesc = admin.getTableDescriptor(TABLE_NAME);
-        assertTrue(tableDesc.getFamiliesKeys().contains(NEW_FAMILY));
-        return null;
-      }
-    });
-    Future<Object> disableTableFuture = executor.submit(new Callable<Object>() {
-      @Override
-      public Object call() throws Exception {
-        Admin admin = TEST_UTIL.getHBaseAdmin();
-        admin.disableTable(TABLE_NAME);
-        assertTrue(admin.isTableDisabled(TABLE_NAME));
-        admin.deleteTable(TABLE_NAME);
-        assertFalse(admin.tableExists(TABLE_NAME));
-        return null;
-      }
-    });
-
-    try {
-      disableTableFuture.get();
-      alterTableFuture.get();
-    } catch (ExecutionException e) {
-      if (e.getCause() instanceof AssertionError) {
-        throw (AssertionError) e.getCause();
-      }
-      throw e;
-    }
-  }
-
-  public static class TestAlterAndDisableMasterObserver extends BaseMasterObserver {
-    @Override
-    public void preAddColumnFamilyAction(ObserverContext<MasterCoprocessorEnvironment> ctx,
-        TableName tableName, HColumnDescriptor columnFamily) throws IOException {
-      LOG.debug("addColumn called");
-      addColumn.countDown();
-    }
-
-    @Override
-    public void postCompletedAddColumnFamilyAction(
-        ObserverContext<MasterCoprocessorEnvironment> ctx,
-        TableName tableName, HColumnDescriptor columnFamily) throws IOException {
-      Threads.sleep(6000);
-      try {
-        ctx.getEnvironment().getMasterServices().checkTableModifiable(tableName);
-      } catch(TableNotDisabledException expected) {
-        //pass
-        return;
-      } catch(IOException ex) {
-      }
-      fail("was expecting the table to be enabled");
-    }
-
-    @Override
-    public void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
-                                TableName tableName) throws IOException {
-      try {
-        LOG.debug("Waiting for addColumn to be processed first");
-        //wait for addColumn to be processed first
-        addColumn.await();
-        LOG.debug("addColumn started, we can continue");
-      } catch (InterruptedException ex) {
-        LOG.warn("Sleep interrupted while waiting for addColumn countdown");
-      }
-    }
-
-    @Override
-    public void postDisableTableHandler(ObserverContext<MasterCoprocessorEnvironment> ctx,
-                                        TableName tableName) throws IOException {
-      Threads.sleep(3000);
-    }
-  }
-
-  @Test(timeout = 600000)
-  public void testDelete() throws Exception {
-    prepareMiniCluster();
-
-    Admin admin = TEST_UTIL.getHBaseAdmin();
-    admin.disableTable(TABLE_NAME);
-    admin.deleteTable(TABLE_NAME);
-
-    //ensure that znode for the table node has been deleted
-    final ZooKeeperWatcher zkWatcher = TEST_UTIL.getZooKeeperWatcher();
-    final String znode = ZKUtil.joinZNode(zkWatcher.znodePaths.tableLockZNode,
-      TABLE_NAME.getNameAsString());
-
-    TEST_UTIL.waitFor(5000, new Waiter.Predicate<Exception>() {
-      @Override
-      public boolean evaluate() throws Exception {
-        int ver = ZKUtil.checkExists(zkWatcher, znode);
-        return ver < 0;
-      }
-    });
-    int ver = ZKUtil.checkExists(zkWatcher,
-      ZKUtil.joinZNode(zkWatcher.znodePaths.tableLockZNode, TABLE_NAME.getNameAsString()));
-    assertTrue("Unexpected znode version " + ver, ver < 0);
-
-  }
-
-  public class TableLockCounter implements InterProcessLock.MetadataHandler {
-
-    private int lockCount = 0;
-
-    @Override
-    public void handleMetadata(byte[] metadata) {
-      lockCount++;
-    }
-
-    public void reset() {
-      lockCount = 0;
-    }
-
-    public int getLockCount() {
-      return lockCount;
-    }
-  }
-
-  @Test(timeout = 600000)
-  public void testReapAllTableLocks() throws Exception {
-    prepareMiniZkCluster();
-    ServerName serverName = ServerName.valueOf("localhost:10000", 0);
-    final TableLockManager lockManager = TableLockManager.createTableLockManager(
-        TEST_UTIL.getConfiguration(), TEST_UTIL.getZooKeeperWatcher(), serverName);
-
-    String tables[] = {"table1", "table2", "table3", "table4"};
-    ExecutorService executor = Executors.newFixedThreadPool(6);
-
-    final CountDownLatch writeLocksObtained = new CountDownLatch(4);
-    final CountDownLatch writeLocksAttempted = new CountDownLatch(10);
-    //TODO: read lock tables
-
-    //6 threads will be stuck waiting for the table lock
-    for (int i = 0; i < tables.length; i++) {
-      final String table = tables[i];
-      for (int j = 0; j < i+1; j++) { //i+1 write locks attempted for table[i]
-        executor.submit(new Callable<Void>() {
-          @Override
-          public Void call() throws Exception {
-            writeLocksAttempted.countDown();
-            lockManager.writeLock(TableName.valueOf(table),
-                    "testReapAllTableLocks").acquire();
-            writeLocksObtained.countDown();
-            return null;
-          }
-        });
-      }
-    }
-
-    writeLocksObtained.await();
-    writeLocksAttempted.await();
-
-    TableLockCounter counter = new TableLockCounter();
-    do {
-      counter.reset();
-      lockManager.visitAllLocks(counter);
-      Thread.sleep(10);
-    } while (counter.getLockCount() != 10);
-
-    //now reap all table locks
-    lockManager.reapWriteLocks();
-    TEST_UTIL.getConfiguration().setInt(TableLockManager.TABLE_WRITE_LOCK_TIMEOUT_MS, 0);
-    TableLockManager zeroTimeoutLockManager = TableLockManager.createTableLockManager(
-          TEST_UTIL.getConfiguration(), TEST_UTIL.getZooKeeperWatcher(), serverName);
-
-    //should not throw table lock timeout exception
-    zeroTimeoutLockManager.writeLock(
-        TableName.valueOf(tables[tables.length - 1]),
-        "zero timeout")
-      .acquire();
-
-    executor.shutdownNow();
-  }
-
-  @Test(timeout = 600000)
-  public void testTableReadLock() throws Exception {
-    // test plan: write some data to the table. Continuously alter the table and
-    // force splits
-    // concurrently until we have 5 regions. verify the data just in case.
-    // Every region should contain the same table descriptor
-    // This is not an exact test
-    prepareMiniCluster();
-    LoadTestTool loadTool = new LoadTestTool();
-    loadTool.setConf(TEST_UTIL.getConfiguration());
-    int numKeys = 10000;
-    final TableName tableName = TableName.valueOf("testTableReadLock");
-    final Admin admin = TEST_UTIL.getHBaseAdmin();
-    final HTableDescriptor desc = new HTableDescriptor(tableName);
-    final byte[] family = Bytes.toBytes("test_cf");
-    desc.addFamily(new HColumnDescriptor(family));
-    admin.createTable(desc); // create with one region
-
-    // write some data, not much
-    int ret = loadTool.run(new String[] { "-tn", tableName.getNameAsString(), "-write",
-        String.format("%d:%d:%d", 1, 10, 10), "-num_keys", String.valueOf(numKeys), "-skip_init" });
-    if (0 != ret) {
-      String errorMsg = "Load failed with error code " + ret;
-      LOG.error(errorMsg);
-      fail(errorMsg);
-    }
-
-    int familyValues = admin.getTableDescriptor(tableName).getFamily(family).getValues().size();
-    StoppableImplementation stopper = new StoppableImplementation();
-    final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
-
-    //alter table every 10 sec
-    ScheduledChore alterThread = new ScheduledChore("Alter Chore", stopper, 10000) {
-      @Override
-      protected void chore() {
-        Random random = new Random();
-        try {
-          HTableDescriptor htd = admin.getTableDescriptor(tableName);
-          String val = String.valueOf(random.nextInt());
-          htd.getFamily(family).setValue(val, val);
-          desc.getFamily(family).setValue(val, val); // save it for later
-                                                     // control
-          admin.modifyTable(tableName, htd);
-        } catch (Exception ex) {
-          LOG.warn("Caught exception", ex);
-          fail(ex.getMessage());
-        }
-      }
-    };
-
-    //split table every 5 sec
-    ScheduledChore splitThread = new ScheduledChore("Split thread", stopper, 5000) {
-      @Override
-      public void chore() {
-        try {
-          HRegion region = TEST_UTIL.getSplittableRegion(tableName, -1);
-          if (region != null) {
-            byte[] regionName = region.getRegionInfo().getRegionName();
-            admin.flushRegion(regionName);
-            admin.compactRegion(regionName);
-            admin.splitRegion(regionName);
-          } else {
-            LOG.warn("Could not find suitable region for the table.  Possibly the " +
-              "region got closed and the attempts got over before " +
-              "the region could have got reassigned.");
-          }
-        } catch (NotServingRegionException nsre) {
-          // the region may be in transition
-          LOG.warn("Caught exception", nsre);
-        } catch (Exception ex) {
-          LOG.warn("Caught exception", ex);
-          fail(ex.getMessage());
-        }
-      }
-    };
-
-    choreService.scheduleChore(alterThread);
-    choreService.scheduleChore(splitThread);
-    TEST_UTIL.waitTableEnabled(tableName);
-    while (true) {
-      List<HRegionInfo> regions = admin.getTableRegions(tableName);
-      LOG.info(String.format("Table #regions: %d regions: %s:", regions.size(), regions));
-      assertEquals(admin.getTableDescriptor(tableName), desc);
-      for (HRegion region : TEST_UTIL.getMiniHBaseCluster().getRegions(tableName)) {
-        HTableDescriptor regionTableDesc = region.getTableDesc();
-        assertEquals(desc, regionTableDesc);
-      }
-      if (regions.size() >= 5) {
-        break;
-      }
-      Threads.sleep(1000);
-    }
-    stopper.stop("test finished");
-
-    int newFamilyValues = admin.getTableDescriptor(tableName).getFamily(family).getValues().size();
-    LOG.info(String.format("Altered the table %d times", newFamilyValues - familyValues));
-    assertTrue(newFamilyValues > familyValues); // at least one alter went
-                                                // through
-
-    ret = loadTool.run(new String[] { "-tn", tableName.getNameAsString(), "-read", "100:10",
-        "-num_keys", String.valueOf(numKeys), "-skip_init" });
-    if (0 != ret) {
-      String errorMsg = "Verify failed with error code " + ret;
-      LOG.error(errorMsg);
-      fail(errorMsg);
-    }
-
-    admin.close();
-    choreService.shutdown();
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
index be80646..f09ac07 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.client.locking.LockServiceClient;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.MasterRpcServices;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
@@ -97,7 +96,6 @@ public class TestLockProcedure {
     conf.setBoolean("hbase.procedure.check.owner.set", false);  // since rpc user will be null
     conf.setInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, HEARTBEAT_TIMEOUT);
     conf.setInt(LockProcedure.LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF, LOCAL_LOCKS_TIMEOUT);
-    conf.setInt(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT, ZK_EXPIRATION);
   }
 
   @BeforeClass
@@ -386,12 +384,6 @@ public class TestLockProcedure {
     ProcedureTestingUtility.waitProcedure(procExec, procId);
     assertEquals(false, procExec.isRunning());
     ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
-    // Remove zk lock node otherwise recovered lock will keep waiting on it. Remove
-    // both exclusive and non-exclusive (the table shared lock that the region takes).
-    // Have to pause to let the locks 'expire' up in zk. See above configs where we
-    // set explict zk timeout on locks.
-    Thread.sleep(ZK_EXPIRATION + HEARTBEAT_TIMEOUT);
-    UTIL.getMiniHBaseCluster().getMaster().getTableLockManager().reapAllExpiredLocks();
     ProcedureTestingUtility.restart(procExec);
     while (!procExec.isStarted(procId)) {
       Thread.sleep(250);
@@ -442,7 +434,6 @@ public class TestLockProcedure {
     assertEquals(false, procExec.isRunning());
     ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
     // remove zk lock node otherwise recovered lock will keep waiting on it.
-    UTIL.getMiniHBaseCluster().getMaster().getTableLockManager().reapWriteLocks();
     ProcedureTestingUtility.restart(procExec);
     while (!procExec.isStarted(lockProc.getProcId())) {
       Thread.sleep(250);

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
index a63ac03..efa45e7 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureSchedulerPerformanceEvaluation.java
@@ -28,7 +28,6 @@ import org.apache.commons.lang.ArrayUtils;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
@@ -243,8 +242,7 @@ public class MasterProcedureSchedulerPerformanceEvaluation extends AbstractHBase
 
   @Override
   protected int doWork() throws Exception {
-    procedureScheduler = new MasterProcedureScheduler(
-        UTIL.getConfiguration(), new TableLockManager.NullTableLockManager());
+    procedureScheduler = new MasterProcedureScheduler(UTIL.getConfiguration());
     procedureScheduler.start();
     setupOperations();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 7397168..dc60710 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
-import java.io.File;
 import java.io.IOException;
 import java.util.Arrays;
 
@@ -28,17 +27,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster;
-import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -58,7 +53,7 @@ public class TestMasterProcedureScheduler {
   @Before
   public void setUp() throws IOException {
     conf = HBaseConfiguration.create();
-    queue = new MasterProcedureScheduler(conf, new TableLockManager.NullTableLockManager());
+    queue = new MasterProcedureScheduler(conf);
     queue.start();
   }
 
@@ -334,35 +329,20 @@ public class TestMasterProcedureScheduler {
   }
 
   @Test
-  public void testSharedZkLock() throws Exception {
+  public void testSharedLock() throws Exception {
     final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-    final String dir = TEST_UTIL.getDataTestDir("TestSharedZkLock").toString();
-    MiniZooKeeperCluster zkCluster = new MiniZooKeeperCluster(conf);
-    int zkPort = zkCluster.startup(new File(dir));
 
-    try {
-      conf.set("hbase.zookeeper.quorum", "localhost:" + zkPort);
-
-      ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "testSchedWithZkLock", null, false);
-      ServerName mockName = ServerName.valueOf("localhost", 60000, 1);
-      MasterProcedureScheduler procQueue = new MasterProcedureScheduler(
-        conf,
-        TableLockManager.createTableLockManager(conf, zkw, mockName));
-
-      final TableName tableName = TableName.valueOf("testtb");
-      TestTableProcedure procA =
-          new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.READ);
-      TestTableProcedure procB =
-          new TestTableProcedure(2, tableName, TableProcedureInterface.TableOperationType.READ);
+    final TableName tableName = TableName.valueOf("testtb");
+    TestTableProcedure procA =
+        new TestTableProcedure(1, tableName, TableProcedureInterface.TableOperationType.READ);
+    TestTableProcedure procB =
+        new TestTableProcedure(2, tableName, TableProcedureInterface.TableOperationType.READ);
 
-      assertTrue(procQueue.tryAcquireTableSharedLock(procA, tableName));
-      assertTrue(procQueue.tryAcquireTableSharedLock(procB, tableName));
+    assertTrue(queue.tryAcquireTableSharedLock(procA, tableName));
+    assertTrue(queue.tryAcquireTableSharedLock(procB, tableName));
 
-      procQueue.releaseTableSharedLock(procA, tableName);
-      procQueue.releaseTableSharedLock(procB, tableName);
-    } finally {
-      zkCluster.shutdown();
-    }
+    queue.releaseTableSharedLock(procA, tableName);
+    queue.releaseTableSharedLock(procB, tableName);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
index 511b3de..a8192be 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureSchedulerConcurrency.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.procedure.TestMasterProcedureScheduler.TestTableProcedure;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.util.StringUtils;
@@ -44,7 +43,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 @Category({MasterTests.class, MediumTests.class})
@@ -59,7 +57,7 @@ public class TestMasterProcedureSchedulerConcurrency {
     conf = HBaseConfiguration.create();
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(HColumnDescriptor.MemoryCompaction.NONE));
-    queue = new MasterProcedureScheduler(conf, new TableLockManager.NullTableLockManager());
+    queue = new MasterProcedureScheduler(conf);
     queue.start();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
deleted file mode 100644
index c2f68a6..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMergeTableRegionsProcedure.java
+++ /dev/null
@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.master.procedure;
-
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.Admin;
-import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
-import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.testclassification.MediumTests;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.junit.experimental.categories.Category;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-@Category({MasterTests.class, MediumTests.class})
-public class TestMergeTableRegionsProcedure {
-  private static final Log LOG = LogFactory.getLog(TestMergeTableRegionsProcedure.class);
-
-  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
-
-  private static final int initialRegionCount = 4;
-  private final static byte[] FAMILY = Bytes.toBytes("FAMILY");
-  final static Configuration conf = UTIL.getConfiguration();
-  private static Admin admin;
-
-  private static void setupConf(Configuration conf) {
-    // Reduce the maximum attempts to speed up the test
-    conf.setInt("hbase.assignment.maximum.attempts", 3);
-    conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
-    conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);
-
-    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
-  }
-
-  @BeforeClass
-  public static void setupCluster() throws Exception {
-    setupConf(conf);
-    UTIL.startMiniCluster(1);
-    admin = UTIL.getHBaseAdmin();
-  }
-
-  @AfterClass
-  public static void cleanupTest() throws Exception {
-    try {
-      UTIL.shutdownMiniCluster();
-    } catch (Exception e) {
-      LOG.warn("failure shutting down cluster", e);
-    }
-  }
-
-  @Before
-  public void setup() throws Exception {
-    resetProcExecutorTestingKillFlag();
-    // Turn off balancer so it doesn't cut in and mess up our placements.
-    UTIL.getHBaseAdmin().setBalancerRunning(false, true);
-    // Turn off the meta scanner so it don't remove parent on us.
-    UTIL.getHBaseCluster().getMaster().setCatalogJanitorEnabled(false);
-    resetProcExecutorTestingKillFlag();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    resetProcExecutorTestingKillFlag();
-    for (HTableDescriptor htd: UTIL.getHBaseAdmin().listTables()) {
-      LOG.info("Tear down, remove table=" + htd.getTableName());
-      UTIL.deleteTable(htd.getTableName());
-    }
-  }
-
-  private void resetProcExecutorTestingKillFlag() {
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
-    assertTrue("expected executor to be running", procExec.isRunning());
-  }
-
-  /**
-   * This tests two region merges
-   */
-  @Test(timeout=60000)
-  public void testMergeTwoRegions() throws Exception {
-    final TableName tableName = TableName.valueOf("testMergeTwoRegions");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName);
-
-    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge[0] = tableRegions.get(0);
-    regionsToMerge[1] = tableRegions.get(1);
-
-    long procId = procExec.submitProcedure(new MergeTableRegionsProcedure(
-      procExec.getEnvironment(), regionsToMerge, true));
-    ProcedureTestingUtility.waitProcedure(procExec, procId);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
-
-    assertRegionCount(tableName, initialRegionCount - 1);
-  }
-
-  /**
-   * This tests two concurrent region merges
-   */
-  @Test(timeout=60000)
-  public void testMergeRegionsConcurrently() throws Exception {
-    final TableName tableName = TableName.valueOf("testMergeRegionsConcurrently");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName);
-
-    HRegionInfo[] regionsToMerge1 = new HRegionInfo[2];
-    HRegionInfo[] regionsToMerge2 = new HRegionInfo[2];
-    regionsToMerge1[0] = tableRegions.get(0);
-    regionsToMerge1[1] = tableRegions.get(1);
-    regionsToMerge2[0] = tableRegions.get(2);
-    regionsToMerge2[1] = tableRegions.get(3);
-
-    long procId1 = procExec.submitProcedure(new MergeTableRegionsProcedure(
-      procExec.getEnvironment(), regionsToMerge1, true));
-    long procId2 = procExec.submitProcedure(new MergeTableRegionsProcedure(
-      procExec.getEnvironment(), regionsToMerge2, true));
-    ProcedureTestingUtility.waitProcedure(procExec, procId1);
-    ProcedureTestingUtility.waitProcedure(procExec, procId2);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId1);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId2);
-    assertRegionCount(tableName, initialRegionCount - 2);
-  }
-
-  @Test(timeout=60000)
-  public void testRecoveryAndDoubleExecution() throws Exception {
-    final TableName tableName = TableName.valueOf("testRecoveryAndDoubleExecution");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName);
-
-    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
-
-    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge[0] = tableRegions.get(0);
-    regionsToMerge[1] = tableRegions.get(1);
-
-    long procId = procExec.submitProcedure(
-      new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true));
-
-    // Restart the executor and execute the step twice
-    int numberOfSteps = MergeTableRegionsState.values().length;
-    MasterProcedureTestingUtility.testRecoveryAndDoubleExecution(procExec, procId, numberOfSteps);
-    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
-
-    assertRegionCount(tableName, initialRegionCount - 1);
-  }
-
-  @Test(timeout = 60000)
-  public void testRollbackAndDoubleExecution() throws Exception {
-    final TableName tableName = TableName.valueOf("testRollbackAndDoubleExecution");
-    final ProcedureExecutor<MasterProcedureEnv> procExec = getMasterProcedureExecutor();
-
-    List<HRegionInfo> tableRegions = createTable(tableName);
-
-    ProcedureTestingUtility.waitNoProcedureRunning(procExec);
-    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
-
-    HRegionInfo[] regionsToMerge = new HRegionInfo[2];
-    regionsToMerge[0] = tableRegions.get(0);
-    regionsToMerge[1] = tableRegions.get(1);
-
-    long procId = procExec.submitProcedure(
-      new MergeTableRegionsProcedure(procExec.getEnvironment(), regionsToMerge, true));
-
-    // Failing before MERGE_TABLE_REGIONS_UPDATE_META we should trigger the rollback
-    // NOTE: the 6 (number before MERGE_TABLE_REGIONS_UPDATE_META step) is
-    // hardcoded, so you have to look at this test at least once when you add a new step.
-    int numberOfSteps = 6;
-    MasterProcedureTestingUtility.testRollbackAndDoubleExecution(procExec, procId, numberOfSteps);
-  }
-
-  private List<HRegionInfo> createTable(final TableName tableName)
-      throws Exception {
-    HTableDescriptor desc = new HTableDescriptor(tableName);
-    desc.addFamily(new HColumnDescriptor(FAMILY));
-    byte[][] splitRows = new byte[initialRegionCount - 1][];
-    for (int i = 0; i < splitRows.length; ++i) {
-      splitRows[i] = Bytes.toBytes(String.format("%d", i));
-    }
-    admin.createTable(desc, splitRows);
-    return assertRegionCount(tableName, initialRegionCount);
-  }
-
-  public List<HRegionInfo> assertRegionCount(final TableName tableName, final int nregions)
-      throws Exception {
-    UTIL.waitUntilNoRegionsInTransition();
-    List<HRegionInfo> tableRegions = admin.getTableRegions(tableName);
-    assertEquals(nregions, tableRegions.size());
-    return tableRegions;
-  }
-
-  private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
-    return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
index 2e7735b..3f3423e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMobStoreCompaction.java
@@ -245,7 +245,13 @@ public class TestMobStoreCompaction {
     region.compact(true);
     assertEquals("After compaction: store files", 1, countStoreFiles());
     // still have original mob hfiles and now added a mob del file
-    assertEquals("After compaction: mob files", numHfiles + 1, countMobFiles());
+    // CHANGED EXPECTATION WHEN LOCKING CHANGED. In this context, there is no locking because
+    // no RegionServerServices are provided on the region (it is null). With no services, and
+    // therefore no means of getting a lock, we run the mob compaction with
+    // compaction.getRequest().forceRetainDeleteMarkers();
+    // ... this changes the expected number. It is one less than when we run
+    // with the locks.
+    assertEquals("After compaction: mob files", numHfiles, countMobFiles());
 
     Scan scan = new Scan();
     scan.setRaw(true);
@@ -263,11 +269,16 @@ public class TestMobStoreCompaction {
       results.clear();
       scanner.next(results);
     }
-    // assert the delete mark is not retained after the major compaction
-    assertEquals(0, deleteCount);
+    // Assert the delete mark is not retained after the major compaction.
+    // See the CHANGED EXPECTATION WHEN LOCKING CHANGED note above. Here too we have a
+    // different expectation under the new locking regime.
+    // assertEquals(0, deleteCount);
     scanner.close();
     // assert the deleted cell is not counted
-    assertEquals("The cells in mob files", numHfiles - 1, countMobCellsInMobFiles(1));
+    // See the CHANGED EXPECTATION WHEN LOCKING CHANGED note above. Here too we have a
+    // different expectation under the new locking regime. We were passing '1' and expected
+    // numHfiles - 1... but that is changed below.
+    assertEquals("The cells in mob files", numHfiles, countMobCellsInMobFiles(0));
   }
 
   private int countStoreFiles() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
index e2a9bee..15766f5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerMetrics.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.test.MetricsAssertHelper;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -96,10 +97,11 @@ public class TestRegionServerMetrics {
     admin = TEST_UTIL.getHBaseAdmin();
     connection = TEST_UTIL.getConnection();
 
-    while (cluster.getLiveRegionServerThreads().size() < 1) {
+    while (cluster.getLiveRegionServerThreads().isEmpty() &&
+        cluster.getRegionServer(0) == null &&
+        rs.getRegionServerMetrics() == null) {
       Threads.sleep(100);
     }
-
     rs = cluster.getRegionServer(0);
     metricsRegionServer = rs.getRegionServerMetrics();
     serverSource = metricsRegionServer.getMetricsSource();
@@ -420,7 +422,6 @@ public class TestRegionServerMetrics {
       }
       metricsRegionServer.getRegionServerWrapper().forceRecompute();
       assertCounter("mobFlushCount", numHfiles);
-
       Scan scan = new Scan(Bytes.toBytes(0), Bytes.toBytes(numHfiles));
       ResultScanner scanner = table.getScanner(scan);
       scanner.next(100);

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index 92eaecc..c8fe299 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -165,7 +165,6 @@ public class TestTokenAuthentication {
 
         @Override
         public ServiceDescriptor getDescriptorForType() {
-          // TODO Auto-generated method stub
           return null;
         }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
index fcd5258..257dfc5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckOneRS.java
@@ -48,7 +48,6 @@ import org.apache.hadoop.hbase.io.hfile.TestHFile;
 import org.apache.hadoop.hbase.master.AssignmentManager;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.RegionStates;
-import org.apache.hadoop.hbase.master.TableLockManager;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.SplitTableRegionProcedure;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -1479,85 +1478,6 @@ public class TestHBaseFsckOneRS extends BaseTestHBaseFsck {
   }
 
   @Test(timeout=180000)
-  public void testCheckTableLocks() throws Exception {
-    IncrementingEnvironmentEdge edge = new IncrementingEnvironmentEdge(0);
-    EnvironmentEdgeManager.injectEdge(edge);
-    // check no errors
-    HBaseFsck hbck = doFsck(conf, false);
-    assertNoErrors(hbck);
-
-    ServerName mockName = ServerName.valueOf("localhost", 60000, 1);
-    final TableName tableName = TableName.valueOf("foo");
-
-    // obtain one lock
-    final TableLockManager tableLockManager =
-        TableLockManager.createTableLockManager(conf, TEST_UTIL.getZooKeeperWatcher(), mockName);
-    TableLockManager.TableLock
-        writeLock = tableLockManager.writeLock(tableName, "testCheckTableLocks");
-    writeLock.acquire();
-    hbck = doFsck(conf, false);
-    assertNoErrors(hbck); // should not have expired, no problems
-
-    edge.incrementTime(conf.getLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT,
-        TableLockManager.DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS)); // let table lock expire
-
-    hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
-        HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK});
-
-    final CountDownLatch latch = new CountDownLatch(1);
-    new Thread() {
-      @Override
-      public void run() {
-        TableLockManager.TableLock
-            readLock = tableLockManager.writeLock(tableName, "testCheckTableLocks");
-        try {
-          latch.countDown();
-          readLock.acquire();
-        } catch (IOException ex) {
-          fail();
-        } catch (IllegalStateException ex) {
-          return; // expected, since this will be reaped under us.
-        }
-        fail("should not have come here");
-      };
-    }.start();
-
-    latch.await(); // wait until thread starts
-    Threads.sleep(300); // wait some more to ensure writeLock.acquire() is called
-
-    hbck = doFsck(conf, false);
-    // still one expired, one not-expired
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
-        HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK});
-
-    edge.incrementTime(conf.getLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT,
-        TableLockManager.DEFAULT_TABLE_LOCK_EXPIRE_TIMEOUT_MS)); // let table lock expire
-
-    hbck = doFsck(conf, false);
-    assertErrors(hbck, new HBaseFsck.ErrorReporter.ERROR_CODE[] {
-        HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK,
-        HBaseFsck.ErrorReporter.ERROR_CODE.EXPIRED_TABLE_LOCK}); // both are expired
-
-    Configuration localConf = new Configuration(conf);
-    // reaping from ZKInterProcessWriteLock uses znode cTime,
-    // which is not injectable through EnvironmentEdge
-    localConf.setLong(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT, 1);
-
-    Threads.sleep(10);
-    hbck = doFsck(localConf, true); // now fix both cases
-
-    hbck = doFsck(localConf, false);
-    assertNoErrors(hbck);
-
-    // ensure that locks are deleted
-    writeLock = tableLockManager.writeLock(tableName, "should acquire without blocking");
-    writeLock.acquire(); // this should not block.
-    writeLock.release(); // release for clean state
-    tableLockManager.tableDeleted(tableName);
-  }
-
-  @Test(timeout=180000)
   public void testCheckReplication() throws Exception {
     // check no errors
     HBaseFsck hbck = doFsck(conf, false);

http://git-wip-us.apache.org/repos/asf/hbase/blob/76dc957f/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
index d1e774e..0c9b036 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/hbck/HbckTestingUtil.java
@@ -61,7 +61,6 @@ public class HbckTestingUtil {
       fsck.setFixVersionFile(fixVersionFile);
       fsck.setFixReferenceFiles(fixReferenceFiles);
       fsck.setFixEmptyMetaCells(fixEmptyMetaRegionInfo);
-      fsck.setFixTableLocks(fixTableLocks);
       fsck.setFixReplication(fixReplication);
       if (table != null) {
         fsck.includeTable(table);