Posted to commits@hbase.apache.org by st...@apache.org on 2017/01/14 05:07:24 UTC

[1/4] hbase git commit: HBASE-16744 Procedure V2 - Lock procedures to allow clients to acquire locks on tables/namespaces/regions (Matteo Bertozzi)

Repository: hbase
Updated Branches:
  refs/heads/master 9fd5dab1a -> 4cb09a494


http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index 7522e85..65eca6c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
 import org.apache.hadoop.hbase.HTableDescriptor;
@@ -55,15 +56,17 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
 import org.apache.hadoop.hbase.testclassification.CoprocessorTests;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -177,6 +180,10 @@ public class TestMasterObserver {
     private boolean postDispatchMergeCalled;
     private boolean preMergeRegionsCalled;
     private boolean postMergeRegionsCalled;
+    private boolean preRequestLockCalled;
+    private boolean postRequestLockCalled;
+    private boolean preLockHeartbeatCalled;
+    private boolean postLockHeartbeatCalled;
 
     public void enableBypass(boolean bypass) {
       this.bypass = bypass;
@@ -265,6 +272,10 @@ public class TestMasterObserver {
       postDispatchMergeCalled = false;
       preMergeRegionsCalled = false;
       postMergeRegionsCalled = false;
+      preRequestLockCalled = false;
+      postRequestLockCalled = false;
+      preLockHeartbeatCalled = false;
+      postLockHeartbeatCalled = false;
     }
 
     @Override
@@ -1497,7 +1508,38 @@ public class TestMasterObserver {
 
     @Override
     public void postBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
-                                   String groupName, boolean balancerRan) throws IOException {
+        String groupName, boolean balancerRan) throws IOException {
+    }
+
+    @Override
+    public void preRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
+        TableName tableName, HRegionInfo[] regionInfos, LockProcedure.LockType type,
+        String description) throws IOException {
+      preRequestLockCalled = true;
+    }
+
+    @Override
+    public void postRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
+        TableName tableName, HRegionInfo[] regionInfos, LockProcedure.LockType type,
+        String description) throws IOException {
+      postRequestLockCalled = true;
+    }
+
+    @Override
+    public void preLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        LockProcedure proc, boolean keepAlive) throws IOException {
+      preLockHeartbeatCalled = true;
+    }
+
+    @Override
+    public void postLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+        LockProcedure proc, boolean keepAlive) throws IOException {
+      postLockHeartbeatCalled = true;
+    }
+
+    public boolean preAndPostForQueueLockAndHeartbeatLockCalled() {
+      return preRequestLockCalled && postRequestLockCalled && preLockHeartbeatCalled &&
+          postLockHeartbeatCalled;
     }
 
     @Override
@@ -2121,4 +2163,22 @@ public class TestMasterObserver {
     tableDeletionLatch.await();
     tableDeletionLatch = new CountDownLatch(1);
   }
+
+  @Test
+  public void testQueueLockAndLockHeartbeatOperations() throws Exception {
+    HMaster master = UTIL.getMiniHBaseCluster().getMaster();
+    CPMasterObserver cp = (CPMasterObserver)master.getMasterCoprocessorHost().findCoprocessor(
+        CPMasterObserver.class.getName());
+    cp.resetStates();
+
+    final TableName tableName = TableName.valueOf("testLockedTable");
+    long procId = master.getLockManager().remoteLocks().requestTableLock(tableName,
+          LockProcedure.LockType.EXCLUSIVE, "desc", HConstants.NO_NONCE, HConstants.NO_NONCE);
+    master.getLockManager().remoteLocks().lockHeartbeat(procId, false);
+
+    assertTrue(cp.preAndPostForQueueLockAndHeartbeatLockCalled());
+
+    ProcedureTestingUtility.waitNoProcedureRunning(master.getMasterProcedureExecutor());
+    ProcedureTestingUtility.assertProcNotFailed(master.getMasterProcedureExecutor(), procId);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 10368b4..28bf14a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.TableDescriptors;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -379,7 +380,7 @@ public class MockNoopMasterServices implements MasterServices, Server {
     return null;
   }
 
-   @Override
+  @Override
   public MasterProcedureManagerHost getMasterProcedureManagerHost() {
     return null;
   }
@@ -432,4 +433,9 @@ public class MockNoopMasterServices implements MasterServices, Server {
       throws ReplicationException, IOException {
     return null;
   }
+
+  @Override
+  public LockManager getLockManager() {
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
index ec8054e..950ec92 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockRegionServer.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.locking.EntityLock;
 import org.apache.hadoop.hbase.executor.ExecutorService;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcServerInterface;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java
new file mode 100644
index 0000000..1f3241d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockManager.java
@@ -0,0 +1,161 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.locking;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
+import org.apache.hadoop.hbase.master.locking.TestLockProcedure;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestLockManager {
+  @Rule
+  public TestName testName = new TestName();
+  // crank this up if this test turns out to be flaky.
+  private static final int LOCAL_LOCKS_TIMEOUT = 1000;
+
+  private static final Log LOG = LogFactory.getLog(TestLockProcedure.class);
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static final Configuration conf = UTIL.getConfiguration();
+  private static MasterServices masterServices;
+
+  private static String namespace = "namespace";
+  private static TableName tableName = TableName.valueOf(namespace, "table");
+  private static HRegionInfo[] tableRegions;
+
+  private static void setupConf(Configuration conf) {
+    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+    conf.setBoolean("hbase.procedure.check.owner.set", false);  // since rpc user will be null
+    conf.setInt(LockProcedure.LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF, LOCAL_LOCKS_TIMEOUT);
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(1);
+    masterServices = UTIL.getMiniHBaseCluster().getMaster();
+    UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build());
+    UTIL.createTable(tableName, new byte[][]{"fam".getBytes()}, new byte[][] {"1".getBytes()});
+    List<HRegionInfo> regions = UTIL.getAdmin().getTableRegions(tableName);
+    assert regions.size() > 0;
+    tableRegions = new HRegionInfo[regions.size()];
+    regions.toArray(tableRegions);
+  }
+
+  @AfterClass
+  public static void cleanupTest() throws Exception {
+    try {
+      UTIL.shutdownMiniCluster();
+    } catch (Exception e) {
+      LOG.warn("failure shutting down cluster", e);
+    }
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    for (ProcedureInfo procInfo : getMasterProcedureExecutor().listProcedures()) {
+      Procedure proc = getMasterProcedureExecutor().getProcedure(procInfo.getProcId());
+      if (proc instanceof LockProcedure) {
+        ((LockProcedure) proc).unlock(getMasterProcedureExecutor().getEnvironment());
+        ProcedureTestingUtility.waitProcedure(getMasterProcedureExecutor(), proc);
+      }
+    }
+    assertEquals(0, getMasterProcedureExecutor().getEnvironment().getProcedureScheduler().size());
+  }
+
+  private ProcedureExecutor<MasterProcedureEnv> getMasterProcedureExecutor() {
+    return UTIL.getHBaseCluster().getMaster().getMasterProcedureExecutor();
+  }
+
+  /**
+   * Tests that basic lock functionality works.
+   */
+  @Test
+  public void testMasterLockAcquire() throws Exception {
+    LockManager.MasterLock lock = masterServices.getLockManager().createMasterLock(namespace,
+        LockProcedure.LockType.EXCLUSIVE, "desc");
+    assertTrue(lock.tryAcquire(2000));
+    assertTrue(lock.getProc().isLocked());
+    lock.release();
+    assertEquals(null, lock.getProc());
+  }
+
+  /**
+   * Two locks try to acquire a lock on the same table; assert that the later one times out.
+   */
+  @Test
+  public void testMasterLockAcquireTimeout() throws Exception {
+    LockManager.MasterLock lock = masterServices.getLockManager().createMasterLock(
+        tableName, LockProcedure.LockType.EXCLUSIVE, "desc");
+    LockManager.MasterLock lock2 = masterServices.getLockManager().createMasterLock(
+        tableName, LockProcedure.LockType.EXCLUSIVE, "desc");
+    assertTrue(lock.tryAcquire(2000));
+    assertFalse(lock2.tryAcquire(LOCAL_LOCKS_TIMEOUT/2));  // wait less than other lock's timeout
+    assertEquals(null, lock2.getProc());
+    lock.release();
+    assertTrue(lock2.tryAcquire(2000));
+    assertTrue(lock2.getProc().isLocked());
+    lock2.release();
+  }
+
+  /**
+   * Take a region lock, then try a table exclusive lock; the latter should time out.
+   */
+  @Test
+  public void testMasterLockAcquireTimeoutRegionVsTableExclusive() throws Exception {
+    LockManager.MasterLock lock = masterServices.getLockManager().createMasterLock(
+        tableRegions, "desc");
+    LockManager.MasterLock lock2 = masterServices.getLockManager().createMasterLock(
+        tableName, LockProcedure.LockType.EXCLUSIVE, "desc");
+    assertTrue(lock.tryAcquire(2000));
+    assertFalse(lock2.tryAcquire(LOCAL_LOCKS_TIMEOUT/2));  // wait less than other lock's timeout
+    assertEquals(null, lock2.getProc());
+    lock.release();
+    assertTrue(lock2.tryAcquire(2000));
+    assertTrue(lock2.getProc().isLocked());
+    lock2.release();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
new file mode 100644
index 0000000..be80646
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/locking/TestLockProcedure.java
@@ -0,0 +1,456 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.locking;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.locking.LockServiceClient;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.MasterRpcServices;
+import org.apache.hadoop.hbase.master.TableLockManager;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.*;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.hamcrest.core.IsInstanceOf;
+import org.hamcrest.core.StringStartsWith;
+import org.junit.rules.TestRule;
+import org.junit.experimental.categories.Category;
+
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.rules.TestName;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+@Category({MasterTests.class, SmallTests.class})
+public class TestLockProcedure {
+  @Rule
+  public final TestRule timeout = CategoryBasedTimeout.builder().
+    withTimeout(this.getClass()).withLookingForStuckThread(true).build();
+  @Rule
+  public final ExpectedException exception = ExpectedException.none();
+  @Rule
+  public TestName testName = new TestName();
+  // crank this up if this test turns out to be flaky.
+  private static final int HEARTBEAT_TIMEOUT = 1000;
+  private static final int LOCAL_LOCKS_TIMEOUT = 2000;
+  private static final int ZK_EXPIRATION = 2 * HEARTBEAT_TIMEOUT;
+
+  private static final Log LOG = LogFactory.getLog(TestLockProcedure.class);
+  protected static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+  private static MasterRpcServices masterRpcService;
+  private static ProcedureExecutor<MasterProcedureEnv> procExec;
+
+  private static String namespace = "namespace";
+  private static TableName tableName1 = TableName.valueOf(namespace, "table1");
+  private static List<HRegionInfo> tableRegions1;
+  private static TableName tableName2 = TableName.valueOf(namespace, "table2");
+  private static List<HRegionInfo> tableRegions2;
+
+  private String testMethodName;
+
+  private static void setupConf(Configuration conf) {
+    conf.setInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS, 1);
+    conf.setBoolean("hbase.procedure.check.owner.set", false);  // since rpc user will be null
+    conf.setInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, HEARTBEAT_TIMEOUT);
+    conf.setInt(LockProcedure.LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF, LOCAL_LOCKS_TIMEOUT);
+    conf.setInt(TableLockManager.TABLE_LOCK_EXPIRE_TIMEOUT, ZK_EXPIRATION);
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws Exception {
+    setupConf(UTIL.getConfiguration());
+    UTIL.startMiniCluster(1);
+    UTIL.getAdmin().createNamespace(NamespaceDescriptor.create(namespace).build());
+    UTIL.createTable(tableName1, new byte[][]{"fam".getBytes()}, new byte[][] {"1".getBytes()});
+    UTIL.createTable(tableName2, new byte[][]{"fam".getBytes()}, new byte[][] {"1".getBytes()});
+    masterRpcService = UTIL.getHBaseCluster().getMaster().getMasterRpcServices();
+    procExec = UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
+    tableRegions1 = UTIL.getAdmin().getTableRegions(tableName1);
+    tableRegions2 = UTIL.getAdmin().getTableRegions(tableName2);
+    assert tableRegions1.size() > 0;
+    assert tableRegions2.size() > 0;
+  }
+
+  @AfterClass
+  public static void cleanupTest() throws Exception {
+    try {
+      UTIL.shutdownMiniCluster();
+    } catch (Exception e) {
+      LOG.warn("failure shutting down cluster", e);
+    }
+  }
+
+  @Before
+  public void setup() throws Exception {
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+    testMethodName = testName.getMethodName();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+    // Kill all running procedures.
+    for (ProcedureInfo procInfo : procExec.listProcedures()) {
+      Procedure proc = procExec.getProcedure(procInfo.getProcId());
+      if (proc == null) continue;
+      procExec.abort(procInfo.getProcId());
+      ProcedureTestingUtility.waitProcedure(procExec, proc);
+    }
+    assertEquals(0, procExec.getEnvironment().getProcedureScheduler().size());
+  }
+
+  private LockRequest getNamespaceLock(String namespace, String description) {
+    return LockServiceClient.buildLockRequest(LockType.EXCLUSIVE,
+        namespace, null, null, description, HConstants.NO_NONCE, HConstants.NO_NONCE);
+  }
+
+  private LockRequest getTableExclusiveLock(TableName tableName, String description) {
+    return LockServiceClient.buildLockRequest(LockType.EXCLUSIVE,
+        null, tableName, null, description, HConstants.NO_NONCE, HConstants.NO_NONCE);
+  }
+
+  private LockRequest getRegionLock(List<HRegionInfo> regionInfos, String description) {
+    return LockServiceClient.buildLockRequest(LockType.EXCLUSIVE,
+        null, null, regionInfos, description, HConstants.NO_NONCE, HConstants.NO_NONCE);
+  }
+
+  private void validateLockRequestException(LockRequest lockRequest, String message)
+      throws Exception {
+    exception.expect(ServiceException.class);
+    exception.expectCause(IsInstanceOf.instanceOf(DoNotRetryIOException.class));
+    exception.expectMessage(
+        StringStartsWith.startsWith("org.apache.hadoop.hbase.DoNotRetryIOException: "
+            + "java.lang.IllegalArgumentException: " + message));
+    masterRpcService.requestLock(null, lockRequest);
+  }
+
+  @Test
+  public void testLockRequestValidationEmptyDescription() throws Exception {
+    validateLockRequestException(getNamespaceLock("", ""), "Empty description");
+  }
+
+  @Test
+  public void testLockRequestValidationEmptyNamespaceName() throws Exception {
+    validateLockRequestException(getNamespaceLock("", "desc"), "Empty namespace");
+  }
+
+  @Test
+  public void testLockRequestValidationRegionsFromDifferentTable() throws Exception {
+    List<HRegionInfo> regions = new ArrayList<>();
+    regions.addAll(tableRegions1);
+    regions.addAll(tableRegions2);
+    validateLockRequestException(getRegionLock(regions, "desc"),
+        "All regions should be from same table");
+  }
+
+  /**
+   * Polls the lock status via heartbeat RPCs, returning as soon as the lock is acquired.
+   * @return true if the lock was acquired within timeoutInMs, false otherwise.
+   */
+  private boolean awaitForLocked(long procId, long timeoutInMs) throws Exception {
+    long deadline = System.currentTimeMillis() + timeoutInMs;
+    while (System.currentTimeMillis() < deadline) {
+      LockHeartbeatResponse response = masterRpcService.lockHeartbeat(null,
+          LockHeartbeatRequest.newBuilder().setProcId(procId).build());
+      if (response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) {
+        assertEquals(response.getTimeoutMs(), HEARTBEAT_TIMEOUT);
+        LOG.debug(String.format("Proc id %s acquired lock.", procId));
+        return true;
+      }
+      Thread.sleep(100);
+    }
+    return false;
+  }
+
+  private long queueLock(LockRequest lockRequest) throws ServiceException {
+    LockResponse response = masterRpcService.requestLock(null, lockRequest);
+    return response.getProcId();
+  }
+
+  private void sendHeartbeatAndCheckLocked(long procId, boolean isLocked) throws ServiceException {
+    LockHeartbeatResponse response = masterRpcService.lockHeartbeat(null,
+        LockHeartbeatRequest.newBuilder().setProcId(procId).build());
+    if (isLocked) {
+      assertEquals(LockHeartbeatResponse.LockStatus.LOCKED, response.getLockStatus());
+    } else {
+      assertEquals(LockHeartbeatResponse.LockStatus.UNLOCKED, response.getLockStatus());
+    }
+    LOG.debug(String.format("Proc id %s : %s.", procId, response.getLockStatus()));
+  }
+
+  private void releaseLock(long procId) throws ServiceException {
+    masterRpcService.lockHeartbeat(null,
+        LockHeartbeatRequest.newBuilder().setProcId(procId).setKeepAlive(false).build());
+  }
+
+  @Test
+  public void testUpdateHeartbeatAndUnlockForTable() throws Exception {
+    LockRequest lock = getTableExclusiveLock(tableName1, testMethodName);
+    final long procId = queueLock(lock);
+    assertTrue(awaitForLocked(procId, 2000));
+    Thread.sleep(HEARTBEAT_TIMEOUT /2);
+    sendHeartbeatAndCheckLocked(procId, true);
+    Thread.sleep(HEARTBEAT_TIMEOUT /2);
+    sendHeartbeatAndCheckLocked(procId, true);
+    Thread.sleep(HEARTBEAT_TIMEOUT /2);
+    sendHeartbeatAndCheckLocked(procId, true);
+    releaseLock(procId);
+    sendHeartbeatAndCheckLocked(procId, false);
+    ProcedureTestingUtility.waitProcedure(procExec, procId);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+  }
+
+  @Test
+  public void testAbort() throws Exception {
+    LockRequest lock = getTableExclusiveLock(tableName1, testMethodName);
+    final long procId = queueLock(lock);
+    assertTrue(awaitForLocked(procId, 2000));
+    assertTrue(procExec.abort(procId));
+    sendHeartbeatAndCheckLocked(procId, false);
+    ProcedureTestingUtility.waitProcedure(procExec, procId);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+  }
+
+  @Test
+  public void testUpdateHeartbeatAndUnlockForNamespace() throws Exception {
+    LockRequest lock = getNamespaceLock(namespace, testMethodName);
+    final long procId = queueLock(lock);
+    assertTrue(awaitForLocked(procId, 2000));
+    Thread.sleep(HEARTBEAT_TIMEOUT /2);
+    sendHeartbeatAndCheckLocked(procId, true);
+    Thread.sleep(HEARTBEAT_TIMEOUT /2);
+    sendHeartbeatAndCheckLocked(procId, true);
+    Thread.sleep(HEARTBEAT_TIMEOUT /2);
+    sendHeartbeatAndCheckLocked(procId, true);
+    releaseLock(procId);
+    sendHeartbeatAndCheckLocked(procId, false);
+    ProcedureTestingUtility.waitProcedure(procExec, procId);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+  }
+
+  @Test
+  public void testTimeout() throws Exception {
+    LockRequest lock = getNamespaceLock(namespace, testMethodName);
+    final long procId = queueLock(lock);
+    assertTrue(awaitForLocked(procId, 2000));
+    Thread.sleep(HEARTBEAT_TIMEOUT / 2);
+    sendHeartbeatAndCheckLocked(procId, true);
+    Thread.sleep(HEARTBEAT_TIMEOUT / 2);
+    sendHeartbeatAndCheckLocked(procId, true);
+    Thread.sleep(2 * HEARTBEAT_TIMEOUT);
+    sendHeartbeatAndCheckLocked(procId, false);
+    ProcedureTestingUtility.waitProcedure(procExec, procId);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+  }
+
+  @Test
+  public void testMultipleLocks() throws Exception {
+    LockRequest nsLock = getNamespaceLock(namespace, testMethodName);
+    LockRequest tableLock1 = getTableExclusiveLock(tableName1, testMethodName);
+    LockRequest tableLock2 =  getTableExclusiveLock(tableName2, testMethodName);
+    LockRequest regionsLock1 = getRegionLock(tableRegions1, testMethodName);
+    LockRequest regionsLock2 = getRegionLock(tableRegions2, testMethodName);
+    // Acquire namespace lock, then queue other locks.
+    long nsProcId = queueLock(nsLock);
+    assertTrue(awaitForLocked(nsProcId, 2000));
+    sendHeartbeatAndCheckLocked(nsProcId, true);
+    long table1ProcId = queueLock(tableLock1);
+    long table2ProcId = queueLock(tableLock2);
+    long regions1ProcId = queueLock(regionsLock1);
+    long regions2ProcId = queueLock(regionsLock2);
+
+    // Assert tables & region locks are waiting because of namespace lock.
+    Thread.sleep(HEARTBEAT_TIMEOUT / 2);
+    sendHeartbeatAndCheckLocked(nsProcId, true);
+    sendHeartbeatAndCheckLocked(table1ProcId, false);
+    sendHeartbeatAndCheckLocked(table2ProcId, false);
+    sendHeartbeatAndCheckLocked(regions1ProcId, false);
+    sendHeartbeatAndCheckLocked(regions2ProcId, false);
+
+    // Release namespace lock and assert tables locks are acquired but not region lock
+    releaseLock(nsProcId);
+    assertTrue(awaitForLocked(table1ProcId, 2000));
+    assertTrue(awaitForLocked(table2ProcId, 2000));
+    sendHeartbeatAndCheckLocked(regions1ProcId, false);
+    sendHeartbeatAndCheckLocked(regions2ProcId, false);
+
+    // Release table1 lock and assert region lock is acquired.
+    releaseLock(table1ProcId);
+    sendHeartbeatAndCheckLocked(table1ProcId, false);
+    assertTrue(awaitForLocked(regions1ProcId, 2000));
+    sendHeartbeatAndCheckLocked(table2ProcId, true);
+    sendHeartbeatAndCheckLocked(regions2ProcId, false);
+
+    // Release table2 lock and assert region lock is acquired.
+    releaseLock(table2ProcId);
+    sendHeartbeatAndCheckLocked(table2ProcId, false);
+    assertTrue(awaitForLocked(regions2ProcId, 2000));
+    sendHeartbeatAndCheckLocked(regions1ProcId, true);
+    sendHeartbeatAndCheckLocked(regions2ProcId, true);
+
+    // Release region locks.
+    releaseLock(regions1ProcId);
+    releaseLock(regions2ProcId);
+    sendHeartbeatAndCheckLocked(regions1ProcId, false);
+    sendHeartbeatAndCheckLocked(regions2ProcId, false);
+    ProcedureTestingUtility.waitAllProcedures(procExec);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, nsProcId);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, table1ProcId);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, table2ProcId);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, regions1ProcId);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, regions2ProcId);
+  }
+
+  // Test latch is decreased in count when lock is acquired.
+  @Test
+  public void testLatch() throws Exception {
+    CountDownLatch latch = new CountDownLatch(1);
+    // MasterRpcServices don't set latch with LockProcedure, so create one and submit it directly.
+    LockProcedure lockProc = new LockProcedure(UTIL.getConfiguration(),
+        TableName.valueOf("table"), LockProcedure.LockType.EXCLUSIVE, "desc", latch);
+    procExec.submitProcedure(lockProc);
+    assertTrue(latch.await(2000, TimeUnit.MILLISECONDS));
+    releaseLock(lockProc.getProcId());
+    ProcedureTestingUtility.waitProcedure(procExec, lockProc.getProcId());
+    ProcedureTestingUtility.assertProcNotFailed(procExec, lockProc.getProcId());
+  }
+
+  // LockProcedures with latch are considered local locks.
+  @Test
+  public void testLocalLockTimeout() throws Exception {
+    CountDownLatch latch = new CountDownLatch(1);
+    // MasterRpcServices don't set latch with LockProcedure, so create one and submit it directly.
+    LockProcedure lockProc = new LockProcedure(UTIL.getConfiguration(),
+        TableName.valueOf("table"), LockProcedure.LockType.EXCLUSIVE, "desc", latch);
+    procExec.submitProcedure(lockProc);
+    assertTrue(awaitForLocked(lockProc.getProcId(), 2000));
+    Thread.sleep(LOCAL_LOCKS_TIMEOUT / 2);
+    assertTrue(lockProc.isLocked());
+    Thread.sleep(2 * LOCAL_LOCKS_TIMEOUT);
+    assertFalse(lockProc.isLocked());
+    releaseLock(lockProc.getProcId());
+    ProcedureTestingUtility.waitProcedure(procExec, lockProc.getProcId());
+    ProcedureTestingUtility.assertProcNotFailed(procExec, lockProc.getProcId());
+  }
+
+  private void testRemoteLockRecovery(LockRequest lock) throws Exception {
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+    final long procId = queueLock(lock);
+    assertTrue(awaitForLocked(procId, 2000));
+
+    // wait for proc Executor to die, then restart it and wait for Lock Procedure to get started.
+    ProcedureTestingUtility.waitProcedure(procExec, procId);
+    assertEquals(false, procExec.isRunning());
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+    // Remove zk lock node otherwise recovered lock will keep waiting on it. Remove
+    // both exclusive and non-exclusive (the table shared lock that the region takes).
+    // Have to pause to let the locks 'expire' up in zk. See above configs where we
+    // set explicit zk timeout on locks.
+    Thread.sleep(ZK_EXPIRATION + HEARTBEAT_TIMEOUT);
+    UTIL.getMiniHBaseCluster().getMaster().getTableLockManager().reapAllExpiredLocks();
+    ProcedureTestingUtility.restart(procExec);
+    while (!procExec.isStarted(procId)) {
+      Thread.sleep(250);
+    }
+    assertEquals(true, procExec.isRunning());
+
+    // After recovery, remote locks should reacquire locks and function normally.
+    assertTrue(awaitForLocked(procId, 2000));
+    Thread.sleep(HEARTBEAT_TIMEOUT/2);
+    sendHeartbeatAndCheckLocked(procId, true);
+    Thread.sleep(HEARTBEAT_TIMEOUT/2);
+    sendHeartbeatAndCheckLocked(procId, true);
+    Thread.sleep(2 * HEARTBEAT_TIMEOUT);
+    sendHeartbeatAndCheckLocked(procId, false);
+    ProcedureTestingUtility.waitProcedure(procExec, procId);
+    ProcedureTestingUtility.assertProcNotFailed(procExec, procId);
+  }
+
+  @Test(timeout = 20000)
+  public void testRemoteTableLockRecovery() throws Exception {
+    LockRequest lock = getTableExclusiveLock(tableName1, testMethodName);
+    testRemoteLockRecovery(lock);
+  }
+
+  @Test(timeout = 20000)
+  public void testRemoteNamespaceLockRecovery() throws Exception {
+    LockRequest lock = getNamespaceLock(namespace, testMethodName);
+    testRemoteLockRecovery(lock);
+  }
+
+  @Test(timeout = 20000)
+  public void testRemoteRegionLockRecovery() throws Exception {
+    LockRequest lock = getRegionLock(tableRegions1, testMethodName);
+    testRemoteLockRecovery(lock);
+  }
+
+  @Test (timeout = 20000)
+  public void testLocalMasterLockRecovery() throws Exception {
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, true);
+    CountDownLatch latch = new CountDownLatch(1);
+    LockProcedure lockProc = new LockProcedure(UTIL.getConfiguration(),
+        TableName.valueOf("table"), LockProcedure.LockType.EXCLUSIVE, "desc", latch);
+    procExec.submitProcedure(lockProc);
+    assertTrue(latch.await(2000, TimeUnit.MILLISECONDS));
+
+    // wait for proc Executor to die, then restart it and wait for Lock Procedure to get started.
+    ProcedureTestingUtility.waitProcedure(procExec, lockProc.getProcId());
+    assertEquals(false, procExec.isRunning());
+    ProcedureTestingUtility.setKillAndToggleBeforeStoreUpdate(procExec, false);
+    // remove zk lock node otherwise recovered lock will keep waiting on it.
+    UTIL.getMiniHBaseCluster().getMaster().getTableLockManager().reapWriteLocks();
+    ProcedureTestingUtility.restart(procExec);
+    while (!procExec.isStarted(lockProc.getProcId())) {
+      Thread.sleep(250);
+    }
+    assertEquals(true, procExec.isRunning());
+    LockProcedure proc = (LockProcedure) procExec.getProcedure(lockProc.getProcId());
+    assertTrue(proc == null || !proc.isLocked());
+    ProcedureTestingUtility.waitProcedure(procExec, lockProc.getProcId());
+    ProcedureTestingUtility.assertProcNotFailed(procExec, lockProc.getProcId());
+  }
+}


[2/4] hbase git commit: HBASE-16744 Procedure V2 - Lock procedures to allow clients to acquire locks on tables/namespaces/regions (Matteo Bertozzi)

Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-protocol-shaded/src/main/protobuf/LockService.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
new file mode 100644
index 0000000..0df7f2e
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -0,0 +1,79 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package hbase.pb;
+
+option java_package = "org.apache.hadoop.hbase.shaded.protobuf.generated";
+option java_outer_classname = "LockServiceProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+option optimize_for = SPEED;
+
+import "HBase.proto";
+
+enum LockType {
+  EXCLUSIVE = 1;
+  SHARED = 2;
+}
+
+message LockRequest {
+  required LockType lock_type = 1;
+  optional string namespace = 2;
+  optional TableName table_name = 3;
+  repeated RegionInfo region_info = 4;
+  optional string description = 5;
+  optional uint64 nonce_group = 6 [default = 0];
+  optional uint64 nonce = 7 [default = 0];
+}
+
+message LockResponse {
+  required uint64 proc_id = 1;
+}
+
+message LockHeartbeatRequest {
+  required uint64 proc_id = 1;
+  optional bool keep_alive = 2 [default = true];
+}
+
+message LockHeartbeatResponse {
+  enum LockStatus {
+    UNLOCKED = 1;
+    LOCKED = 2;
+  }
+
+  required LockStatus lock_status = 1;
+  // Timeout of lock (if locked).
+  optional uint32 timeout_ms = 2;
+}
+
+message LockProcedureData {
+  required LockType lock_type = 1;
+  optional string namespace = 2;
+  optional TableName table_name = 3;
+  repeated RegionInfo region_info = 4;
+  optional string description = 5;
+  optional bool is_master_lock = 6 [default = false];
+}
+
+service LockService {
+  /** Acquire lock on namespace/table/region */
+  rpc RequestLock(LockRequest) returns(LockResponse);
+
+  /** Keep alive (or not) a previously acquired lock */
+  rpc LockHeartbeat(LockHeartbeatRequest) returns(LockHeartbeatResponse);
+}
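
The proto above is the whole wire contract: RequestLock only queues the lock and returns a procedure id, while LockHeartbeat doubles as keep-alive, status poll, and (with keep_alive=false) release. Below is a minimal sketch of that flow, assuming a LockService.BlockingInterface stub is already in hand and reusing the buildLockRequest helper added by this change; the wrapper class and method names are illustrative only, not part of this commit.

// Illustrative sketch (not part of this commit): driving the LockService RPCs directly.
// Assumes a LockService.BlockingInterface stub is available; EntityLock/LockServiceClient
// below wrap this same flow for real callers.
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.locking.LockServiceClient;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType;

public class LockServiceFlowSketch {
  /** Queues an exclusive namespace lock, polls its status once, then releases it. */
  static boolean lockPollRelease(LockService.BlockingInterface stub, String namespace)
      throws ServiceException {
    LockRequest request = LockServiceClient.buildLockRequest(LockType.EXCLUSIVE,
        namespace, null, null, "example description",
        HConstants.NO_NONCE, HConstants.NO_NONCE);
    // RequestLock queues the lock; the proc id identifies it in all later heartbeats.
    long procId = stub.requestLock(null, request).getProcId();
    // LockHeartbeat reports LOCKED/UNLOCKED and, when locked, the heartbeat timeout.
    LockHeartbeatResponse hb = stub.lockHeartbeat(null,
        LockHeartbeatRequest.newBuilder().setProcId(procId).build());
    boolean locked = hb.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED;
    // Sending keep_alive=false releases the lock (or cancels it if still queued).
    stub.lockHeartbeat(null,
        LockHeartbeatRequest.newBuilder().setProcId(procId).setKeepAlive(false).build());
    return locked;
  }
}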

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
index 2065939..2dd3b00 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminEndpoint.java
@@ -20,19 +20,11 @@
 
 package org.apache.hadoop.hbase.rsgroup;
 
-import com.google.common.collect.Sets;
-import com.google.common.net.HostAndPort;
-import com.google.protobuf.RpcCallback;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.Service;
-
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Coprocessor;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -54,12 +46,12 @@ import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
+import org.apache.hadoop.hbase.master.locking.LockProcedure.LockType;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.AddRSGroupResponse;
@@ -80,12 +72,18 @@ import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.MoveTablesR
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RSGroupAdminService;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupRequest;
 import org.apache.hadoop.hbase.protobuf.generated.RSGroupAdminProtos.RemoveRSGroupResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 
-@InterfaceAudience.Private
-public class RSGroupAdminEndpoint extends RSGroupAdminService
-    implements CoprocessorService, Coprocessor, MasterObserver {
+import com.google.common.collect.Sets;
+import com.google.common.net.HostAndPort;
+import com.google.protobuf.RpcCallback;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.Service;
 
-  private static final Log LOG = LogFactory.getLog(RSGroupAdminEndpoint.class);
+@InterfaceAudience.Private
+public class RSGroupAdminEndpoint extends RSGroupAdminService implements CoprocessorService, 
+    Coprocessor, MasterObserver {
   private MasterServices master = null;
 
   private static RSGroupInfoManagerImpl groupInfoManager;
@@ -97,7 +95,7 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
     master = menv.getMasterServices();
     setGroupInfoManager(new RSGroupInfoManagerImpl(master));
     groupAdminServer = new RSGroupAdminServer(master, groupInfoManager);
-    Class clazz =
+    Class<?> clazz =
         master.getConfiguration().getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, null);
     if (!RSGroupableBalancer.class.isAssignableFrom(clazz)) {
       throw new IOException("Configured balancer is not a GroupableBalancer");
@@ -1181,4 +1179,28 @@ public class RSGroupAdminEndpoint extends RSGroupAdminService
       final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final HRegionInfo[] regionsToMerge) throws IOException {
   }
-}
+
+  @Override
+  public void preLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      LockProcedure proc, boolean keepAlive) throws IOException {
+  }
+
+  @Override
+  public void postLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      LockProcedure proc, boolean keepAlive) throws IOException {
+  }
+
+  @Override
+  public void preRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace, TableName tableName,
+      HRegionInfo[] regionInfos, LockType type, String description) throws IOException {
+    // TODO Auto-generated method stub
+    
+  }
+
+  @Override
+  public void postRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace, TableName tableName,
+      HRegionInfo[] regionInfos, LockType type, String description) throws IOException {
+    // TODO Auto-generated method stub
+    
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
new file mode 100644
index 0000000..990c76d
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client.locking;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
+import org.apache.hadoop.hbase.util.Threads;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Lock for HBase Entity either a Table, a Namespace, or Regions.
+ *
+ * These are remote locks which live on the master, and need periodic heartbeats to keep them
+ * alive. (Once we request the lock, a heartbeat thread is started internally on the client.)
+ * If the master does not receive a heartbeat in time, it'll release the lock and make it
+ * available to other users.
+ *
+ * <p>Use {@link LockServiceClient} to build instances. Then call {@link #requestLock()}.
+ * {@link #requestLock} will contact the master to queue the lock and start the heartbeat thread,
+ * which will check the lock's status periodically; once the lock is acquired, it will send
+ * heartbeats to the master.
+ *
+ * <p>Use {@link #await} or {@link #await(long, TimeUnit)} to wait for the lock to be acquired.
+ * Always call {@link #unlock()} irrespective of whether the lock was acquired or not. If the lock
+ * was acquired, it'll be released. If it was not acquired, it is possible that the master grants
+ * the lock in the future and the heartbeat thread keeps it alive forever by sending heartbeats.
+ * Calling {@link #unlock()} will stop the heartbeat thread and cancel the lock queued on master.
+ *
+ * <p>There are 4 ways in which these remote locks may be released/can be lost:
+ * <ul><li>Call {@link #unlock}.</li>
+ * <li>Lock times out on master: Can happen because of network issues, GC pauses, etc.
+ *     Worker thread will call the given abortable as soon as it detects such a situation.</li>
+ * <li>Fail to contact master: If the worker thread cannot contact the master and thus fails to
+ *     send a heartbeat before the timeout expires, it assumes the lock is lost and calls the
+ *     abortable.</li>
+ * <li>Worker thread is interrupted.</li>
+ * </ul>
+ *
+ * Use example:
+ * <code>
+ * EntityLock lock = lockServiceClient.*Lock(...., "example lock", abortable);
+ * lock.requestLock();
+ * ....
+ * ....can do other initializations here since lock is 'asynchronous'...
+ * ....
+ * if (lock.await(timeout)) {
+ *   ....logic requiring mutual exclusion
+ * }
+ * lock.unlock();
+ * </code>
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
+public class EntityLock {
+  private static final Log LOG = LogFactory.getLog(EntityLock.class);
+
+  public static final String HEARTBEAT_TIME_BUFFER =
+      "hbase.client.locks.heartbeat.time.buffer.ms";
+
+  private final AtomicBoolean locked = new AtomicBoolean(false);
+  private final CountDownLatch latch = new CountDownLatch(1);
+
+  private final LockService.BlockingInterface stub;
+  private final LockHeartbeatWorker worker;
+  private final LockRequest lockRequest;
+  private final Abortable abort;
+
+  // Buffer for unexpected delays (GC, network delay, etc) in heartbeat rpc.
+  private final int heartbeatTimeBuffer;
+
+  // set to a non-zero value for tweaking sleep time during testing so that worker doesn't wait
+  // for long time periods between heartbeats.
+  private long testingSleepTime = 0;
+
+  private Long procId = null;
+
+  /**
+   * Abortable.abort() is called when the lease of the lock will expire.
+   * It's up to the user to decide whether to simply abort the process or to handle the loss of
+   * the lock by aborting the operation that was supposed to run under it.
+   */
+  EntityLock(Configuration conf, LockService.BlockingInterface stub,
+      LockRequest request, Abortable abort) {
+    this.stub = stub;
+    this.lockRequest = request;
+    this.abort = abort;
+
+    this.heartbeatTimeBuffer = conf.getInt(HEARTBEAT_TIME_BUFFER, 10000);
+    this.worker = new LockHeartbeatWorker(lockRequest.getDescription());
+  }
+
+  @Override
+  public String toString() {
+    final StringBuilder sb = new StringBuilder();
+    sb.append("EntityLock locked=");
+    sb.append(locked.get());
+    sb.append(", procId=");
+    sb.append(procId);
+    sb.append(", type=");
+    sb.append(lockRequest.getLockType());
+    if (lockRequest.getRegionInfoCount() > 0) {
+      sb.append(", regions=");
+      for (int i = 0; i < lockRequest.getRegionInfoCount(); ++i) {
+        if (i > 0) sb.append(", ");
+        sb.append(lockRequest.getRegionInfo(i));
+      }
+    } else if (lockRequest.hasTableName()) {
+      sb.append(", table=");
+      sb.append(lockRequest.getTableName());
+    } else if (lockRequest.hasNamespace()) {
+      sb.append(", namespace=");
+      sb.append(lockRequest.getNamespace());
+    }
+    sb.append(", description=");
+    sb.append(lockRequest.getDescription());
+    return sb.toString();
+  }
+
+  @VisibleForTesting
+  void setTestingSleepTime(long timeInMillis) {
+    testingSleepTime = timeInMillis;
+  }
+
+  @VisibleForTesting
+  LockHeartbeatWorker getWorker() {
+    return worker;
+  }
+
+  public boolean isLocked() {
+    return locked.get();
+  }
+
+  /**
+   * Sends rpc to the master to request lock.
+   * The lock request is queued with other lock requests.
+   */
+  public void requestLock() throws IOException {
+    if (procId == null) {
+      try {
+        procId = stub.requestLock(null, lockRequest).getProcId();
+      } catch (Exception e) {
+        throw ProtobufUtil.handleRemoteException(e);
+      }
+      worker.start();
+    } else {
+      LOG.info("Lock already queued : " + toString());
+    }
+  }
+
+  /**
+   * @param timeout maximum time to wait for the lock, in units of timeUnit.
+   * @return true if the lock was acquired; false if the waiting time elapsed before the lock
+   * could be acquired.
+   */
+  public boolean await(long timeout, TimeUnit timeUnit) throws InterruptedException {
+    final boolean result = latch.await(timeout, timeUnit);
+    String lockRequestStr = lockRequest.toString().replace("\n", ", ");
+    if (result) {
+      LOG.info("Acquired " + lockRequestStr);
+    } else {
+      LOG.info(String.format("Failed acquire in %s %s of %s", timeout, timeUnit.toString(),
+          lockRequestStr));
+    }
+    return result;
+  }
+
+  public void await() throws InterruptedException {
+    latch.await();
+  }
+
+  public void unlock() throws IOException {
+    locked.set(false);
+    worker.interrupt();
+    Threads.shutdown(worker);
+    try {
+      stub.lockHeartbeat(null,
+        LockHeartbeatRequest.newBuilder().setProcId(procId).setKeepAlive(false).build());
+    } catch (Exception e) {
+      throw ProtobufUtil.handleRemoteException(e);
+    }
+  }
+
+  protected class LockHeartbeatWorker extends Thread {
+    public LockHeartbeatWorker(final String desc) {
+      super("LockHeartbeatWorker(" + desc + ")");
+    }
+
+    public void run() {
+      final LockHeartbeatRequest lockHeartbeatRequest =
+          LockHeartbeatRequest.newBuilder().setProcId(procId).build();
+
+      LockHeartbeatResponse response;
+      while (true) {
+        try {
+          response = stub.lockHeartbeat(null, lockHeartbeatRequest);
+        } catch (Exception e) {
+          e = ProtobufUtil.handleRemoteException(e);
+          locked.set(false);
+          LOG.error("Heartbeat failed, releasing " + EntityLock.this, e);
+          abort.abort("Heartbeat failed", e);
+          return;
+        }
+        if (!isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.LOCKED) {
+          locked.set(true);
+          latch.countDown();
+        } else if (isLocked() && response.getLockStatus() == LockHeartbeatResponse.LockStatus.UNLOCKED) {
+          // Lock timed out.
+          locked.set(false);
+          abort.abort("Lock timed out.", null);
+          return;
+        }
+
+        try {
+          // If lock not acquired yet, poll faster so we can notify faster.
+          long sleepTime = 1000;
+          if (isLocked()) {
+            // If lock acquired, then use lock timeout to determine heartbeat rate.
+            // If timeout is <heartbeatTimeBuffer, send back to back heartbeats.
+            sleepTime = Math.max(response.getTimeoutMs() - heartbeatTimeBuffer, 1);
+          }
+          if (testingSleepTime != 0) {
+            sleepTime = testingSleepTime;
+          }
+          Thread.sleep(sleepTime);
+        } catch (InterruptedException e) {
+          // Since there won't be any more heartbeats, assume lock will be lost.
+          locked.set(false);
+          LOG.error("Interrupted, releasing " + EntityLock.this, e);
+          abort.abort("Worker thread interrupted", e);
+          return;
+        }
+      }
+    }
+  }
+}
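
The class comment above already shows the intended call pattern; below is a slightly fuller usage sketch, assuming the caller already has a LockServiceClient (for example the one a region server builds for MOB compactions) and an Abortable to be notified if the lock is lost. The class and method names in this sketch are hypothetical, not part of this commit.

// Hypothetical sketch: running work under a remote, heartbeat-kept table lock.
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.locking.EntityLock;
import org.apache.hadoop.hbase.client.locking.LockServiceClient;

public class EntityLockUsageSketch {
  static void runUnderTableLock(LockServiceClient lockClient, TableName table,
      Abortable abortable) throws IOException, InterruptedException {
    // The lock lives on the master; the client keeps it alive with heartbeats.
    EntityLock lock = lockClient.tableLock(table, true /* exclusive */,
        "example: exclusive work on " + table, abortable);
    lock.requestLock();  // queue the lock on the master and start the heartbeat worker
    try {
      if (lock.await(30, TimeUnit.SECONDS)) {
        // ... mutually exclusive work goes here ...
      }
    } finally {
      lock.unlock();  // always unlock, even if the lock was never acquired
    }
  }
}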

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java
new file mode 100644
index 0000000..e890afd
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java
@@ -0,0 +1,111 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client.locking;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.hbase.client.NonceGenerator;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
+
+/**
+ * Helper class to create "master locks" for namespaces, tables and regions.
+ * DEV-NOTE: At the moment this class is used only by the RS for MOB,
+ *           to prevent concurrent MOB compactions from conflicting.
+ *           The RS already has the LockService stub, so we have only one constructor that
+ *           takes the stub. If in the future we are going to use this in other places,
+ *           we should add a constructor that creates the stub from a conf or connection.
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
+@InterfaceStability.Evolving
+public class LockServiceClient {
+  private final LockService.BlockingInterface stub;
+  private final Configuration conf;
+  private final NonceGenerator ng;
+
+  public LockServiceClient(final Configuration conf, final LockService.BlockingInterface stub,
+      final NonceGenerator ng) {
+    this.conf = conf;
+    this.stub = stub;
+    this.ng = ng;
+  }
+
+  /**
+   * Create a new EntityLock object to acquire an exclusive or shared lock on a table.
+   * Internally, the table namespace will also be locked in shared mode.
+   */
+  public EntityLock tableLock(final TableName tableName, final boolean exclusive,
+      final String description, final Abortable abort) {
+    LockRequest lockRequest = buildLockRequest(exclusive ? LockType.EXCLUSIVE : LockType.SHARED,
+        tableName.getNameAsString(), null, null, description, ng.getNonceGroup(), ng.newNonce());
+    return new EntityLock(conf, stub, lockRequest, abort);
+  }
+
+  /**
+   * Create a new EntityLock object to acquire an exclusive lock on a namespace.
+   * Clients cannot acquire shared locks on a namespace.
+   */
+  public EntityLock namespaceLock(String namespace, String description, Abortable abort) {
+    LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE,
+        namespace, null, null, description, ng.getNonceGroup(), ng.newNonce());
+    return new EntityLock(conf, stub, lockRequest, abort);
+  }
+
+  /**
+   * Create a new EntityLock object to acquire an exclusive lock on multiple regions of the same table.
+   * Internally, the table and its namespace will also be locked in shared mode.
+   */
+  public EntityLock regionLock(List<HRegionInfo> regionInfos, String description, Abortable abort) {
+    LockRequest lockRequest = buildLockRequest(LockType.EXCLUSIVE,
+        null, null, regionInfos, description, ng.getNonceGroup(), ng.newNonce());
+    return new EntityLock(conf, stub, lockRequest, abort);
+  }
+
+  @VisibleForTesting
+  public static LockRequest buildLockRequest(final LockType type,
+      final String namespace, final TableName tableName, final List<HRegionInfo> regionInfos,
+      final String description, final long nonceGroup, final long nonce) {
+    final LockRequest.Builder builder = LockRequest.newBuilder()
+      .setLockType(type)
+      .setNonceGroup(nonceGroup)
+      .setNonce(nonce);
+    if (regionInfos != null) {
+      for (HRegionInfo hri: regionInfos) {
+        builder.addRegionInfo(HRegionInfo.convert(hri));
+      }
+    } else if (namespace != null) {
+      builder.setNamespace(namespace);
+    } else if (tableName != null) {
+      builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
+    }
+    return builder.setDescription(description).build();
+  }
+}
\ No newline at end of file
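
For tests, buildLockRequest() above can be called directly to see how the three lock flavours
map onto a LockRequest message. A hedged sketch (the nonce pair would normally come from a
NonceGenerator; the values here are placeholders):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.locking.LockServiceClient;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType;

  public class BuildLockRequestSketch {
    public static void main(String[] args) {
      // Exclusive table lock; exactly one of namespace/table/regions is set on the request.
      LockRequest req = LockServiceClient.buildLockRequest(LockType.EXCLUSIVE,
          null, TableName.valueOf("ns", "example_table"), null,
          "sketch: exclusive table lock", 1L, 1L);
      System.out.println(req);
    }
  }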

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
index 93b2085..00376cb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterAndRegionObserver.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -878,4 +879,26 @@ public class BaseMasterAndRegionObserver extends BaseRegionObserver
       final ObserverContext<MasterCoprocessorEnvironment> ctx,
       final HRegionInfo[] regionsToMerge) throws IOException {
   }
+
+  @Override
+  public void preRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
+      TableName tableName, HRegionInfo[] regionInfos, LockProcedure.LockType type,
+      String description) throws IOException {
+  }
+
+  @Override
+  public void postRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
+      TableName tableName, HRegionInfo[] regionInfos, LockProcedure.LockType type,
+      String description) throws IOException {
+  }
+
+  @Override
+  public void preLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      LockProcedure proc, boolean keepAlive) throws IOException {
+  }
+
+  @Override
+  public void postLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      LockProcedure proc, boolean keepAlive) throws IOException {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
index 23afe4b..461148b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseMasterObserver.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -1147,6 +1148,28 @@ public class BaseMasterObserver implements MasterObserver {
 
   @Override
   public void postBalanceRSGroup(ObserverContext<MasterCoprocessorEnvironment> ctx,
-                                 String groupName, boolean balancerRan) throws IOException {
+      String groupName, boolean balancerRan) throws IOException {
+  }
+
+  @Override
+  public void preRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
+      TableName tableName, HRegionInfo[] regionInfos, LockProcedure.LockType type,
+      String description) throws IOException {
+  }
+
+  @Override
+  public void postRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
+      TableName tableName, HRegionInfo[] regionInfos, LockProcedure.LockType type,
+      String description) throws IOException {
+  }
+
+  @Override
+  public void preLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      LockProcedure proc, boolean keepAlive) throws IOException {
+  }
+
+  @Override
+  public void postLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      LockProcedure proc, boolean keepAlive) throws IOException {
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index b0569e8..82b3cfa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -19,8 +19,6 @@
 
 package org.apache.hadoop.hbase.coprocessor;
 
-import com.google.common.net.HostAndPort;
-
 import java.io.IOException;
 import java.util.List;
 import java.util.Set;
@@ -40,13 +38,15 @@ import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.MasterSwitchType;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.master.RegionPlan;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
 
+import com.google.common.net.HostAndPort;
+
 /**
  * Defines coprocessor hooks for interacting with operations on the
  * {@link org.apache.hadoop.hbase.master.HMaster} process.
@@ -1693,7 +1693,7 @@ public interface MasterObserver extends Coprocessor {
       final String namespace, final Quotas quotas) throws IOException;
 
   /**
-   * Called before dispatching region merge request. 
+   * Called before dispatching region merge request.
    * It can't bypass the default action, e.g., ctx.bypass() won't have effect.
    * @param ctx coprocessor environment
    * @param regionA first region to be merged
@@ -1702,7 +1702,7 @@ public interface MasterObserver extends Coprocessor {
    */
   void preDispatchMerge(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       HRegionInfo regionA, HRegionInfo regionB) throws IOException;
-  
+
   /**
    * called after dispatching the region merge request.
    * @param c coprocessor environment
@@ -1971,4 +1971,30 @@ public interface MasterObserver extends Coprocessor {
   default void postListReplicationPeers(final ObserverContext<MasterCoprocessorEnvironment> ctx,
       String regex) throws IOException {
   }
+
+  /**
+   * Called before a new LockProcedure is queued.
+   */
+  public void preRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
+      TableName tableName, HRegionInfo[] regionInfos, LockProcedure.LockType type,
+      String description) throws IOException;
+
+  /**
+   * Called after a new LockProcedure is queued.
+   */
+  public void postRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
+      TableName tableName, HRegionInfo[] regionInfos, LockProcedure.LockType type,
+      String description) throws IOException;
+
+  /**
+   * Called before the heartbeat for a lock is processed.
+   */
+  public void preLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      LockProcedure proc, boolean keepAlive) throws IOException;
+
+  /**
+   * Called after the heartbeat for a lock is processed.
+   */
+  public void postLockHeartbeat(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      LockProcedure proc, boolean keepAlive) throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 75da73f..a41960b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -96,6 +96,7 @@ import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
 import org.apache.hadoop.hbase.master.cleaner.HFileCleaner;
 import org.apache.hadoop.hbase.master.cleaner.LogCleaner;
 import org.apache.hadoop.hbase.master.cleaner.ReplicationMetaCleaner;
+import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleaner;
 import org.apache.hadoop.hbase.master.cleaner.ReplicationZKNodeCleanerChore;
 import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
@@ -322,6 +323,8 @@ public class HMaster extends HRegionServer implements MasterServices {
   // Maximum percent of regions in transition when balancing
   private final double maxRitPercent;
 
+  private final LockManager lockManager = new LockManager(this);
+
   private LoadBalancer balancer;
   private RegionNormalizer normalizer;
   private BalancerChore balancerChore;
@@ -674,7 +677,6 @@ public class HMaster extends HRegionServer implements MasterServices {
     this.mpmHost.register(new MasterFlushTableProcedureManager());
     this.mpmHost.loadProcedures(conf);
     this.mpmHost.initialize(this, this.metricsMaster);
-
   }
 
   /**
@@ -3289,4 +3291,9 @@ public class HMaster extends HRegionServer implements MasterServices {
         this.zooKeeper.prefix("Unable to remove drain for '" + server.getServerName() + "'."), ke);
     }
   }
+
+  @Override
+  public LockManager getLockManager() {
+    return lockManager;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 2b6ae39..2f17a5f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
 import org.apache.hadoop.hbase.coprocessor.MasterObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -1789,4 +1790,46 @@ public class MasterCoprocessorHost
       }
     });
   }
+
+  public void preRequestLock(String namespace, TableName tableName, HRegionInfo[] regionInfos,
+      LockProcedure.LockType type, String description) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.preRequestLock(ctx, namespace, tableName, regionInfos, type, description);
+      }
+    });
+  }
+
+  public void postRequestLock(String namespace, TableName tableName, HRegionInfo[] regionInfos,
+      LockProcedure.LockType type, String description) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.postRequestLock(ctx, namespace, tableName, regionInfos, type, description);
+      }
+    });
+  }
+
+  public void preLockHeartbeat(LockProcedure proc, boolean keepAlive) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.preLockHeartbeat(ctx, proc, keepAlive);
+      }
+    });
+  }
+
+  public void postLockHeartbeat(LockProcedure proc, boolean keepAlive) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.postLockHeartbeat(ctx, proc, keepAlive);
+      }
+    });
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 732e678..5873986 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hbase.ipc.PriorityFunction;
 import org.apache.hadoop.hbase.ipc.QosPriority;
 import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
@@ -72,6 +73,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpeci
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.*;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.*;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
@@ -108,7 +110,6 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.AccessController;
 import org.apache.hadoop.hbase.security.visibility.VisibilityController;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations;
@@ -125,7 +126,8 @@ import org.apache.zookeeper.KeeperException;
 @InterfaceAudience.Private
 @SuppressWarnings("deprecation")
 public class MasterRpcServices extends RSRpcServices
-      implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface {
+      implements MasterService.BlockingInterface, RegionServerStatusService.BlockingInterface,
+        LockService.BlockingInterface {
   private static final Log LOG = LogFactory.getLog(MasterRpcServices.class.getName());
 
   private final HMaster master;
@@ -1796,4 +1798,66 @@ public class MasterRpcServices extends RSRpcServices
 
     return response.build();
   }
+
+  @Override
+  public LockResponse requestLock(RpcController controller, LockRequest request)
+      throws ServiceException {
+    try {
+      if (request.getDescription().isEmpty()) {
+        throw new IllegalArgumentException("Empty description");
+      }
+      final long procId;
+      LockProcedure.LockType type = LockProcedure.LockType.valueOf(request.getLockType().name());
+      if (request.getRegionInfoCount() > 0) {
+        final HRegionInfo[] regionInfos = new HRegionInfo[request.getRegionInfoCount()];
+        for (int i = 0; i < request.getRegionInfoCount(); ++i) {
+          regionInfos[i] = HRegionInfo.convert(request.getRegionInfo(i));
+        }
+        procId = master.getLockManager().remoteLocks().requestRegionsLock(regionInfos,
+            request.getDescription(), request.getNonceGroup(), request.getNonce());
+        return LockResponse.newBuilder().setProcId(procId).build();
+      } else if (request.hasTableName()) {
+        final TableName tableName = ProtobufUtil.toTableName(request.getTableName());
+        procId = master.getLockManager().remoteLocks().requestTableLock(tableName, type,
+            request.getDescription(), request.getNonceGroup(), request.getNonce());
+        return LockResponse.newBuilder().setProcId(procId).build();
+      } else if (request.hasNamespace()) {
+        procId = master.getLockManager().remoteLocks().requestNamespaceLock(
+            request.getNamespace(), type, request.getDescription(),
+            request.getNonceGroup(), request.getNonce());
+      } else {
+        throw new IllegalArgumentException("one of table/namespace/region should be specified");
+      }
+      return LockResponse.newBuilder().setProcId(procId).build();
+    } catch (IllegalArgumentException e) {
+      LOG.warn("Exception when queuing lock", e);
+      throw new ServiceException(new DoNotRetryIOException(e));
+    } catch (IOException e) {
+      LOG.warn("Exception when queuing lock", e);
+      throw new ServiceException(e);
+    }
+  }
+
+  /**
+   * @return LOCKED, if procedure is found and it has the lock; else UNLOCKED.
+   * @throws ServiceException if given proc id is found but it is not a LockProcedure.
+   */
+  @Override
+  public LockHeartbeatResponse lockHeartbeat(RpcController controller, LockHeartbeatRequest request)
+      throws ServiceException {
+    try {
+      if (master.getLockManager().remoteLocks().lockHeartbeat(request.getProcId(),
+          request.getKeepAlive())) {
+        return LockHeartbeatResponse.newBuilder().setTimeoutMs(
+            master.getConfiguration().getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF,
+                LockProcedure.DEFAULT_REMOTE_LOCKS_TIMEOUT_MS))
+            .setLockStatus(LockHeartbeatResponse.LockStatus.LOCKED).build();
+      } else {
+        return LockHeartbeatResponse.newBuilder()
+            .setLockStatus(LockHeartbeatResponse.LockStatus.UNLOCKED).build();
+      }
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 060bdd8..5019eda 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.TableNotDisabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.executor.ExecutorService;
+import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.favored.FavoredNodesManager;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
@@ -488,4 +489,8 @@ public interface MasterServices extends Server {
    */
   void removeDrainFromRegionServer(final ServerName server);
 
+  /**
+   * @return {@link LockManager} to lock namespaces/tables/regions.
+   */
+  LockManager getLockManager();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
new file mode 100644
index 0000000..8f99f5e
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
@@ -0,0 +1,271 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.locking;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.util.NonceKey;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Functions to acquire lock on table/namespace/regions.
+ */
+@InterfaceAudience.Private
+public final class LockManager {
+  private static final Log LOG = LogFactory.getLog(LockManager.class);
+  private final HMaster master;
+  private final RemoteLocks remoteLocks;
+
+  public LockManager(HMaster master) {
+    this.master = master;
+    this.remoteLocks = new RemoteLocks();
+  }
+
+  public RemoteLocks remoteLocks() {
+    return remoteLocks;
+  }
+
+  public MasterLock createMasterLock(final String namespace,
+      final LockProcedure.LockType type, final String description) {
+    return new MasterLock(namespace, type, description);
+  }
+
+  public MasterLock createMasterLock(final TableName tableName,
+      final LockProcedure.LockType type, final String description) {
+    return new MasterLock(tableName, type, description);
+  }
+
+  public MasterLock createMasterLock(final HRegionInfo[] regionInfos, final String description) {
+    return new MasterLock(regionInfos, description);
+  }
+
+  private void submitProcedure(final LockProcedure proc, final long nonceGroup, final long nonce) {
+    proc.setOwner(master.getMasterProcedureExecutor().getEnvironment().getRequestUser());
+    final NonceKey nonceKey = master.getMasterProcedureExecutor().createNonceKey(nonceGroup, nonce);
+    master.getMasterProcedureExecutor().submitProcedure(proc, nonceKey);
+  }
+
+  /**
+   * Locks on namespace/table/regions.
+   * Underneath, uses the procedure framework and queues a {@link LockProcedure} which waits in a
+   * queue until scheduled.
+   * Use this lock instead of LockManager.remoteLocks() for MASTER ONLY operations, for two advantages:
+   * - no need to poll the LockProcedure to check if the lock was acquired.
+   * - generous timeout for lock preemption (default 10 min), so no need to spawn a thread for heartbeats
+   * (timeout configuration {@link LockProcedure#DEFAULT_LOCAL_MASTER_LOCKS_TIMEOUT_MS}).
+   */
+  public class MasterLock {
+    private final String namespace;
+    private final TableName tableName;
+    private final HRegionInfo[] regionInfos;
+    private final LockProcedure.LockType type;
+    private final String description;
+
+    private LockProcedure proc = null;
+
+    public MasterLock(final String namespace,
+        final LockProcedure.LockType type, final String description) {
+      this.namespace = namespace;
+      this.tableName = null;
+      this.regionInfos = null;
+      this.type = type;
+      this.description = description;
+    }
+
+    public MasterLock(final TableName tableName,
+        final LockProcedure.LockType type, final String description) {
+      this.namespace = null;
+      this.tableName = tableName;
+      this.regionInfos = null;
+      this.type = type;
+      this.description = description;
+    }
+
+    public MasterLock(final HRegionInfo[] regionInfos, final String description) {
+      this.namespace = null;
+      this.tableName = null;
+      this.regionInfos = regionInfos;
+      this.type = LockProcedure.LockType.EXCLUSIVE;
+      this.description = description;
+    }
+
+    /**
+     * Acquire the lock, waiting indefinitely until the lock is released or
+     * the thread is interrupted.
+     * @throws InterruptedException If current thread is interrupted while
+     *                              waiting for the lock
+     */
+    public boolean acquire() throws InterruptedException {
+      return tryAcquire(0);
+    }
+
+    /**
+     * Acquire the lock within a wait time.
+     * @param timeoutMs The maximum time (in milliseconds) to wait for the lock,
+     *                  0 to wait indefinitely
+     * @return True if the lock was acquired, false if waiting time elapsed
+     *         before the lock was acquired
+     * @throws InterruptedException If the thread is interrupted while waiting to
+     *                              acquire the lock
+     */
+    public boolean tryAcquire(final long timeoutMs) throws InterruptedException {
+      if (proc != null && proc.isLocked()) {
+        return true;
+      }
+      // Use new condition and procedure every time lock is requested.
+      final CountDownLatch lockAcquireLatch = new CountDownLatch(1);
+      if (regionInfos != null) {
+        proc = new LockProcedure(master.getConfiguration(), regionInfos, type,
+            description, lockAcquireLatch);
+      } else if (tableName != null) {
+        proc = new LockProcedure(master.getConfiguration(), tableName, type,
+            description, lockAcquireLatch);
+      } else if (namespace != null) {
+        proc = new LockProcedure(master.getConfiguration(), namespace, type,
+            description, lockAcquireLatch);
+      } else {
+        throw new UnsupportedOperationException("no namespace/table/region provided");
+      }
+
+      // The user of a MasterLock should be 'hbase'; the only case where this is not true
+      // is when a master lock is taken from inside a coprocessor (which should be avoided).
+      proc.setOwner(master.getMasterProcedureExecutor().getEnvironment().getRequestUser());
+      master.getMasterProcedureExecutor().submitProcedure(proc);
+
+      long deadline = (timeoutMs > 0) ? System.currentTimeMillis() + timeoutMs : Long.MAX_VALUE;
+      while (deadline >= System.currentTimeMillis() && !proc.isLocked()) {
+        try {
+          lockAcquireLatch.await(deadline - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
+        } catch (InterruptedException e) {
+          LOG.info("InterruptedException when waiting for lock: " + proc.toString());
+          // kind of weird, releasing a lock which is not locked. This is to make the procedure
+          // finish immediately whenever it gets scheduled so that it doesn't hold the lock.
+          release();
+          throw e;
+        }
+      }
+      if (!proc.isLocked()) {
+        LOG.info("Timed out waiting to acquire procedure lock: " + proc.toString());
+        release();
+        return false;
+      }
+      return true;
+    }
+
+    /**
+     * Release the lock.
+     * No-op if the lock was never acquired.
+     */
+    public void release() {
+      if (proc != null) {
+        proc.unlock(master.getMasterProcedureExecutor().getEnvironment());
+      }
+      proc = null;
+    }
+
+    @Override
+    public String toString() {
+      return "MasterLock: proc = " + proc.toString();
+    }
+
+    @VisibleForTesting
+    LockProcedure getProc() {
+      return proc;
+    }
+  }
+
+  /**
+   * Locks on namespace/table/regions for remote operations.
+   * Since remote operations are unreliable and the client/RS may die anytime and never release
+   * locks, regular heartbeats are required to keep the lock held.
+   */
+  public class RemoteLocks {
+    public long requestNamespaceLock(final String namespace, final LockProcedure.LockType type,
+        final String description, final long nonceGroup, final long nonce)
+        throws IllegalArgumentException, IOException {
+      master.getMasterCoprocessorHost().preRequestLock(namespace, null, null, type, description);
+
+      final LockProcedure proc = new LockProcedure(master.getConfiguration(), namespace,
+          type, description, null);
+      submitProcedure(proc, nonceGroup, nonce);
+
+      master.getMasterCoprocessorHost().postRequestLock(namespace, null, null, type, description);
+      return proc.getProcId();
+    }
+
+    public long requestTableLock(final TableName tableName, final LockProcedure.LockType type,
+        final String description, final long nonceGroup, final long nonce)
+        throws IllegalArgumentException, IOException {
+      master.getMasterCoprocessorHost().preRequestLock(null, tableName, null, type, description);
+
+      final LockProcedure proc = new LockProcedure(master.getConfiguration(), tableName,
+          type, description, null);
+      submitProcedure(proc, nonceGroup, nonce);
+
+      master.getMasterCoprocessorHost().postRequestLock(null, tableName, null, type, description);
+      return proc.getProcId();
+    }
+
+    /**
+     * @throws IllegalArgumentException if the regions are not all from the same table.
+     */
+    public long requestRegionsLock(final HRegionInfo[] regionInfos, final String description,
+        final long nonceGroup, final long nonce) throws IllegalArgumentException, IOException {
+      master.getMasterCoprocessorHost().preRequestLock(null, null, regionInfos,
+            LockProcedure.LockType.EXCLUSIVE, description);
+
+      final LockProcedure proc = new LockProcedure(master.getConfiguration(), regionInfos,
+          LockProcedure.LockType.EXCLUSIVE, description, null);
+      submitProcedure(proc, nonceGroup, nonce);
+
+      master.getMasterCoprocessorHost().postRequestLock(null, null, regionInfos,
+            LockProcedure.LockType.EXCLUSIVE, description);
+      return proc.getProcId();
+    }
+
+    /**
+     * @param keepAlive if false, release the lock.
+     * @return true, if procedure is found and it has the lock; else false.
+     */
+    public boolean lockHeartbeat(final long procId, final boolean keepAlive) throws IOException {
+      final LockProcedure proc = master.getMasterProcedureExecutor()
+        .getProcedure(LockProcedure.class, procId);
+      if (proc == null) return false;
+
+      master.getMasterCoprocessorHost().preLockHeartbeat(proc, keepAlive);
+
+      proc.updateHeartBeat();
+      if (!keepAlive) {
+        proc.unlock(master.getMasterProcedureExecutor().getEnvironment());
+      }
+
+      master.getMasterCoprocessorHost().postLockHeartbeat(proc, keepAlive);
+
+      return proc.isLocked();
+    }
+  }
+}
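
A hedged sketch of the master-internal path: code running inside the master (assumed to hold a
MasterServices handle) takes a MasterLock, which needs no heartbeats and no polling:

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.master.MasterServices;
  import org.apache.hadoop.hbase.master.locking.LockManager;
  import org.apache.hadoop.hbase.master.locking.LockProcedure;

  public class MasterLockSketch {
    static void withTableLock(MasterServices master, TableName table) throws InterruptedException {
      LockManager.MasterLock lock = master.getLockManager().createMasterLock(
          table, LockProcedure.LockType.EXCLUSIVE, "sketch: master-only table work");
      if (!lock.tryAcquire(60 * 1000)) {   // wait up to one minute
        return;                            // could not get the lock in time
      }
      try {
        // ... master-only work that must not race with other operations on this table ...
      } finally {
        lock.release();                    // no heartbeats are needed for master locks
      }
    }
  }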

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
new file mode 100644
index 0000000..f793a65
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
@@ -0,0 +1,462 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.master.locking;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockProcedureData;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+@InterfaceAudience.Private
+public final class LockProcedure extends Procedure<MasterProcedureEnv>
+    implements TableProcedureInterface {
+  private static final Log LOG = LogFactory.getLog(LockProcedure.class);
+
+  public static final int DEFAULT_REMOTE_LOCKS_TIMEOUT_MS = 30000;  // timeout in ms
+  public static final String REMOTE_LOCKS_TIMEOUT_MS_CONF =
+      "hbase.master.procedure.remote.locks.timeout.ms";
+  // 10 min. Same as old ZK lock timeout.
+  public static final int DEFAULT_LOCAL_MASTER_LOCKS_TIMEOUT_MS = 600000;
+  public static final String LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF =
+      "hbase.master.procedure.local.master.locks.timeout.ms";
+
+  // Also used in serialized states, changes will affect backward compatibility.
+  public enum LockType { SHARED, EXCLUSIVE }
+
+  private String namespace;
+  private TableName tableName;
+  private HRegionInfo[] regionInfos;
+  private LockType type;
+  // underlying namespace/table/region lock.
+  private LockInterface lock;
+  private TableOperationType opType;
+  private String description;
+  // True when this is a master lock being recovered from the WALs.
+  private boolean recoveredMasterLock;
+  // this is for internal working
+  private boolean hasLock;
+
+  private final ProcedureEvent<LockProcedure> event = new ProcedureEvent<LockProcedure>(this);
+  // True if this proc acquired relevant locks. This value is for client checks.
+  private final AtomicBoolean locked = new AtomicBoolean(false);
+  // Last system time (in ms) when client sent the heartbeat.
+  // Initialize to system time for non-null value in case of recovery.
+  private final AtomicLong lastHeartBeat = new AtomicLong();
+  // Set to true when unlock request is received.
+  private final AtomicBoolean unlock = new AtomicBoolean(false);
+  // Decreased when the lock is acquired. Only used for local (within the master process) locks.
+  // Setting latch to non-null value increases default timeout to
+  // DEFAULT_LOCAL_MASTER_LOCKS_TIMEOUT_MS (10 min) so that there is no need to heartbeat.
+  private final CountDownLatch lockAcquireLatch;
+
+  @Override
+  public TableName getTableName() {
+    return tableName;
+  }
+
+  @Override
+  public TableOperationType getTableOperationType() {
+    return opType;
+  }
+
+  private interface LockInterface {
+    boolean acquireLock(MasterProcedureEnv env);
+    void releaseLock(MasterProcedureEnv env);
+  }
+
+  public LockProcedure() {
+    lockAcquireLatch = null;
+  }
+
+  private LockProcedure(final Configuration conf, final LockType type,
+      final String description, final CountDownLatch lockAcquireLatch) {
+    this.type = type;
+    this.description = description;
+    this.lockAcquireLatch = lockAcquireLatch;
+    if (lockAcquireLatch == null) {
+      setTimeout(conf.getInt(REMOTE_LOCKS_TIMEOUT_MS_CONF, DEFAULT_REMOTE_LOCKS_TIMEOUT_MS));
+    } else {
+      setTimeout(conf.getInt(LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF,
+          DEFAULT_LOCAL_MASTER_LOCKS_TIMEOUT_MS));
+    }
+  }
+
+  /**
+   * Constructor for namespace lock.
+   * @param lockAcquireLatch if not null, the latch is decreased when lock is acquired.
+   */
+  public LockProcedure(final Configuration conf, final String namespace, final LockType type,
+      final String description, final CountDownLatch lockAcquireLatch)
+      throws IllegalArgumentException {
+    this(conf, type, description, lockAcquireLatch);
+
+    if (namespace.isEmpty()) {
+      throw new IllegalArgumentException("Empty namespace");
+    }
+
+    this.namespace = namespace;
+    this.lock = setupNamespaceLock();
+  }
+
+  /**
+   * Constructor for table lock.
+   * @param lockAcquireLatch if not null, the latch is decreased when lock is acquired.
+   */
+  public LockProcedure(final Configuration conf, final TableName tableName, final LockType type,
+      final String description, final CountDownLatch lockAcquireLatch)
+      throws IllegalArgumentException {
+    this(conf, type, description, lockAcquireLatch);
+
+    this.tableName = tableName;
+    this.lock = setupTableLock();
+  }
+
+  /**
+   * Constructor for region lock(s).
+   * @param lockAcquireLatch if not null, the latch is decreased when lock is acquired.
+   *                        Useful for locks acquired locally from master process.
+   * @throws IllegalArgumentException if the regions are not all from the same table.
+   */
+  public LockProcedure(final Configuration conf, final HRegionInfo[] regionInfos,
+      final LockType type, final String description, final CountDownLatch lockAcquireLatch)
+      throws IllegalArgumentException {
+    this(conf, type, description, lockAcquireLatch);
+
+    // A region lock requires at least one region.
+    if (regionInfos.length == 0) {
+      throw new IllegalArgumentException("No regions specified for region lock");
+    }
+
+    // check all regions belong to same table.
+    final TableName regionTable = regionInfos[0].getTable();
+    for (int i = 1; i < regionInfos.length; ++i) {
+      if (!regionInfos[i].getTable().equals(regionTable)) {
+        throw new IllegalArgumentException("All regions should be from same table");
+      }
+    }
+
+    this.regionInfos = regionInfos;
+    this.lock = setupRegionLock();
+  }
+
+  private boolean hasHeartbeatExpired() {
+    return System.currentTimeMillis() - lastHeartBeat.get() >= getTimeout();
+  }
+
+  /**
+   * Updates timeout deadline for the lock.
+   */
+  public void updateHeartBeat() {
+    lastHeartBeat.set(System.currentTimeMillis());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Update heartbeat. Proc: " + toString());
+    }
+  }
+
+  /**
+   * Re-run the procedure after every timeout to write new WAL entries so we don't hold back old
+   * WALs.
+   * @return false, so the procedure framework doesn't mark this procedure as failed.
+   */
+  protected boolean setTimeoutFailure(final MasterProcedureEnv env) {
+    synchronized (event) {
+      if (!event.isReady()) {  // maybe unlock() awakened the event.
+        setState(ProcedureProtos.ProcedureState.RUNNABLE);
+        env.getProcedureScheduler().wakeEvent(event);
+      }
+    }
+    return false;  // false: do not mark the procedure as failed.
+  }
+
+  // Can be called before procedure gets scheduled, in which case, the execute() will finish
+  // immediately and release the underlying locks.
+  public void unlock(final MasterProcedureEnv env) {
+    unlock.set(true);
+    locked.set(false);
+    // Maybe timeout already awakened the event and the procedure has finished.
+    synchronized (event) {
+      if (!event.isReady()) {
+        setState(ProcedureProtos.ProcedureState.RUNNABLE);
+        env.getProcedureScheduler().wakeEvent(event);
+      }
+    }
+  }
+
+  @Override
+  protected Procedure[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
+    // Local master locks don't store any state, so on recovery, simply finish this procedure
+    // immediately.
+    if (recoveredMasterLock) return null;
+    if (lockAcquireLatch != null) {
+      lockAcquireLatch.countDown();
+    }
+    if (unlock.get() || hasHeartbeatExpired()) {
+      locked.set(false);
+      LOG.debug((unlock.get() ? "UNLOCKED - " : "TIMED OUT - ") + toString());
+      return null;
+    }
+    synchronized (event) {
+      env.getProcedureScheduler().suspendEvent(event);
+      env.getProcedureScheduler().waitEvent(event, this);
+      setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT);
+    }
+    throw new ProcedureSuspendedException();
+  }
+
+  @Override
+  protected void rollback(final MasterProcedureEnv env) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  protected boolean abort(final MasterProcedureEnv env) {
+    unlock(env);
+    return true;
+  }
+
+  @Override
+  protected void serializeStateData(final OutputStream stream) throws IOException {
+    final LockProcedureData.Builder builder = LockProcedureData.newBuilder()
+          .setLockType(LockServiceProtos.LockType.valueOf(type.name()))
+          .setDescription(description);
+    if (regionInfos != null) {
+      for (int i = 0; i < regionInfos.length; ++i) {
+        builder.addRegionInfo(HRegionInfo.convert(regionInfos[i]));
+      }
+    } else if (namespace != null) {
+      builder.setNamespace(namespace);
+    } else if (tableName != null) {
+      builder.setTableName(ProtobufUtil.toProtoTableName(tableName));
+    }
+    if (lockAcquireLatch != null) {
+      builder.setIsMasterLock(true);
+    }
+    builder.build().writeDelimitedTo(stream);
+  }
+
+  @Override
+  protected void deserializeStateData(final InputStream stream) throws IOException {
+    final LockProcedureData state = LockProcedureData.parseDelimitedFrom(stream);
+    type = LockType.valueOf(state.getLockType().name());
+    description = state.getDescription();
+    if (state.getRegionInfoCount() > 0) {
+      regionInfos = new HRegionInfo[state.getRegionInfoCount()];
+      for (int i = 0; i < state.getRegionInfoCount(); ++i) {
+        regionInfos[i] = HRegionInfo.convert(state.getRegionInfo(i));
+      }
+    } else if (state.hasNamespace()) {
+      namespace = state.getNamespace();
+    } else if (state.hasTableName()) {
+      tableName = ProtobufUtil.toTableName(state.getTableName());
+    }
+    recoveredMasterLock = state.getIsMasterLock();
+    this.lock = setupLock();
+  }
+
+  @Override
+  protected boolean acquireLock(final MasterProcedureEnv env) {
+    boolean ret = lock.acquireLock(env);
+    locked.set(ret);
+    hasLock = ret;
+    if (ret) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("LOCKED - " + toString());
+      }
+      lastHeartBeat.set(System.currentTimeMillis());
+    }
+    return ret;
+  }
+
+  @Override
+  protected void releaseLock(final MasterProcedureEnv env) {
+    lock.releaseLock(env);
+    hasLock = false;
+  }
+
+  /**
+   * On recovery, re-execute from start to acquire the locks.
+   * Need to explicitly set it to RUNNABLE because the procedure might have been in WAITING_TIMEOUT
+   * state when the crash happened. In that case, it would be sent back to the timeout queue on
+   * recovery, which we don't want since we want to re-acquire the locks.
+   */
+  @Override
+  protected void beforeReplay(MasterProcedureEnv env) {
+    setState(ProcedureProtos.ProcedureState.RUNNABLE);
+  }
+
+  protected void toStringClassDetails(final StringBuilder builder) {
+    super.toStringClassDetails(builder);
+    if (regionInfos != null) {
+      builder.append(" regions=");
+      for (int i = 0; i < regionInfos.length; ++i) {
+        if (i > 0) builder.append(",");
+        builder.append(regionInfos[i].getShortNameToLog());
+      }
+    } else if (namespace != null) {
+      builder.append(", namespace=").append(namespace);
+    } else if (tableName != null) {
+      builder.append(", tableName=").append(tableName);
+    }
+    builder.append(", type=").append(type);
+  }
+
+  private LockInterface setupLock() throws IllegalArgumentException {
+    if (regionInfos != null) {
+      return setupRegionLock();
+    } else if (namespace != null) {
+      return setupNamespaceLock();
+    } else if (tableName != null) {
+      return setupTableLock();
+    } else {
+      LOG.error("Unknown level specified in proc - " + toString());
+      throw new IllegalArgumentException("no namespace/table/region provided");
+    }
+  }
+
+  private LockInterface setupNamespaceLock() throws IllegalArgumentException {
+    this.tableName = TableName.NAMESPACE_TABLE_NAME;
+    switch (type) {
+      case EXCLUSIVE:
+        this.opType = TableOperationType.EDIT;
+        return new NamespaceExclusiveLock();
+      case SHARED:
+        LOG.error("Shared lock on namespace not supported. Proc - " + toString());
+        throw new IllegalArgumentException("Shared lock on namespace not supported");
+      default:
+        LOG.error("Unexpected lock type in proc - " + toString());
+        throw new IllegalArgumentException("Wrong lock type: " + type.toString());
+    }
+  }
+
+  private LockInterface setupTableLock() throws IllegalArgumentException {
+    switch (type) {
+      case EXCLUSIVE:
+        this.opType = TableOperationType.EDIT;
+        return new TableExclusiveLock();
+      case SHARED:
+        this.opType = TableOperationType.READ;
+        return new TableSharedLock();
+      default:
+        LOG.error("Unexpected lock type in proc - " + toString());
+        throw new IllegalArgumentException("Wrong lock type:" + type.toString());
+    }
+  }
+
+  private LockInterface setupRegionLock() throws IllegalArgumentException {
+    this.tableName = regionInfos[0].getTable();
+    switch (type) {
+      case EXCLUSIVE:
+        this.opType = TableOperationType.REGION_EDIT;
+        return new RegionExclusiveLock();
+      default:
+        LOG.error("Only exclusive lock supported on regions. Proc - " + toString());
+        throw new IllegalArgumentException("Only exclusive lock supported on regions.");
+    }
+  }
+
+  public String getDescription() {
+    return description;
+  }
+
+  public boolean isLocked() {
+    return locked.get();
+  }
+
+  @Override
+  public boolean holdLock(final MasterProcedureEnv env) {
+    return true;
+  }
+
+  @Override
+  public boolean hasLock(final MasterProcedureEnv env) {
+    return hasLock;
+  }
+
+  ///////////////////////
+  // LOCK IMPLEMENTATIONS
+  ///////////////////////
+
+  private class TableExclusiveLock implements LockInterface {
+    @Override
+    public boolean acquireLock(final MasterProcedureEnv env) {
+      return env.getProcedureScheduler().tryAcquireTableExclusiveLock(LockProcedure.this, tableName);
+    }
+
+    @Override
+    public void releaseLock(final MasterProcedureEnv env) {
+      env.getProcedureScheduler().releaseTableExclusiveLock(LockProcedure.this, tableName);
+    }
+  }
+
+  private class TableSharedLock implements LockInterface {
+    @Override
+    public boolean acquireLock(final MasterProcedureEnv env) {
+      return env.getProcedureScheduler().tryAcquireTableSharedLock(LockProcedure.this, tableName);
+    }
+
+    @Override
+    public void releaseLock(final MasterProcedureEnv env) {
+      env.getProcedureScheduler().releaseTableSharedLock(LockProcedure.this, tableName);
+    }
+  }
+
+  private class NamespaceExclusiveLock implements LockInterface {
+    @Override
+    public boolean acquireLock(final MasterProcedureEnv env) {
+      return env.getProcedureScheduler().tryAcquireNamespaceExclusiveLock(
+          LockProcedure.this, namespace);
+    }
+
+    @Override
+    public void releaseLock(final MasterProcedureEnv env) {
+      env.getProcedureScheduler().releaseNamespaceExclusiveLock(
+          LockProcedure.this, namespace);
+    }
+  }
+
+  private class RegionExclusiveLock implements LockInterface {
+    @Override
+    public boolean acquireLock(final MasterProcedureEnv env) {
+      return !env.getProcedureScheduler().waitRegions(LockProcedure.this, tableName, regionInfos);
+    }
+
+    @Override
+    public void releaseLock(final MasterProcedureEnv env) {
+      env.getProcedureScheduler().wakeRegions(LockProcedure.this, tableName, regionInfos);
+    }
+  }
+}
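
A short, hedged note on tuning: the two timeouts above are ordinary Configuration keys, so they
can be raised in hbase-site.xml or programmatically, for example:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.master.locking.LockProcedure;

  public class LockTimeoutConfigSketch {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Remote (client/RS) locks: heartbeat deadline, default 30s in this patch.
      conf.setInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, 60000);
      // Master-local locks: no heartbeats, generous default of 10 min.
      conf.setInt(LockProcedure.LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF, 15 * 60 * 1000);
      System.out.println(conf.getInt(LockProcedure.REMOTE_LOCKS_TIMEOUT_MS_CONF, -1));
    }
  }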

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
index fedf951..cc84093 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/IdLock.java
@@ -46,7 +46,7 @@ public class IdLock {
   public static class Entry {
     private final long id;
     private int numWaiters;
-    private boolean isLocked = true;
+    private boolean locked = true;
 
     private Entry(long id) {
       this.id = id;
@@ -54,7 +54,7 @@ public class IdLock {
 
     public String toString() {
       return "id=" + id + ", numWaiter=" + numWaiters + ", isLocked="
-          + isLocked;
+          + locked;
     }
   }
 
@@ -74,9 +74,9 @@ public class IdLock {
     Entry existing;
     while ((existing = map.putIfAbsent(entry.id, entry)) != null) {
       synchronized (existing) {
-        if (existing.isLocked) {
+        if (existing.locked) {
           ++existing.numWaiters;  // Add ourselves to waiters.
-          while (existing.isLocked) {
+          while (existing.locked) {
             try {
               existing.wait();
             } catch (InterruptedException e) {
@@ -87,7 +87,7 @@ public class IdLock {
           }
 
           --existing.numWaiters;  // Remove ourselves from waiters.
-          existing.isLocked = true;
+          existing.locked = true;
           return existing;
         }
         // If the entry is not locked, it might already be deleted from the
@@ -107,7 +107,7 @@ public class IdLock {
    */
   public void releaseLockEntry(Entry entry) {
     synchronized (entry) {
-      entry.isLocked = false;
+      entry.locked = false;
       if (entry.numWaiters > 0) {
         entry.notify();
       } else {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java
new file mode 100644
index 0000000..aa87d82
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/locking/TestEntityLocks.java
@@ -0,0 +1,182 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client.locking;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Abortable;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator;
+import org.apache.hadoop.hbase.HBaseIOException;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.*;
+import org.apache.hadoop.hbase.testclassification.ClientTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Threads;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mockito;
+import org.mortbay.log.Log;
+
+import static org.mockito.Mockito.*;
+import static org.junit.Assert.*;
+
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+
+@Category({ClientTests.class, SmallTests.class})
+public class TestEntityLocks {
+  private final Configuration conf = HBaseConfiguration.create();
+
+  private final LockService.BlockingInterface master =
+    Mockito.mock(LockService.BlockingInterface.class);
+
+  private LockServiceClient admin;
+  private ArgumentCaptor<LockRequest> lockReqArgCaptor;
+  private ArgumentCaptor<LockHeartbeatRequest> lockHeartbeatReqArgCaptor;
+
+  private static final LockHeartbeatResponse UNLOCKED_RESPONSE =
+      LockHeartbeatResponse.newBuilder().setLockStatus(
+          LockHeartbeatResponse.LockStatus.UNLOCKED).build();
+  // timeout such that worker thread waits for 500ms for each heartbeat.
+  private static final LockHeartbeatResponse LOCKED_RESPONSE =
+      LockHeartbeatResponse.newBuilder().setLockStatus(
+          LockHeartbeatResponse.LockStatus.LOCKED).setTimeoutMs(10000).build();
+  private long procId;
+
+  // Setup mock admin.
+  LockServiceClient getAdmin() throws Exception {
+    conf.setInt("hbase.client.retries.number", 3);
+    conf.setInt("hbase.client.pause", 1);  // 1ms. Immediately retry rpc on failure.
+    return new LockServiceClient(conf, master, PerClientRandomNonceGenerator.get());
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    admin = getAdmin();
+    lockReqArgCaptor = ArgumentCaptor.forClass(LockRequest.class);
+    lockHeartbeatReqArgCaptor = ArgumentCaptor.forClass(LockHeartbeatRequest.class);
+    procId = new Random().nextLong();
+  }
+
+  private boolean waitLockTimeOut(EntityLock lock, long maxWaitTimeMillis) {
+    long startMillis = System.currentTimeMillis();
+    while (lock.isLocked()) {
+      Log.info("Sleeping...");
+      Threads.sleepWithoutInterrupt(100);
+      if (!lock.isLocked()) {
+        return true;
+      }
+      if (System.currentTimeMillis() - startMillis > maxWaitTimeMillis) {
+        Log.info("Timedout...");
+        return false;
+      }
+    }
+    return true; // to make compiler happy.
+  }
+
+  /**
+   * Test basic lock function - requestLock, await, unlock.
+   * @throws Exception
+   */
+  @Test
+  public void testEntityLock() throws Exception {
+    final long procId = 100;
+    final long workerSleepTime = 200;  // in ms
+    EntityLock lock = admin.namespaceLock("namespace", "description", null);
+    lock.setTestingSleepTime(workerSleepTime);
+
+    when(master.requestLock(any(), any())).thenReturn(
+        LockResponse.newBuilder().setProcId(procId).build());
+    when(master.lockHeartbeat(any(), any())).thenReturn(
+        UNLOCKED_RESPONSE, UNLOCKED_RESPONSE, UNLOCKED_RESPONSE, LOCKED_RESPONSE);
+
+    lock.requestLock();
+    // we return unlock response 3 times, so actual wait time should be around 2 * workerSleepTime
+    lock.await(4 * workerSleepTime, TimeUnit.MILLISECONDS);
+    assertTrue(lock.isLocked());
+    lock.unlock();
+    assertTrue(!lock.getWorker().isAlive());
+    assertFalse(lock.isLocked());
+
+    // check LockRequest in requestLock()
+    verify(master, times(1)).requestLock(any(), lockReqArgCaptor.capture());
+    LockRequest request = lockReqArgCaptor.getValue();
+    assertEquals("namespace", request.getNamespace());
+    assertEquals("description", request.getDescription());
+    assertEquals(LockType.EXCLUSIVE, request.getLockType());
+    assertEquals(0, request.getRegionInfoCount());
+
+    // check LockHeartbeatRequest in lockHeartbeat()
+    verify(master, atLeastOnce()).lockHeartbeat(any(), lockHeartbeatReqArgCaptor.capture());
+    for (LockHeartbeatRequest req : lockHeartbeatReqArgCaptor.getAllValues()) {
+      assertEquals(procId, req.getProcId());
+    }
+  }
+
+  /**
+   * Test that abort is called when lock times out.
+   */
+  @Test
+  public void testEntityLockTimeout() throws Exception {
+    final long workerSleepTime = 200;  // in ms
+    Abortable abortable = Mockito.mock(Abortable.class);
+    EntityLock lock = admin.namespaceLock("namespace", "description", abortable);
+    lock.setTestingSleepTime(workerSleepTime);
+
+    when(master.requestLock(any(), any()))
+        .thenReturn(LockResponse.newBuilder().setProcId(procId).build());
+    // Acquires the lock, but then it times out (since we don't call unlock() on it).
+    when(master.lockHeartbeat(any(), any()))
+      .thenReturn(LOCKED_RESPONSE, UNLOCKED_RESPONSE);
+
+    lock.requestLock();
+    lock.await();
+    assertTrue(lock.isLocked());
+    // Should get unlocked in next heartbeat i.e. after workerSleepTime. Wait 2x time.
+    assertTrue(waitLockTimeOut(lock, 2 * workerSleepTime));
+    assertFalse(lock.getWorker().isAlive());
+    verify(abortable, times(1)).abort(any(), eq(null));
+  }
+
+  /**
+   * Test that abort is called when lockHeartbeat fails with IOException.
+   */
+  @Test
+  public void testHeartbeatException() throws Exception {
+    final long workerSleepTime = 100;  // in ms
+    Abortable abortable = Mockito.mock(Abortable.class);
+    EntityLock lock = admin.namespaceLock("namespace", "description", abortable);
+    lock.setTestingSleepTime(workerSleepTime);
+
+    when(master.requestLock(any(), any()))
+        .thenReturn(LockResponse.newBuilder().setProcId(procId).build());
+    when(master.lockHeartbeat(any(), any()))
+        .thenReturn(LOCKED_RESPONSE)
+        .thenThrow(new ServiceException("Failed heartbeat!"));
+
+    lock.requestLock();
+    lock.await();
+    assertTrue(waitLockTimeOut(lock, 100 * workerSleepTime));
+    verify(abortable, times(1)).abort(any(), isA(HBaseIOException.class));
+    assertFalse(lock.getWorker().isAlive());
+  }
+}


[4/4] hbase git commit: HBASE-16744 Procedure V2 - Lock procedures to allow clients to acquire locks on tables/namespaces/regions (Matteo Bertozzi)

Posted by st...@apache.org.
HBASE-16744 Procedure V2 - Lock procedures to allow clients to acquire
locks on tables/namespaces/regions (Matteo Bertozzi)

Incorporates review comments from
    https://reviews.apache.org/r/52589/
    https://reviews.apache.org/r/54388/

M hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
 Fix for eclipse complaint (from Duo Zhang)

M hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
M hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
M hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
 Log formatting

M hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
 Added wait procedures utility.

A hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
A hbase-protocol-shaded/src/main/protobuf/LockService.proto
 Implement new locking CP overrides.

A hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/EntityLock.java
 New HBase entity lock (namespace, table, or regions)

A hbase-server/src/main/java/org/apache/hadoop/hbase/client/locking/LockServiceClient.java
 Client that can use the new internal locking service.
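For orientation only, a minimal sketch of how a client might drive the new locking
API follows; it is not part of the commit. The calls mirror those exercised in
TestEntityLocks above (namespaceLock(), requestLock(), await(), isLocked(), unlock());
how the LockServiceClient instance is obtained, and the exception handling, are assumed.

  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.hbase.Abortable;
  import org.apache.hadoop.hbase.client.locking.EntityLock;
  import org.apache.hadoop.hbase.client.locking.LockServiceClient;

  public class NamespaceLockSketch {
    // Hypothetical helper: lockClient and abortable are assumed to be supplied by the caller.
    static void withNamespaceLock(LockServiceClient lockClient, Abortable abortable)
        throws Exception {
      // Ask the master for an exclusive lock on namespace "ns1".
      EntityLock lock = lockClient.namespaceLock("ns1", "example: maintenance on ns1", abortable);
      lock.requestLock();                   // submit the lock request to the master
      lock.await(30, TimeUnit.SECONDS);     // bounded wait for the lock to be granted
      if (!lock.isLocked()) {
        return;                             // not granted within the timeout
      }
      try {
        // ... work that must not race with operations on ns1 ...
      } finally {
        lock.unlock();                      // stop heartbeating and release the lock
      }
    }
  }

A table- or region-scoped lock would presumably follow the same request/await/unlock
cycle, differing only in the factory call used to create the EntityLock.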


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4cb09a49
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4cb09a49
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4cb09a49

Branch: refs/heads/master
Commit: 4cb09a494c4148de2b4e8c6cd011bacdf7f33b1a
Parents: 9fd5dab1
Author: Michael Stack <st...@apache.org>
Authored: Wed Jan 11 14:38:59 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Fri Jan 13 21:07:03 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/client/AsyncTableBase.java     |    4 +-
 .../hadoop/hbase/procedure2/Procedure.java      |   10 +-
 .../hbase/procedure2/ProcedureExecutor.java     |   98 +-
 .../procedure2/store/wal/WALProcedureStore.java |    8 +-
 .../procedure2/ProcedureTestingUtility.java     |   12 +
 .../protobuf/generated/LockServiceProtos.java   | 5328 ++++++++++++++++++
 .../src/main/protobuf/LockService.proto         |   79 +
 .../hbase/rsgroup/RSGroupAdminEndpoint.java     |   54 +-
 .../hadoop/hbase/client/locking/EntityLock.java |  266 +
 .../hbase/client/locking/LockServiceClient.java |  111 +
 .../BaseMasterAndRegionObserver.java            |   23 +
 .../hbase/coprocessor/BaseMasterObserver.java   |   25 +-
 .../hbase/coprocessor/MasterObserver.java       |   36 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |    9 +-
 .../hbase/master/MasterCoprocessorHost.java     |   43 +
 .../hadoop/hbase/master/MasterRpcServices.java  |   68 +-
 .../hadoop/hbase/master/MasterServices.java     |    5 +
 .../hbase/master/locking/LockManager.java       |  271 +
 .../hbase/master/locking/LockProcedure.java     |  462 ++
 .../org/apache/hadoop/hbase/util/IdLock.java    |   12 +-
 .../hbase/client/locking/TestEntityLocks.java   |  182 +
 .../hbase/coprocessor/TestMasterObserver.java   |   64 +-
 .../hbase/master/MockNoopMasterServices.java    |    8 +-
 .../hadoop/hbase/master/MockRegionServer.java   |    1 +
 .../hbase/master/locking/TestLockManager.java   |  161 +
 .../hbase/master/locking/TestLockProcedure.java |  456 ++
 26 files changed, 7709 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
index 19a22c0..d80627f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBase.java
@@ -386,8 +386,8 @@ public interface AsyncTableBase {
    * @return A list of {@link CompletableFuture}s that represent the existence for each get.
    */
   default List<CompletableFuture<Boolean>> exists(List<Get> gets) {
-    return get(toCheckExistenceOnly(gets)).stream().map(f -> f.thenApply(r -> r.getExists()))
-        .collect(toList());
+    return get(toCheckExistenceOnly(gets)).stream().
+        <CompletableFuture<Boolean>>map(f -> f.thenApply(r -> r.getExists())).collect(toList());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
index cb4ee47..3f3cf33 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java
@@ -243,24 +243,24 @@ public abstract class Procedure<TEnvironment> implements Comparable<Procedure> {
     final StringBuilder sb = new StringBuilder();
     toStringClassDetails(sb);
 
-    sb.append(" id=");
+    sb.append(", procId=");
     sb.append(getProcId());
 
     if (hasParent()) {
-      sb.append(" parent=");
+      sb.append(", parent=");
       sb.append(getParentProcId());
     }
 
     if (hasOwner()) {
-      sb.append(" owner=");
+      sb.append(", owner=");
       sb.append(getOwner());
     }
 
-    sb.append(" state=");
+    sb.append(", state=");
     toStringState(sb);
 
     if (hasException()) {
-      sb.append(" failed=" + getException());
+      sb.append(", failed=" + getException());
     }
 
     return sb;

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
index c65f3fb..d3b65e8 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java
@@ -28,6 +28,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
@@ -91,7 +92,7 @@ public class ProcedureExecutor<TEnvironment> {
       final boolean kill = this.killBeforeStoreUpdate;
       if (this.toggleKillBeforeStoreUpdate) {
         this.killBeforeStoreUpdate = !kill;
-        LOG.warn("Toggle Kill before store update to: " + this.killBeforeStoreUpdate);
+        LOG.warn("Toggle KILL before store update to: " + this.killBeforeStoreUpdate);
       }
       return kill;
     }
@@ -172,7 +173,7 @@ public class ProcedureExecutor<TEnvironment> {
 
       final long now = EnvironmentEdgeManager.currentTime();
       final Iterator<Map.Entry<Long, ProcedureInfo>> it = completed.entrySet().iterator();
-      final boolean isDebugEnabled = LOG.isDebugEnabled();
+      final boolean debugEnabled = LOG.isDebugEnabled();
       while (it.hasNext() && store.isRunning()) {
         final Map.Entry<Long, ProcedureInfo> entry = it.next();
         final ProcedureInfo procInfo = entry.getValue();
@@ -180,8 +181,8 @@ public class ProcedureExecutor<TEnvironment> {
         // TODO: Select TTL based on Procedure type
         if ((procInfo.hasClientAckTime() && (now - procInfo.getClientAckTime()) >= evictAckTtl) ||
             (now - procInfo.getLastUpdate()) >= evictTtl) {
-          if (isDebugEnabled) {
-            LOG.debug("Evict completed procedure: " + procInfo);
+          if (debugEnabled) {
+            LOG.debug("Evict completed " + procInfo);
           }
           batchIds[batchCount++] = entry.getKey();
           if (batchCount == batchIds.length) {
@@ -281,7 +282,7 @@ public class ProcedureExecutor<TEnvironment> {
       @Override
       public void setMaxProcId(long maxProcId) {
         assert lastProcId.get() < 0 : "expected only one call to setMaxProcId()";
-        LOG.debug("load procedures maxProcId=" + maxProcId);
+        LOG.debug("Load maxProcId=" + maxProcId);
         lastProcId.set(maxProcId);
       }
 
@@ -295,7 +296,7 @@ public class ProcedureExecutor<TEnvironment> {
         int corruptedCount = 0;
         while (procIter.hasNext()) {
           ProcedureInfo proc = procIter.nextAsProcedureInfo();
-          LOG.error("corrupted procedure: " + proc);
+          LOG.error("Corrupt " + proc);
           corruptedCount++;
         }
         if (abortOnCorruption && corruptedCount > 0) {
@@ -307,7 +308,7 @@ public class ProcedureExecutor<TEnvironment> {
 
   private void loadProcedures(final ProcedureIterator procIter,
       final boolean abortOnCorruption) throws IOException {
-    final boolean isDebugEnabled = LOG.isDebugEnabled();
+    final boolean debugEnabled = LOG.isDebugEnabled();
 
     // 1. Build the rollback stack
     int runnablesCount = 0;
@@ -320,8 +321,8 @@ public class ProcedureExecutor<TEnvironment> {
         nonceKey = proc.getNonceKey();
         procId = proc.getProcId();
         completed.put(proc.getProcId(), proc);
-        if (isDebugEnabled) {
-          LOG.debug("The procedure is completed: " + proc);
+        if (debugEnabled) {
+          LOG.debug("Completed " + proc);
         }
       } else {
         Procedure proc = procIter.nextAsProcedure();
@@ -361,8 +362,8 @@ public class ProcedureExecutor<TEnvironment> {
       Procedure proc = procIter.nextAsProcedure();
       assert !(proc.isFinished() && !proc.hasParent()) : "unexpected completed proc=" + proc;
 
-      if (isDebugEnabled) {
-        LOG.debug(String.format("Loading procedure state=%s isFailed=%s: %s",
+      if (debugEnabled) {
+        LOG.debug(String.format("Loading state=%s isFailed=%s: %s",
                     proc.getState(), proc.hasException(), proc));
       }
 
@@ -425,7 +426,7 @@ public class ProcedureExecutor<TEnvironment> {
       if (procStack.isValid()) continue;
 
       for (Procedure proc: procStack.getSubproceduresStack()) {
-        LOG.error("corrupted procedure: " + proc);
+        LOG.error("Corrupted " + proc);
         procedures.remove(proc.getProcId());
         runnableList.remove(proc);
         if (waitingSet != null) waitingSet.remove(proc);
@@ -485,7 +486,7 @@ public class ProcedureExecutor<TEnvironment> {
     // We have numThreads executor + one timer thread used for timing out
     // procedures and triggering periodic procedures.
     this.corePoolSize = numThreads;
-    LOG.info("Starting procedure executor threads=" + corePoolSize);
+    LOG.info("Starting executor threads=" + corePoolSize);
 
     // Create the Thread Group for the executors
     threadGroup = new ThreadGroup("ProcedureExecutor");
@@ -506,7 +507,7 @@ public class ProcedureExecutor<TEnvironment> {
     st = EnvironmentEdgeManager.currentTime();
     store.recoverLease();
     et = EnvironmentEdgeManager.currentTime();
-    LOG.info(String.format("recover procedure store (%s) lease: %s",
+    LOG.info(String.format("Recover store (%s) lease: %s",
       store.getClass().getSimpleName(), StringUtils.humanTimeDiff(et - st)));
 
     // start the procedure scheduler
@@ -520,11 +521,11 @@ public class ProcedureExecutor<TEnvironment> {
     st = EnvironmentEdgeManager.currentTime();
     load(abortOnCorruption);
     et = EnvironmentEdgeManager.currentTime();
-    LOG.info(String.format("load procedure store (%s): %s",
+    LOG.info(String.format("Load store (%s): %s",
       store.getClass().getSimpleName(), StringUtils.humanTimeDiff(et - st)));
 
     // Start the executors. Here we must have the lastProcId set.
-    LOG.debug("start workers " + workerThreads.size());
+    LOG.debug("Start workers " + workerThreads.size());
     timeoutExecutor.start();
     for (WorkerThread worker: workerThreads) {
       worker.start();
@@ -542,7 +543,7 @@ public class ProcedureExecutor<TEnvironment> {
       return;
     }
 
-    LOG.info("Stopping the procedure executor");
+    LOG.info("Stopping");
     scheduler.stop();
     timeoutExecutor.sendStopSignal();
   }
@@ -564,7 +565,7 @@ public class ProcedureExecutor<TEnvironment> {
     try {
       threadGroup.destroy();
     } catch (IllegalThreadStateException e) {
-      LOG.error("thread group " + threadGroup + " contains running threads");
+      LOG.error("Thread group " + threadGroup + " contains running threads");
       threadGroup.list();
     } finally {
       threadGroup = null;
@@ -693,12 +694,12 @@ public class ProcedureExecutor<TEnvironment> {
 
     // we found a registered nonce, but the procedure may not have been submitted yet.
     // since the client expect the procedure to be submitted, spin here until it is.
-    final boolean isTraceEnabled = LOG.isTraceEnabled();
+    final boolean traceEnabled = LOG.isTraceEnabled();
     while (isRunning() &&
            !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) &&
            nonceKeysToProcIdsMap.containsKey(nonceKey)) {
-      if (isTraceEnabled) {
-        LOG.trace("waiting for procId=" + oldProcId.longValue() + " to be submitted");
+      if (traceEnabled) {
+        LOG.trace("Waiting for procId=" + oldProcId.longValue() + " to be submitted");
       }
       Threads.sleep(100);
     }
@@ -787,7 +788,7 @@ public class ProcedureExecutor<TEnvironment> {
     // Commit the transaction
     store.insert(proc, null);
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Procedure " + proc + " added to the store.");
+      LOG.debug("Stored " + proc);
     }
 
     // Add the procedure to the executor
@@ -811,7 +812,7 @@ public class ProcedureExecutor<TEnvironment> {
     // Commit the transaction
     store.insert(procs);
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Procedures added to the store: " + Arrays.toString(procs));
+      LOG.debug("Stored " + Arrays.toString(procs));
     }
 
     // Add the procedure to the executor
@@ -880,6 +881,14 @@ public class ProcedureExecutor<TEnvironment> {
     return procedures.get(procId);
   }
 
+  public <T extends Procedure> T getProcedure(final Class<T> clazz, final long procId) {
+    final Procedure proc = getProcedure(procId);
+    if (clazz.isInstance(proc)) {
+      return (T)proc;
+    }
+    return null;
+  }
+
   public ProcedureInfo getResult(final long procId) {
     return completed.get(procId);
   }
@@ -917,7 +926,7 @@ public class ProcedureExecutor<TEnvironment> {
     if (result == null) {
       assert !procedures.containsKey(procId) : "procId=" + procId + " is still running";
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Procedure procId=" + procId + " already removed by the cleaner.");
+        LOG.debug("procId=" + procId + " already removed by the cleaner.");
       }
       return;
     }
@@ -999,7 +1008,7 @@ public class ProcedureExecutor<TEnvironment> {
         try {
           listener.procedureLoaded(procId);
         } catch (Throwable e) {
-          LOG.error("The listener " + listener + " had an error: " + e.getMessage(), e);
+          LOG.error("Listener " + listener + " had an error: " + e.getMessage(), e);
         }
       }
     }
@@ -1011,7 +1020,7 @@ public class ProcedureExecutor<TEnvironment> {
         try {
           listener.procedureAdded(procId);
         } catch (Throwable e) {
-          LOG.error("The listener " + listener + " had an error: " + e.getMessage(), e);
+          LOG.error("Listener " + listener + " had an error: " + e.getMessage(), e);
         }
       }
     }
@@ -1023,7 +1032,7 @@ public class ProcedureExecutor<TEnvironment> {
         try {
           listener.procedureFinished(procId);
         } catch (Throwable e) {
-          LOG.error("The listener " + listener + " had an error: " + e.getMessage(), e);
+          LOG.error("Listener " + listener + " had an error: " + e.getMessage(), e);
         }
       }
     }
@@ -1053,6 +1062,11 @@ public class ProcedureExecutor<TEnvironment> {
     return lastProcId.get();
   }
 
+  @VisibleForTesting
+  public Set<Long> getActiveProcIds() {
+    return procedures.keySet();
+  }
+
   private Long getRootProcedureId(Procedure proc) {
     return Procedure.getRootProcedureId(procedures, proc);
   }
@@ -1111,7 +1125,7 @@ public class ProcedureExecutor<TEnvironment> {
 
       if (proc.isSuccess()) {
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Procedure completed in " +
+          LOG.debug("Completed in " +
               StringUtils.humanTimeDiff(proc.elapsedTime()) + ": " + proc);
         }
         // Finalize the procedure state
@@ -1203,7 +1217,7 @@ public class ProcedureExecutor<TEnvironment> {
     }
 
     // Finalize the procedure state
-    LOG.info("Rolledback procedure " + rootProc +
+    LOG.info("Rolled back " + rootProc +
              " exec-time=" + StringUtils.humanTimeDiff(rootProc.elapsedTime()) +
              " exception=" + exception.getMessage());
     procedureFinished(rootProc);
@@ -1220,7 +1234,7 @@ public class ProcedureExecutor<TEnvironment> {
       proc.doRollback(getEnvironment());
     } catch (IOException e) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("rollback attempt failed for " + proc, e);
+        LOG.debug("Roll back attempt failed for " + proc, e);
       }
       return false;
     } catch (InterruptedException e) {
@@ -1294,7 +1308,7 @@ public class ProcedureExecutor<TEnvironment> {
         isSuspended = true;
       } catch (ProcedureYieldException e) {
         if (LOG.isTraceEnabled()) {
-          LOG.trace("Yield procedure: " + procedure + ": " + e.getMessage());
+          LOG.trace("Yield " + procedure + ": " + e.getMessage());
         }
         scheduler.yield(procedure);
         return;
@@ -1418,8 +1432,8 @@ public class ProcedureExecutor<TEnvironment> {
     }
 
     // If this procedure is the last child awake the parent procedure
-    final boolean isTraceEnabled = LOG.isTraceEnabled();
-    if (isTraceEnabled) {
+    final boolean traceEnabled = LOG.isTraceEnabled();
+    if (traceEnabled) {
       LOG.trace(parent + " child is done: " + procedure);
     }
 
@@ -1427,7 +1441,7 @@ public class ProcedureExecutor<TEnvironment> {
       parent.setState(ProcedureState.RUNNABLE);
       store.update(parent);
       scheduler.addFront(parent);
-      if (isTraceEnabled) {
+      if (traceEnabled) {
         LOG.trace(parent + " all the children finished their work, resume.");
       }
       return;
@@ -1438,7 +1452,7 @@ public class ProcedureExecutor<TEnvironment> {
       final Procedure procedure, final Procedure[] subprocs) {
     if (subprocs != null && !procedure.isFailed()) {
       if (LOG.isTraceEnabled()) {
-        LOG.trace("Store add " + procedure + " children " + Arrays.toString(subprocs));
+        LOG.trace("Stored " + procedure + ", children " + Arrays.toString(subprocs));
       }
       store.insert(procedure, subprocs);
     } else {
@@ -1464,7 +1478,7 @@ public class ProcedureExecutor<TEnvironment> {
 
   private void handleInterruptedException(final Procedure proc, final InterruptedException e) {
     if (LOG.isTraceEnabled()) {
-      LOG.trace("got an interrupt during " + proc + ". suspend and retry it later.", e);
+      LOG.trace("Interrupt during " + proc + ". suspend and retry it later.", e);
     }
 
     // NOTE: We don't call Thread.currentThread().interrupt()
@@ -1530,7 +1544,7 @@ public class ProcedureExecutor<TEnvironment> {
 
     @Override
     public void run() {
-      final boolean isTraceEnabled = LOG.isTraceEnabled();
+      final boolean traceEnabled = LOG.isTraceEnabled();
       long lastUpdate = EnvironmentEdgeManager.currentTime();
       while (isRunning() && keepAlive(lastUpdate)) {
         final Procedure procedure = scheduler.poll(keepAliveTime, TimeUnit.MILLISECONDS);
@@ -1539,7 +1553,7 @@ public class ProcedureExecutor<TEnvironment> {
         store.setRunningProcedureCount(activeExecutorCount.incrementAndGet());
         executionStartTime.set(EnvironmentEdgeManager.currentTime());
         try {
-          if (isTraceEnabled) {
+          if (traceEnabled) {
             LOG.trace("Trying to start the execution of " + procedure);
           }
           executeProcedure(procedure);
@@ -1549,7 +1563,7 @@ public class ProcedureExecutor<TEnvironment> {
           executionStartTime.set(Long.MAX_VALUE);
         }
       }
-      LOG.debug("worker thread terminated " + this);
+      LOG.debug("Worker thread terminated " + this);
       workerThreads.remove(this);
     }
 
@@ -1691,7 +1705,7 @@ public class ProcedureExecutor<TEnvironment> {
           sendStopSignal();
           join(250);
           if (i > 0 && (i % 8) == 0) {
-            LOG.warn("waiting termination of thread " + getName() + ", " +
+            LOG.warn("Waiting termination of thread " + getName() + ", " +
               StringUtils.humanTimeDiff(EnvironmentEdgeManager.currentTime() - startTime));
           }
         }
@@ -1767,7 +1781,7 @@ public class ProcedureExecutor<TEnvironment> {
 
         // WARN the worker is stuck
         stuckCount++;
-        LOG.warn("found worker stuck " + worker +
+        LOG.warn("Worker stuck " + worker +
             " run time " + StringUtils.humanTimeDiff(worker.getCurrentRunTime()));
       }
       return stuckCount;
@@ -1785,7 +1799,7 @@ public class ProcedureExecutor<TEnvironment> {
         final WorkerThread worker = new WorkerThread(threadGroup);
         workerThreads.add(worker);
         worker.start();
-        LOG.debug("added a new worker thread " + worker);
+        LOG.debug("Added new worker thread " + worker);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 4465993..d4d5773 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -294,7 +294,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
 
   @Override
   public void setRunningProcedureCount(final int count) {
-    LOG.debug("set running procedure count=" + count + " slots=" + slots.length);
+    LOG.debug("Set running procedure count=" + count + ", slots=" + slots.length);
     this.runningProcCount = count > 0 ? Math.min(count, slots.length) : slots.length;
   }
 
@@ -326,7 +326,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
         try {
           flushLogId = initOldLogs(oldLogs);
         } catch (FileNotFoundException e) {
-          LOG.warn("someone else is active and deleted logs. retrying.", e);
+          LOG.warn("Someone else is active and deleted logs. retrying.", e);
           oldLogs = getLogFiles();
           continue;
         }
@@ -334,7 +334,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
         // Create new state-log
         if (!rollWriter(flushLogId + 1)) {
           // someone else has already created this log
-          LOG.debug("someone else has already created log " + flushLogId);
+          LOG.debug("Someone else has already created log " + flushLogId);
           continue;
         }
 
@@ -428,7 +428,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
     try {
       periodicRoll();
     } catch (IOException e) {
-      LOG.warn("unable to cleanup logs on load: " + e.getMessage(), e);
+      LOG.warn("Unable to cleanup logs on load: " + e.getMessage(), e);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
index 9edc711..8aa2088 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java
@@ -206,6 +206,18 @@ public class ProcedureTestingUtility {
     }
   }
 
+  public static <TEnv> void waitProcedures(ProcedureExecutor<TEnv> procExecutor, long... procIds) {
+    for (int i = 0; i < procIds.length; ++i) {
+      waitProcedure(procExecutor, procIds[i]);
+    }
+  }
+
+  public static <TEnv> void waitAllProcedures(ProcedureExecutor<TEnv> procExecutor) {
+    for (long procId : procExecutor.getActiveProcIds()) {
+      waitProcedure(procExecutor, procId);
+    }
+  }
+
   public static <TEnv> void waitNoProcedureRunning(ProcedureExecutor<TEnv> procExecutor) {
     int stableRuns = 0;
     while (stableRuns < 10) {


[3/4] hbase git commit: HBASE-16744 Procedure V2 - Lock procedures to allow clients to acquire locks on tables/namespaces/regions (Matteo Bertozzi)

Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb09a49/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
new file mode 100644
index 0000000..577b680
--- /dev/null
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
@@ -0,0 +1,5328 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: LockService.proto
+
+package org.apache.hadoop.hbase.shaded.protobuf.generated;
+
+public final class LockServiceProtos {
+  private LockServiceProtos() {}
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite registry) {
+  }
+
+  public static void registerAllExtensions(
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistry registry) {
+    registerAllExtensions(
+        (org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite) registry);
+  }
+  /**
+   * Protobuf enum {@code hbase.pb.LockType}
+   */
+  public enum LockType
+      implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <code>EXCLUSIVE = 1;</code>
+     */
+    EXCLUSIVE(1),
+    /**
+     * <code>SHARED = 2;</code>
+     */
+    SHARED(2),
+    ;
+
+    /**
+     * <code>EXCLUSIVE = 1;</code>
+     */
+    public static final int EXCLUSIVE_VALUE = 1;
+    /**
+     * <code>SHARED = 2;</code>
+     */
+    public static final int SHARED_VALUE = 2;
+
+
+    public final int getNumber() {
+      return value;
+    }
+
+    /**
+     * @deprecated Use {@link #forNumber(int)} instead.
+     */
+    @java.lang.Deprecated
+    public static LockType valueOf(int value) {
+      return forNumber(value);
+    }
+
+    public static LockType forNumber(int value) {
+      switch (value) {
+        case 1: return EXCLUSIVE;
+        case 2: return SHARED;
+        default: return null;
+      }
+    }
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<LockType>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+        LockType> internalValueMap =
+          new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<LockType>() {
+            public LockType findValueByNumber(int number) {
+              return LockType.forNumber(number);
+            }
+          };
+
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(ordinal());
+    }
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.getDescriptor().getEnumTypes().get(0);
+    }
+
+    private static final LockType[] VALUES = values();
+
+    public static LockType valueOf(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
+
+    private final int value;
+
+    private LockType(int value) {
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:hbase.pb.LockType)
+  }
+
+  public interface LockRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.LockRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 1;</code>
+     */
+    boolean hasLockType();
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType();
+
+    /**
+     * <code>optional string namespace = 2;</code>
+     */
+    boolean hasNamespace();
+    /**
+     * <code>optional string namespace = 2;</code>
+     */
+    java.lang.String getNamespace();
+    /**
+     * <code>optional string namespace = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getNamespaceBytes();
+
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 3;</code>
+     */
+    boolean hasTableName();
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName();
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder();
+
+    /**
+     * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> 
+        getRegionInfoList();
+    /**
+     * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index);
+    /**
+     * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+     */
+    int getRegionInfoCount();
+    /**
+     * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
+        getRegionInfoOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+        int index);
+
+    /**
+     * <code>optional string description = 5;</code>
+     */
+    boolean hasDescription();
+    /**
+     * <code>optional string description = 5;</code>
+     */
+    java.lang.String getDescription();
+    /**
+     * <code>optional string description = 5;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getDescriptionBytes();
+
+    /**
+     * <code>optional uint64 nonce_group = 6 [default = 0];</code>
+     */
+    boolean hasNonceGroup();
+    /**
+     * <code>optional uint64 nonce_group = 6 [default = 0];</code>
+     */
+    long getNonceGroup();
+
+    /**
+     * <code>optional uint64 nonce = 7 [default = 0];</code>
+     */
+    boolean hasNonce();
+    /**
+     * <code>optional uint64 nonce = 7 [default = 0];</code>
+     */
+    long getNonce();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.LockRequest}
+   */
+  public  static final class LockRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.LockRequest)
+      LockRequestOrBuilder {
+    // Use LockRequest.newBuilder() to construct.
+    private LockRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private LockRequest() {
+      lockType_ = 1;
+      namespace_ = "";
+      regionInfo_ = java.util.Collections.emptyList();
+      description_ = "";
+      nonceGroup_ = 0L;
+      nonce_ = 0L;
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private LockRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType value = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(1, rawValue);
+              } else {
+                bitField0_ |= 0x00000001;
+                lockType_ = rawValue;
+              }
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000002;
+              namespace_ = bs;
+              break;
+            }
+            case 26: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000004) == 0x00000004)) {
+                subBuilder = tableName_.toBuilder();
+              }
+              tableName_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(tableName_);
+                tableName_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000004;
+              break;
+            }
+            case 34: {
+              if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+                regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo>();
+                mutable_bitField0_ |= 0x00000008;
+              }
+              regionInfo_.add(
+                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry));
+              break;
+            }
+            case 42: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000008;
+              description_ = bs;
+              break;
+            }
+            case 48: {
+              bitField0_ |= 0x00000010;
+              nonceGroup_ = input.readUInt64();
+              break;
+            }
+            case 56: {
+              bitField0_ |= 0x00000020;
+              nonce_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
+          regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int LOCK_TYPE_FIELD_NUMBER = 1;
+    private int lockType_;
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 1;</code>
+     */
+    public boolean hasLockType() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType() {
+      org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(lockType_);
+      return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.EXCLUSIVE : result;
+    }
+
+    public static final int NAMESPACE_FIELD_NUMBER = 2;
+    private volatile java.lang.Object namespace_;
+    /**
+     * <code>optional string namespace = 2;</code>
+     */
+    public boolean hasNamespace() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional string namespace = 2;</code>
+     */
+    public java.lang.String getNamespace() {
+      java.lang.Object ref = namespace_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          namespace_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string namespace = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getNamespaceBytes() {
+      java.lang.Object ref = namespace_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        namespace_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    public static final int TABLE_NAME_FIELD_NUMBER = 3;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_;
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 3;</code>
+     */
+    public boolean hasTableName() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 3;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() {
+      return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+    }
+    /**
+     * <code>optional .hbase.pb.TableName table_name = 3;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+      return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+    }
+
+    public static final int REGION_INFO_FIELD_NUMBER = 4;
+    private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_;
+    /**
+     * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+     */
+    public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+      return regionInfo_;
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
+        getRegionInfoOrBuilderList() {
+      return regionInfo_;
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+     */
+    public int getRegionInfoCount() {
+      return regionInfo_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+      return regionInfo_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+        int index) {
+      return regionInfo_.get(index);
+    }
+
+    public static final int DESCRIPTION_FIELD_NUMBER = 5;
+    private volatile java.lang.Object description_;
+    /**
+     * <code>optional string description = 5;</code>
+     */
+    public boolean hasDescription() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * <code>optional string description = 5;</code>
+     */
+    public java.lang.String getDescription() {
+      java.lang.Object ref = description_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          description_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string description = 5;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getDescriptionBytes() {
+      java.lang.Object ref = description_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        description_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    public static final int NONCE_GROUP_FIELD_NUMBER = 6;
+    private long nonceGroup_;
+    /**
+     * <code>optional uint64 nonce_group = 6 [default = 0];</code>
+     */
+    public boolean hasNonceGroup() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    /**
+     * <code>optional uint64 nonce_group = 6 [default = 0];</code>
+     */
+    public long getNonceGroup() {
+      return nonceGroup_;
+    }
+
+    public static final int NONCE_FIELD_NUMBER = 7;
+    private long nonce_;
+    /**
+     * <code>optional uint64 nonce = 7 [default = 0];</code>
+     */
+    public boolean hasNonce() {
+      return ((bitField0_ & 0x00000020) == 0x00000020);
+    }
+    /**
+     * <code>optional uint64 nonce = 7 [default = 0];</code>
+     */
+    public long getNonce() {
+      return nonce_;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasLockType()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (hasTableName()) {
+        if (!getTableName().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      for (int i = 0; i < getRegionInfoCount(); i++) {
+        if (!getRegionInfo(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeEnum(1, lockType_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 2, namespace_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeMessage(3, getTableName());
+      }
+      for (int i = 0; i < regionInfo_.size(); i++) {
+        output.writeMessage(4, regionInfo_.get(i));
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 5, description_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeUInt64(6, nonceGroup_);
+      }
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        output.writeUInt64(7, nonce_);
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeEnumSize(1, lockType_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(2, namespace_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(3, getTableName());
+      }
+      for (int i = 0; i < regionInfo_.size(); i++) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(4, regionInfo_.get(i));
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(5, description_);
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(6, nonceGroup_);
+      }
+      if (((bitField0_ & 0x00000020) == 0x00000020)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(7, nonce_);
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest) obj;
+
+      boolean result = true;
+      result = result && (hasLockType() == other.hasLockType());
+      if (hasLockType()) {
+        result = result && lockType_ == other.lockType_;
+      }
+      result = result && (hasNamespace() == other.hasNamespace());
+      if (hasNamespace()) {
+        result = result && getNamespace()
+            .equals(other.getNamespace());
+      }
+      result = result && (hasTableName() == other.hasTableName());
+      if (hasTableName()) {
+        result = result && getTableName()
+            .equals(other.getTableName());
+      }
+      result = result && getRegionInfoList()
+          .equals(other.getRegionInfoList());
+      result = result && (hasDescription() == other.hasDescription());
+      if (hasDescription()) {
+        result = result && getDescription()
+            .equals(other.getDescription());
+      }
+      result = result && (hasNonceGroup() == other.hasNonceGroup());
+      if (hasNonceGroup()) {
+        result = result && (getNonceGroup()
+            == other.getNonceGroup());
+      }
+      result = result && (hasNonce() == other.hasNonce());
+      if (hasNonce()) {
+        result = result && (getNonce()
+            == other.getNonce());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasLockType()) {
+        hash = (37 * hash) + LOCK_TYPE_FIELD_NUMBER;
+        hash = (53 * hash) + lockType_;
+      }
+      if (hasNamespace()) {
+        hash = (37 * hash) + NAMESPACE_FIELD_NUMBER;
+        hash = (53 * hash) + getNamespace().hashCode();
+      }
+      if (hasTableName()) {
+        hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getTableName().hashCode();
+      }
+      if (getRegionInfoCount() > 0) {
+        hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
+        hash = (53 * hash) + getRegionInfoList().hashCode();
+      }
+      if (hasDescription()) {
+        hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER;
+        hash = (53 * hash) + getDescription().hashCode();
+      }
+      if (hasNonceGroup()) {
+        hash = (37 * hash) + NONCE_GROUP_FIELD_NUMBER;
+        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+            getNonceGroup());
+      }
+      if (hasNonce()) {
+        hash = (37 * hash) + NONCE_FIELD_NUMBER;
+        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+            getNonce());
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.LockRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.LockRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getTableNameFieldBuilder();
+          getRegionInfoFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        lockType_ = 1;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        namespace_ = "";
+        bitField0_ = (bitField0_ & ~0x00000002);
+        if (tableNameBuilder_ == null) {
+          tableName_ = null;
+        } else {
+          tableNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000004);
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000008);
+        } else {
+          regionInfoBuilder_.clear();
+        }
+        description_ = "";
+        bitField0_ = (bitField0_ & ~0x00000010);
+        nonceGroup_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000020);
+        nonce_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000040);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.lockType_ = lockType_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.namespace_ = namespace_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        if (tableNameBuilder_ == null) {
+          result.tableName_ = tableName_;
+        } else {
+          result.tableName_ = tableNameBuilder_.build();
+        }
+        if (regionInfoBuilder_ == null) {
+          if (((bitField0_ & 0x00000008) == 0x00000008)) {
+            regionInfo_ = java.util.Collections.unmodifiableList(regionInfo_);
+            bitField0_ = (bitField0_ & ~0x00000008);
+          }
+          result.regionInfo_ = regionInfo_;
+        } else {
+          result.regionInfo_ = regionInfoBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        result.description_ = description_;
+        if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        result.nonceGroup_ = nonceGroup_;
+        if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
+          to_bitField0_ |= 0x00000020;
+        }
+        result.nonce_ = nonce_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest.getDefaultInstance()) return this;
+        if (other.hasLockType()) {
+          setLockType(other.getLockType());
+        }
+        if (other.hasNamespace()) {
+          bitField0_ |= 0x00000002;
+          namespace_ = other.namespace_;
+          onChanged();
+        }
+        if (other.hasTableName()) {
+          mergeTableName(other.getTableName());
+        }
+        if (regionInfoBuilder_ == null) {
+          if (!other.regionInfo_.isEmpty()) {
+            if (regionInfo_.isEmpty()) {
+              regionInfo_ = other.regionInfo_;
+              bitField0_ = (bitField0_ & ~0x00000008);
+            } else {
+              ensureRegionInfoIsMutable();
+              regionInfo_.addAll(other.regionInfo_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.regionInfo_.isEmpty()) {
+            if (regionInfoBuilder_.isEmpty()) {
+              regionInfoBuilder_.dispose();
+              regionInfoBuilder_ = null;
+              regionInfo_ = other.regionInfo_;
+              bitField0_ = (bitField0_ & ~0x00000008);
+              regionInfoBuilder_ = 
+                org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
+                   getRegionInfoFieldBuilder() : null;
+            } else {
+              regionInfoBuilder_.addAllMessages(other.regionInfo_);
+            }
+          }
+        }
+        if (other.hasDescription()) {
+          bitField0_ |= 0x00000010;
+          description_ = other.description_;
+          onChanged();
+        }
+        if (other.hasNonceGroup()) {
+          setNonceGroup(other.getNonceGroup());
+        }
+        if (other.hasNonce()) {
+          setNonce(other.getNonce());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasLockType()) {
+          return false;
+        }
+        if (hasTableName()) {
+          if (!getTableName().isInitialized()) {
+            return false;
+          }
+        }
+        for (int i = 0; i < getRegionInfoCount(); i++) {
+          if (!getRegionInfo(i).isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private int lockType_ = 1;
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 1;</code>
+       */
+      public boolean hasLockType() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(lockType_);
+        return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.EXCLUSIVE : result;
+      }
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 1;</code>
+       */
+      public Builder setLockType(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        lockType_ = value.getNumber();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 1;</code>
+       */
+      public Builder clearLockType() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        lockType_ = 1;
+        onChanged();
+        return this;
+      }
+
+      private java.lang.Object namespace_ = "";
+      /**
+       * <code>optional string namespace = 2;</code>
+       */
+      public boolean hasNamespace() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional string namespace = 2;</code>
+       */
+      public java.lang.String getNamespace() {
+        java.lang.Object ref = namespace_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            namespace_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string namespace = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getNamespaceBytes() {
+        java.lang.Object ref = namespace_;
+        if (ref instanceof String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          namespace_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string namespace = 2;</code>
+       */
+      public Builder setNamespace(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        namespace_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string namespace = 2;</code>
+       */
+      public Builder clearNamespace() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        namespace_ = getDefaultInstance().getNamespace();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string namespace = 2;</code>
+       */
+      public Builder setNamespaceBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        namespace_ = value;
+        onChanged();
+        return this;
+      }
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName tableName_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> tableNameBuilder_;
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 3;</code>
+       */
+      public boolean hasTableName() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 3;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName getTableName() {
+        if (tableNameBuilder_ == null) {
+          return tableName_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+        } else {
+          return tableNameBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 3;</code>
+       */
+      public Builder setTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          tableName_ = value;
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 3;</code>
+       */
+      public Builder setTableName(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder builderForValue) {
+        if (tableNameBuilder_ == null) {
+          tableName_ = builderForValue.build();
+          onChanged();
+        } else {
+          tableNameBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 3;</code>
+       */
+      public Builder mergeTableName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName value) {
+        if (tableNameBuilder_ == null) {
+          if (((bitField0_ & 0x00000004) == 0x00000004) &&
+              tableName_ != null &&
+              tableName_ != org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance()) {
+            tableName_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.newBuilder(tableName_).mergeFrom(value).buildPartial();
+          } else {
+            tableName_ = value;
+          }
+          onChanged();
+        } else {
+          tableNameBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000004;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 3;</code>
+       */
+      public Builder clearTableName() {
+        if (tableNameBuilder_ == null) {
+          tableName_ = null;
+          onChanged();
+        } else {
+          tableNameBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000004);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 3;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder getTableNameBuilder() {
+        bitField0_ |= 0x00000004;
+        onChanged();
+        return getTableNameFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 3;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder getTableNameOrBuilder() {
+        if (tableNameBuilder_ != null) {
+          return tableNameBuilder_.getMessageOrBuilder();
+        } else {
+          return tableName_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.getDefaultInstance() : tableName_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.TableName table_name = 3;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder> 
+          getTableNameFieldBuilder() {
+        if (tableNameBuilder_ == null) {
+          tableNameBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableName.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableNameOrBuilder>(
+                  getTableName(),
+                  getParentForChildren(),
+                  isClean());
+          tableName_ = null;
+        }
+        return tableNameBuilder_;
+      }
+
+      private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> regionInfo_ =
+        java.util.Collections.emptyList();
+      private void ensureRegionInfoIsMutable() {
+        if (!((bitField0_ & 0x00000008) == 0x00000008)) {
+          regionInfo_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo>(regionInfo_);
+          bitField0_ |= 0x00000008;
+         }
+      }
+
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
+
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> getRegionInfoList() {
+        if (regionInfoBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(regionInfo_);
+        } else {
+          return regionInfoBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public int getRegionInfoCount() {
+        if (regionInfoBuilder_ == null) {
+          return regionInfo_.size();
+        } else {
+          return regionInfoBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo(int index) {
+        if (regionInfoBuilder_ == null) {
+          return regionInfo_.get(index);
+        } else {
+          return regionInfoBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public Builder setRegionInfo(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionInfoBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureRegionInfoIsMutable();
+          regionInfo_.set(index, value);
+          onChanged();
+        } else {
+          regionInfoBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public Builder setRegionInfo(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+        if (regionInfoBuilder_ == null) {
+          ensureRegionInfoIsMutable();
+          regionInfo_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          regionInfoBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public Builder addRegionInfo(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionInfoBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureRegionInfoIsMutable();
+          regionInfo_.add(value);
+          onChanged();
+        } else {
+          regionInfoBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public Builder addRegionInfo(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo value) {
+        if (regionInfoBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureRegionInfoIsMutable();
+          regionInfo_.add(index, value);
+          onChanged();
+        } else {
+          regionInfoBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public Builder addRegionInfo(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+        if (regionInfoBuilder_ == null) {
+          ensureRegionInfoIsMutable();
+          regionInfo_.add(builderForValue.build());
+          onChanged();
+        } else {
+          regionInfoBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public Builder addRegionInfo(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
+        if (regionInfoBuilder_ == null) {
+          ensureRegionInfoIsMutable();
+          regionInfo_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          regionInfoBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public Builder addAllRegionInfo(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo> values) {
+        if (regionInfoBuilder_ == null) {
+          ensureRegionInfoIsMutable();
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll(
+              values, regionInfo_);
+          onChanged();
+        } else {
+          regionInfoBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public Builder clearRegionInfo() {
+        if (regionInfoBuilder_ == null) {
+          regionInfo_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000008);
+          onChanged();
+        } else {
+          regionInfoBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public Builder removeRegionInfo(int index) {
+        if (regionInfoBuilder_ == null) {
+          ensureRegionInfoIsMutable();
+          regionInfo_.remove(index);
+          onChanged();
+        } else {
+          regionInfoBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder(
+          int index) {
+        return getRegionInfoFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder(
+          int index) {
+        if (regionInfoBuilder_ == null) {
+          return regionInfo_.get(index);
+        } else {
+          return regionInfoBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
+           getRegionInfoOrBuilderList() {
+        if (regionInfoBuilder_ != null) {
+          return regionInfoBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(regionInfo_);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder() {
+        return getRegionInfoFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder addRegionInfoBuilder(
+          int index) {
+        return getRegionInfoFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.RegionInfo region_info = 4;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder> 
+           getRegionInfoBuilderList() {
+        return getRegionInfoFieldBuilder().getBuilderList();
+      }
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> 
+          getRegionInfoFieldBuilder() {
+        if (regionInfoBuilder_ == null) {
+          regionInfoBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
+                  regionInfo_,
+                  ((bitField0_ & 0x00000008) == 0x00000008),
+                  getParentForChildren(),
+                  isClean());
+          regionInfo_ = null;
+        }
+        return regionInfoBuilder_;
+      }
+
+      private java.lang.Object description_ = "";
+      /**
+       * <code>optional string description = 5;</code>
+       */
+      public boolean hasDescription() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * <code>optional string description = 5;</code>
+       */
+      public java.lang.String getDescription() {
+        java.lang.Object ref = description_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            description_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string description = 5;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getDescriptionBytes() {
+        java.lang.Object ref = description_;
+        if (ref instanceof String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          description_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string description = 5;</code>
+       */
+      public Builder setDescription(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000010;
+        description_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string description = 5;</code>
+       */
+      public Builder clearDescription() {
+        bitField0_ = (bitField0_ & ~0x00000010);
+        description_ = getDefaultInstance().getDescription();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string description = 5;</code>
+       */
+      public Builder setDescriptionBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000010;
+        description_ = value;
+        onChanged();
+        return this;
+      }
+
+      private long nonceGroup_ ;
+      /**
+       * <code>optional uint64 nonce_group = 6 [default = 0];</code>
+       */
+      public boolean hasNonceGroup() {
+        return ((bitField0_ & 0x00000020) == 0x00000020);
+      }
+      /**
+       * <code>optional uint64 nonce_group = 6 [default = 0];</code>
+       */
+      public long getNonceGroup() {
+        return nonceGroup_;
+      }
+      /**
+       * <code>optional uint64 nonce_group = 6 [default = 0];</code>
+       */
+      public Builder setNonceGroup(long value) {
+        bitField0_ |= 0x00000020;
+        nonceGroup_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 nonce_group = 6 [default = 0];</code>
+       */
+      public Builder clearNonceGroup() {
+        bitField0_ = (bitField0_ & ~0x00000020);
+        nonceGroup_ = 0L;
+        onChanged();
+        return this;
+      }
+
+      private long nonce_ ;
+      /**
+       * <code>optional uint64 nonce = 7 [default = 0];</code>
+       */
+      public boolean hasNonce() {
+        return ((bitField0_ & 0x00000040) == 0x00000040);
+      }
+      /**
+       * <code>optional uint64 nonce = 7 [default = 0];</code>
+       */
+      public long getNonce() {
+        return nonce_;
+      }
+      /**
+       * <code>optional uint64 nonce = 7 [default = 0];</code>
+       */
+      public Builder setNonce(long value) {
+        bitField0_ |= 0x00000040;
+        nonce_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional uint64 nonce = 7 [default = 0];</code>
+       */
+      public Builder clearNonce() {
+        bitField0_ = (bitField0_ & ~0x00000040);
+        nonce_ = 0L;
+        onChanged();
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.LockRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.LockRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<LockRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<LockRequest>() {
+      public LockRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new LockRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<LockRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<LockRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
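+  /*
+   * Usage sketch (illustrative only, not part of the generated file): building a
+   * LockRequest for an exclusive namespace lock through the generated Builder API
+   * above. The namespace, description and nonce values are placeholders.
+   *
+   *   LockServiceProtos.LockRequest request =
+   *       LockServiceProtos.LockRequest.newBuilder()
+   *           .setLockType(LockServiceProtos.LockType.EXCLUSIVE)
+   *           .setNamespace("ns1")
+   *           .setDescription("exclusive lock on namespace ns1")
+   *           .setNonceGroup(1L)
+   *           .setNonce(1L)
+   *           .build();
+   */
+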
+  public interface LockResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.LockResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required uint64 proc_id = 1;</code>
+     */
+    boolean hasProcId();
+    /**
+     * <code>required uint64 proc_id = 1;</code>
+     */
+    long getProcId();
+  }
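+
+  /*
+   * Usage sketch (illustrative only, not part of the generated file): a
+   * LockResponse, defined below, carries only the id of the procedure created
+   * for the lock request; responseBytes stands in for bytes returned by the RPC.
+   *
+   *   LockServiceProtos.LockResponse response =
+   *       LockServiceProtos.LockResponse.parseFrom(responseBytes);
+   *   if (response.hasProcId()) {
+   *     long procId = response.getProcId();
+   *   }
+   */
+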
+  /**
+   * Protobuf type {@code hbase.pb.LockResponse}
+   */
+  public  static final class LockResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.LockResponse)
+      LockResponseOrBuilder {
+    // Use LockResponse.newBuilder() to construct.
+    private LockResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private LockResponse() {
+      procId_ = 0L;
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private LockResponse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              bitField0_ |= 0x00000001;
+              procId_ = input.readUInt64();
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockResponse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int PROC_ID_FIELD_NUMBER = 1;
+    private long procId_;
+    /**
+     * <code>required uint64 proc_id = 1;</code>
+     */
+    public boolean hasProcId() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required uint64 proc_id = 1;</code>
+     */
+    public long getProcId() {
+      return procId_;
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasProcId()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeUInt64(1, procId_);
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeUInt64Size(1, procId_);
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse) obj;
+
+      boolean result = true;
+      result = result && (hasProcId() == other.hasProcId());
+      if (hasProcId()) {
+        result = result && (getProcId()
+            == other.getProcId());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptorForType().hashCode();
+      if (hasProcId()) {
+        hash = (37 * hash) + PROC_ID_FIELD_NUMBER;
+        hash = (53 * hash) + org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.hashLong(
+            getProcId());
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.LockResponse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.LockResponse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockResponse_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        procId_ = 0L;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.procId_ = procId_;
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse.getDefaultInstance()) return this;
+        if (other.hasProcId()) {
+          setProcId(other.getProcId());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasProcId()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private long procId_ ;
+      /**
+       * <code>required uint64 proc_id = 1;</code>
+       */
+      public boolean hasProcId() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required uint64 proc_id = 1;</code>
+       */
+      public long getProcId() {
+        return procId_;
+      }
+      /**
+       * <code>required uint64 proc_id = 1;</code>
+       */
+      public Builder setProcId(long value) {
+        bitField0_ |= 0x00000001;
+        procId_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required uint64 proc_id = 1;</code>
+       */
+      public Builder clearProcId() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        procId_ = 0L;
+        onChanged();
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.LockResponse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.LockResponse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<LockResponse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<LockResponse>() {
+      public LockResponse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new LockResponse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<LockResponse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<LockResponse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface LockHeartbeatRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.LockHeartbeatRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required uint64 proc_id = 1;</code>
+     */
+    boolean hasProcId();
+    /**
+     * <code>required uint64 proc_id = 1;</code>
+     */
+    long getProcId();
+
+    /**
+     * <code>optional bool keep_alive = 2 [default = true];</code>
+     */
+    boolean hasKeepAlive();
+    /**
+     * <code>optional bool keep_alive = 2 [default = true];</code>
+     */
+    boolean getKeepAlive();
+  }
+  /**
+   * Protobuf type {@code hbase.pb.LockHeartbeatRequest}
+   */
+  public  static final class LockHeartbeatRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessa

<TRUNCATED>