Posted to commits@hbase.apache.org by st...@apache.org on 2017/04/25 17:19:57 UTC

[1/4] hbase git commit: HBASE-15143 Procedure v2 - Web UI displaying queues

Repository: hbase
Updated Branches:
  refs/heads/master 1367519cd -> 255750641


http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-protocol-shaded/src/main/protobuf/LockService.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index 0df7f2e..1898e68 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -25,6 +25,7 @@ option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;
 
 import "HBase.proto";
+import "Procedure.proto";
 
 enum LockType {
   EXCLUSIVE = 1;
@@ -70,6 +71,27 @@ message LockProcedureData {
   optional bool is_master_lock = 6 [default = false];
 }
 
+enum ResourceType {
+  RESOURCE_TYPE_SERVER = 1;
+  RESOURCE_TYPE_NAMESPACE = 2;
+  RESOURCE_TYPE_TABLE = 3;
+  RESOURCE_TYPE_REGION = 4;
+}
+
+message WaitingProcedure {
+  required LockType lock_type = 1;
+  required Procedure procedure = 2;
+}
+
+message LockInfo {
+  required ResourceType resource_type = 1;
+  optional string resource_name = 2;
+  required LockType lock_type = 3;
+  optional Procedure exclusive_lock_owner_procedure = 4;
+  optional int32 shared_lock_count = 5;
+  repeated WaitingProcedure waitingProcedures = 6;
+}
+
 service LockService {
   /** Acquire lock on namespace/table/region */
   rpc RequestLock(LockRequest) returns(LockResponse);
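
As a reading aid, the sketch below shows how the generated LockInfo message can be inspected, using only the accessors protoc derives from the fields above. The describe() helper and its formatting are hypothetical, not part of the patch:

    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure;

    /** Hypothetical helper: render one LockInfo as a one-line summary. */
    public class LockInfoFormatter {
      public static String describe(LockInfo lock) {
        StringBuilder sb = new StringBuilder();
        sb.append(lock.getResourceType());
        if (lock.hasResourceName()) {
          sb.append(' ').append(lock.getResourceName());
        }
        if (lock.getLockType() == LockType.EXCLUSIVE
            && lock.hasExclusiveLockOwnerProcedure()) {
          // proc_id comes from the Procedure message imported from Procedure.proto
          sb.append(", exclusive, owner proc ")
            .append(lock.getExclusiveLockOwnerProcedure().getProcId());
        } else if (lock.hasSharedLockCount()) {
          sb.append(", shared x").append(lock.getSharedLockCount());
        }
        // the repeated field 'waitingProcedures' yields getWaitingProceduresList()
        for (WaitingProcedure wp : lock.getWaitingProceduresList()) {
          sb.append(", waiting: ").append(wp.getLockType())
            .append(" proc ").append(wp.getProcedure().getProcId());
        }
        return sb.toString();
      }
    }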

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-protocol-shaded/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index d7d51e2..0c3da02 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -30,6 +30,7 @@ import "HBase.proto";
 import "Client.proto";
 import "ClusterStatus.proto";
 import "ErrorHandling.proto";
+import "LockService.proto";
 import "Procedure.proto";
 import "Quota.proto";
 import "Replication.proto";
@@ -534,6 +535,13 @@ message ListProceduresResponse {
   repeated Procedure procedure = 1;
 }
 
+message ListLocksRequest {
+}
+
+message ListLocksResponse {
+  repeated LockInfo lock = 1;
+}
+
 message SetQuotaRequest {
   optional string user_name = 1;
   optional string user_group = 2;
@@ -888,6 +896,9 @@ service MasterService {
   rpc ListProcedures(ListProceduresRequest)
     returns(ListProceduresResponse);
 
+  rpc ListLocks(ListLocksRequest)
+    returns(ListLocksResponse);
+
   /** Add a replication peer */
   rpc AddReplicationPeer(AddReplicationPeerRequest)
     returns(AddReplicationPeerResponse);
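
The new RPC is callable through the generated blocking stub. A client-side sketch, assuming a MasterService.BlockingInterface stub already obtained through HBase's connection plumbing (elided here); passing a null controller mirrors common usage in the codebase:

    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;

    /** Sketch: list locks via the generated MasterService blocking stub. */
    public class ListLocksClient {
      static void dump(MasterService.BlockingInterface stub) throws Exception {
        ListLocksResponse resp =
            stub.listLocks(null, ListLocksRequest.getDefaultInstance());
        for (LockInfo lock : resp.getLockList()) {
          System.out.println(lock.getResourceType() + " " + lock.getResourceName());
        }
      }
    }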

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 36d5112..e1a47c5 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -125,7 +125,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
                 <ul class="nav navbar-nav">
                 <li class="active"><a href="/">Home</a></li>
                 <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                <li><a href="/procedures.jsp">Procedures</a></li>
+                <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
                 <li><a href="/logs/">Local Logs</a></li>
                 <li><a href="/logLevel">Log Level</a></li>
                 <li><a href="/dump">Debug Dump</a></li>

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index aab852c..ad8aa14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -983,6 +984,24 @@ public interface MasterObserver extends Coprocessor {
       List<ProcedureInfo> procInfoList) throws IOException {}
 
   /**
+   * Called before a listLocks request has been processed.
+   * @param ctx the environment to interact with the framework and master
+   * @throws IOException if something went wrong
+   */
+  default void preListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx)
+      throws IOException {}
+
+  /**
+   * Called after a listLocks request has been processed.
+   * @param ctx the environment to interact with the framework and master
+   * @param lockInfoList the list of locks about to be returned
+   * @throws IOException if something went wrong
+   */
+  default void postListLocks(
+      ObserverContext<MasterCoprocessorEnvironment> ctx,
+      List<LockInfo> lockInfoList) throws IOException {}
+
+  /**
    * Called prior to moving a given region from one region server to another.
    * @param ctx the environment to interact with the framework and master
    * @param region the HRegionInfo
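
Since both hooks are default methods, an observer overrides only what it needs. A hypothetical coprocessor that audits lock listings might look like the sketch below; the class name and log messages are illustrative, and the explicit start/stop overrides are included in case the base Coprocessor interface does not default them:

    import java.io.IOException;
    import java.util.List;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.CoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.procedure2.LockInfo;

    /** Hypothetical observer: audits listLocks calls. */
    public class LockAuditObserver implements MasterObserver {
      private static final Log LOG = LogFactory.getLog(LockAuditObserver.class);

      @Override
      public void start(CoprocessorEnvironment env) throws IOException {}

      @Override
      public void stop(CoprocessorEnvironment env) throws IOException {}

      @Override
      public void preListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx)
          throws IOException {
        LOG.debug("listLocks requested");
      }

      @Override
      public void postListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx,
          List<LockInfo> lockInfoList) throws IOException {
        LOG.debug("listLocks returning " + lockInfoList.size() + " lock(s)");
      }
    }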

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index f9670e1..e4ba285 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.PleaseHoldException;
 import org.apache.hadoop.hbase.ProcedureInfo;
-import org.apache.hadoop.hbase.RegionStateListener;
 import org.apache.hadoop.hbase.ScheduledChore;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
@@ -113,6 +112,7 @@ import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.master.procedure.MergeTableRegionsProcedure;
 import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
@@ -128,6 +128,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
@@ -979,7 +980,7 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   void initQuotaManager() throws IOException {
     MasterQuotaManager quotaManager = new MasterQuotaManager(this);
-    this.assignmentManager.setRegionStateListener((RegionStateListener)quotaManager);
+    this.assignmentManager.setRegionStateListener(quotaManager);
     quotaManager.start();
     this.quotaManager = quotaManager;
   }
@@ -1141,8 +1142,8 @@ public class HMaster extends HRegionServer implements MasterServices {
     procedureStore = new WALProcedureStore(conf, walDir.getFileSystem(conf), walDir,
         new MasterProcedureEnv.WALStoreLeaseRecovery(this));
     procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
-    procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
-        procEnv.getProcedureScheduler());
+    MasterProcedureScheduler procedureScheduler = procEnv.getProcedureScheduler();
+    procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
     configurationManager.registerObserver(procEnv);
 
     final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
@@ -2909,6 +2910,34 @@ public class HMaster extends HRegionServer implements MasterServices {
     return procInfoList;
   }
 
+  private Map<Long, ProcedureInfo> getProcedureInfos() {
+    final List<ProcedureInfo> list = procedureExecutor.listProcedures();
+    final Map<Long, ProcedureInfo> map = new HashMap<>();
+
+    for (ProcedureInfo procedureInfo : list) {
+      map.put(procedureInfo.getProcId(), procedureInfo);
+    }
+
+    return map;
+  }
+
+  @Override
+  public List<LockInfo> listLocks() throws IOException {
+    if (cpHost != null) {
+      cpHost.preListLocks();
+    }
+
+    MasterProcedureScheduler procedureScheduler = procedureExecutor.getEnvironment().getProcedureScheduler();
+
+    final List<LockInfo> lockInfoList = procedureScheduler.listLocks();
+
+    if (cpHost != null) {
+      cpHost.postListLocks(lockInfoList);
+    }
+
+    return lockInfoList;
+  }
+
   /**
    * Returns the list of table descriptors that match the specified request
    * @param namespace the namespace to query, or null if querying for all
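
The LockInfo values returned by listLocks() are plain beans whose accessors are exercised by the updated procedures.jsp later in this patch. A minimal sketch rendering the result as text, using only those accessors:

    import java.util.List;
    import org.apache.hadoop.hbase.procedure2.LockInfo;

    /** Sketch: plain-text rendering of listLocks() output. */
    public class LockPrinter {
      static void print(List<LockInfo> locks) {
        for (LockInfo lock : locks) {
          System.out.println(lock.getResourceType() + ": " + lock.getResourceName());
          switch (lock.getLockType()) {
          case EXCLUSIVE:
            System.out.println("  exclusive, owner proc "
                + lock.getExclusiveLockOwnerProcedure().getProcId());
            break;
          case SHARED:
            System.out.println("  shared, " + lock.getSharedLockCount() + " holder(s)");
            break;
          }
          for (LockInfo.WaitingProcedure wp : lock.getWaitingProcedures()) {
            System.out.println("  waiting: " + wp.getLockType()
                + ", proc " + wp.getProcedure().getProcId());
          }
        }
      }
    }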

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 8a7a387..2f5e66e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.User;
@@ -706,6 +707,26 @@ public class MasterCoprocessorHost
     });
   }
 
+  public boolean preListLocks() throws IOException {
+    return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.preListLocks(ctx);
+      }
+    });
+  }
+
+  public void postListLocks(final List<LockInfo> lockInfoList) throws IOException {
+    execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+      @Override
+      public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+          throws IOException {
+        oserver.postListLocks(ctx, lockInfoList);
+      }
+    });
+  }
+
   public boolean preMove(final HRegionInfo region, final ServerName srcServer,
       final ServerName destServer) throws IOException {
     return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 9af8f45..40c4a71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -56,8 +56,8 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
 import org.apache.hadoop.hbase.regionserver.RSRpcServices;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -86,129 +86,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockH
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.*;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
@@ -1108,7 +987,7 @@ public class MasterRpcServices extends RSRpcServices
         }
         master.getMasterProcedureExecutor().removeResult(request.getProcId());
       } else {
-        Procedure proc = v.getSecond();
+        Procedure<?> proc = v.getSecond();
         if (proc == null) {
           builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
         } else {
@@ -1160,7 +1039,7 @@ public class MasterRpcServices extends RSRpcServices
     try {
       final ListProceduresResponse.Builder response = ListProceduresResponse.newBuilder();
       for (ProcedureInfo p: master.listProcedures()) {
-        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
+        response.addProcedure(ProtobufUtil.toProtoProcedure(p));
       }
       return response.build();
     } catch (IOException e) {
@@ -1169,6 +1048,23 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
+  public ListLocksResponse listLocks(
+      RpcController controller,
+      ListLocksRequest request) throws ServiceException {
+    try {
+      final ListLocksResponse.Builder builder = ListLocksResponse.newBuilder();
+
+      for (LockInfo lockInfo: master.listLocks()) {
+        builder.addLock(ProtobufUtil.toProtoLockInfo(lockInfo));
+      }
+
+      return builder.build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
       ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
     try {
@@ -1459,7 +1355,6 @@ public class MasterRpcServices extends RSRpcServices
         throw new UnknownRegionException(Bytes.toString(regionName));
       }
 
-      if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
       HRegionInfo hri = pair.getFirst();
       if (master.cpHost != null) {
         if (master.cpHost.preUnassign(hri, force)) {
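
ProtobufUtil.toProtoLockInfo, referenced in listLocks() above, is added elsewhere in this changeset and is not shown in this part of the patch. For orientation only, a plausible shape for such a converter, given the proto definitions earlier in the patch; every detail below is an assumption, not the actual implementation:

    import org.apache.hadoop.hbase.ProcedureInfo;
    import org.apache.hadoop.hbase.procedure2.LockInfo;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;

    /** Plausible sketch only; the real converter lives in ProtobufUtil. */
    public class LockInfoConverter {
      static LockServiceProtos.LockInfo toProto(LockInfo lock) {
        LockServiceProtos.LockInfo.Builder b = LockServiceProtos.LockInfo.newBuilder();
        // assumes domain enum names (SERVER, TABLE, ...) mirror the proto names
        b.setResourceType(LockServiceProtos.ResourceType.valueOf(
            "RESOURCE_TYPE_" + lock.getResourceType().name()));
        if (lock.getResourceName() != null) {
          b.setResourceName(lock.getResourceName());
        }
        b.setLockType(LockServiceProtos.LockType.valueOf(lock.getLockType().name()));
        ProcedureInfo owner = lock.getExclusiveLockOwnerProcedure();
        if (owner != null) {
          b.setExclusiveLockOwnerProcedure(ProtobufUtil.toProtoProcedure(owner));
        }
        b.setSharedLockCount(lock.getSharedLockCount());
        for (LockInfo.WaitingProcedure wp : lock.getWaitingProcedures()) {
          b.addWaitingProcedures(LockServiceProtos.WaitingProcedure.newBuilder()
              .setLockType(LockServiceProtos.LockType.valueOf(wp.getLockType().name()))
              .setProcedure(ProtobufUtil.toProtoProcedure(wp.getProcedure()))
              .build());
        }
        return b.build();
      }
    }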

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 66758f8..4924d72 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -39,12 +39,12 @@ import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-
 import com.google.protobuf.Service;
 
 /**
@@ -353,6 +353,13 @@ public interface MasterServices extends Server {
   public List<ProcedureInfo> listProcedures() throws IOException;
 
   /**
+   * List locks
+   * @return lock list
+   * @throws IOException
+   */
+  public List<LockInfo> listLocks() throws IOException;
+
+  /**
    * Get list of table descriptors by namespace
    * @param name namespace name
    * @return descriptors

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
index 3cad51c..512f7e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
@@ -204,6 +204,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
    * WALs.
    * @return false, so procedure framework doesn't mark this procedure as failure.
    */
+  @Override
   protected boolean setTimeoutFailure(final MasterProcedureEnv env) {
     synchronized (event) {
       if (LOG.isDebugEnabled()) LOG.debug("Timeout failure " + this.event);
@@ -231,7 +232,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
   }
 
   @Override
-  protected Procedure[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
+  protected Procedure<?>[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
     // Local master locks don't store any state, so on recovery, simply finish this procedure
     // immediately.
     if (recoveredMasterLock) return null;
@@ -334,6 +335,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
     setState(ProcedureProtos.ProcedureState.RUNNABLE);
   }
 
+  @Override
   protected void toStringClassDetails(final StringBuilder builder) {
     super.toStringClassDetails(builder);
     if (regionInfos != null) {
@@ -350,6 +352,10 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
     builder.append(", type=").append(type);
   }
 
+  public LockType getType() {
+    return type;
+  }
+
   private LockInterface setupLock() throws IllegalArgumentException {
     if (regionInfos != null) {
       return setupRegionLock();

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 48a0b62..b0baf85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -21,27 +21,34 @@ package org.apache.hadoop.hbase.master.procedure;
 import com.google.common.annotations.VisibleForTesting;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableExistsException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
 import org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler;
+import org.apache.hadoop.hbase.procedure2.LockAndQueue;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.LockStatus;
 import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.procedure2.LockAndQueue;
 import org.apache.hadoop.hbase.procedure2.ProcedureDeque;
-import org.apache.hadoop.hbase.util.AvlUtil.AvlKeyComparator;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
 import org.apache.hadoop.hbase.util.AvlUtil.AvlIterableList;
+import org.apache.hadoop.hbase.util.AvlUtil.AvlKeyComparator;
 import org.apache.hadoop.hbase.util.AvlUtil.AvlLinkedNode;
 import org.apache.hadoop.hbase.util.AvlUtil.AvlTree;
 import org.apache.hadoop.hbase.util.AvlUtil.AvlTreeIterator;
@@ -226,7 +233,111 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     return pollResult;
   }
 
-  @VisibleForTesting
+  private LockInfo createLockInfo(LockInfo.ResourceType resourceType,
+      String resourceName, LockAndQueue queue) {
+    LockInfo info = new LockInfo();
+
+    info.setResourceType(resourceType);
+    info.setResourceName(resourceName);
+
+    if (queue.hasExclusiveLock()) {
+      info.setLockType(LockInfo.LockType.EXCLUSIVE);
+
+      Procedure<?> exclusiveLockOwnerProcedure = queue.getExclusiveLockOwnerProcedure();
+      ProcedureInfo exclusiveLockOwnerProcedureInfo =
+          ProcedureUtil.convertToProcedureInfo(exclusiveLockOwnerProcedure);
+      info.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedureInfo);
+    } else if (queue.getSharedLockCount() > 0) {
+      info.setLockType(LockInfo.LockType.SHARED);
+      info.setSharedLockCount(queue.getSharedLockCount());
+    }
+
+    for (Procedure<?> procedure : queue) {
+      if (!(procedure instanceof LockProcedure)) {
+        continue;
+      }
+
+      LockProcedure lockProcedure = (LockProcedure)procedure;
+      LockInfo.WaitingProcedure waitingProcedure = new LockInfo.WaitingProcedure();
+
+      switch (lockProcedure.getType()) {
+      case EXCLUSIVE:
+        waitingProcedure.setLockType(LockInfo.LockType.EXCLUSIVE);
+        break;
+      case SHARED:
+        waitingProcedure.setLockType(LockInfo.LockType.SHARED);
+        break;
+      }
+
+      ProcedureInfo procedureInfo = ProcedureUtil.convertToProcedureInfo(lockProcedure);
+      waitingProcedure.setProcedure(procedureInfo);
+
+      info.addWaitingProcedure(waitingProcedure);
+    }
+
+    return info;
+  }
+
+  @Override
+  public List<LockInfo> listLocks() {
+    schedLock();
+
+    try {
+      List<LockInfo> lockInfos = new ArrayList<>();
+
+      for (Entry<ServerName, LockAndQueue> entry : locking.serverLocks
+          .entrySet()) {
+        String serverName = entry.getKey().getServerName();
+        LockAndQueue queue = entry.getValue();
+
+        if (queue.isLocked()) {
+          LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.SERVER,
+              serverName, queue);
+          lockInfos.add(lockInfo);
+        }
+      }
+
+      for (Entry<String, LockAndQueue> entry : locking.namespaceLocks
+          .entrySet()) {
+        String namespaceName = entry.getKey();
+        LockAndQueue queue = entry.getValue();
+
+        if (queue.isLocked()) {
+          LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.NAMESPACE,
+              namespaceName, queue);
+          lockInfos.add(lockInfo);
+        }
+      }
+
+      for (Entry<TableName, LockAndQueue> entry : locking.tableLocks
+          .entrySet()) {
+        String tableName = entry.getKey().getNameAsString();
+        LockAndQueue queue = entry.getValue();
+
+        if (queue.isLocked()) {
+          LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.TABLE,
+              tableName, queue);
+          lockInfos.add(lockInfo);
+        }
+      }
+
+      for (Entry<String, LockAndQueue> entry : locking.regionLocks.entrySet()) {
+        String regionName = entry.getKey();
+        LockAndQueue queue = entry.getValue();
+
+        if (queue.isLocked()) {
+          LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.REGION,
+              regionName, queue);
+          lockInfos.add(lockInfo);
+        }
+      }
+
+      return lockInfos;
+    } finally {
+      schedUnlock();
+    }
+  }
+
   @Override
   public void clear() {
     schedLock();
@@ -390,6 +501,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       super(serverName, serverLock);
     }
 
+    @Override
     public boolean requireExclusiveLock(Procedure proc) {
       ServerProcedureInterface spi = (ServerProcedureInterface)proc;
       switch (spi.getServerOperationType()) {
@@ -437,6 +549,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       return true;
     }
 
+    @Override
     public boolean requireExclusiveLock(Procedure proc) {
       return requireTableExclusiveLock((TableProcedureInterface)proc);
     }
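
A readability note on listLocks() above: the four loops over serverLocks, namespaceLocks, tableLocks and regionLocks differ only in the resource type and how the key is rendered. A hypothetical generic helper, meant to sit inside MasterProcedureScheduler next to createLockInfo(), could collapse them:

    // imports would join the file's existing ones
    import java.util.List;
    import java.util.Map;
    import java.util.function.Function;
    import org.apache.hadoop.hbase.procedure2.LockAndQueue;
    import org.apache.hadoop.hbase.procedure2.LockInfo;

    // Hypothetical helper; createLockInfo is the private method added above.
    private <K> void collectLocks(Map<K, LockAndQueue> locks,
        LockInfo.ResourceType resourceType, Function<K, String> nameOf,
        List<LockInfo> out) {
      for (Map.Entry<K, LockAndQueue> entry : locks.entrySet()) {
        LockAndQueue queue = entry.getValue();
        if (queue.isLocked()) {
          out.add(createLockInfo(resourceType, nameOf.apply(entry.getKey()), queue));
        }
      }
    }

    // Usage, e.g.:
    //   collectLocks(locking.serverLocks, LockInfo.ResourceType.SERVER,
    //       ServerName::getServerName, lockInfos);
    //   collectLocks(locking.tableLocks, LockInfo.ResourceType.TABLE,
    //       TableName::getNameAsString, lockInfos);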

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
index c841e61..b686114 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
@@ -27,9 +27,10 @@
   import="java.util.Set"
   import="org.apache.hadoop.conf.Configuration"
   import="org.apache.hadoop.hbase.HBaseConfiguration"
-  import="org.apache.hadoop.hbase.ProcedureInfo"
   import="org.apache.hadoop.hbase.master.HMaster"
   import="org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv"
+  import="org.apache.hadoop.hbase.ProcedureInfo"
+  import="org.apache.hadoop.hbase.procedure2.LockInfo"
   import="org.apache.hadoop.hbase.procedure2.ProcedureExecutor"
   import="org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALFile"
   import="org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore"
@@ -55,6 +56,8 @@
       return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
     }
   });
+
+  List<LockInfo> locks = master.listLocks();
 %>
 <!--[if IE]>
 <!DOCTYPE html>
@@ -62,15 +65,15 @@
 <?xml version="1.0" encoding="UTF-8" ?>
 <html xmlns="http://www.w3.org/1999/xhtml">
 <head>
-    <meta charset="utf-8">
+    <meta charset="utf-8" />
     <title>HBase Master Procedures: <%= master.getServerName() %></title>
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <meta name="description" content="">
-    <meta name="author" content="">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="description" content="" />
+    <meta name="author" content="" />
 
-    <link href="/static/css/bootstrap.min.css" rel="stylesheet">
-    <link href="/static/css/bootstrap-theme.min.css" rel="stylesheet">
-    <link href="/static/css/hbase.css" rel="stylesheet">
+    <link href="/static/css/bootstrap.min.css" rel="stylesheet" />
+    <link href="/static/css/bootstrap-theme.min.css" rel="stylesheet" />
+    <link href="/static/css/hbase.css" rel="stylesheet" />
   </head>
 <body>
 <div class="navbar  navbar-fixed-top navbar-default">
@@ -87,7 +90,7 @@
             <ul class="nav navbar-nav">
                 <li><a href="/master-status">Home</a></li>
                 <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                <li><a href="/procedures.jsp">Procedures</a></li>
+                <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
                 <li><a href="/logs/">Local Logs</a></li>
                 <li><a href="/logLevel">Log Level</a></li>
                 <li><a href="/dump">Debug Dump</a></li>
@@ -116,43 +119,42 @@
         <th>Last Update</th>
         <th>Errors</th>
     </tr>
-    <tr>
-      <% for (ProcedureInfo procInfo : procedures) { %>
+    <% for (ProcedureInfo procInfo : procedures) { %>
       <tr>
-        <td><%= procInfo.getProcId() %></a></td>
-        <td><%= procInfo.hasParentId() ? procInfo.getParentId() : "" %></a></td>
-        <td><%= escapeXml(procInfo.getProcState().toString()) %></a></td>
-        <td><%= escapeXml(procInfo.getProcOwner()) %></a></td>
-        <td><%= escapeXml(procInfo.getProcName()) %></a></td>
-        <td><%= new Date(procInfo.getSubmittedTime()) %></a></td>
-        <td><%= new Date(procInfo.getLastUpdate()) %></a></td>
-        <td><%= escapeXml(procInfo.isFailed() ? procInfo.getException().getMessage() : "") %></a></td>
+        <td><%= procInfo.getProcId() %></td>
+        <td><%= procInfo.hasParentId() ? procInfo.getParentId() : "" %></td>
+        <td><%= escapeXml(procInfo.getProcState().toString()) %></td>
+        <td><%= escapeXml(procInfo.getProcOwner()) %></td>
+        <td><%= escapeXml(procInfo.getProcName()) %></td>
+        <td><%= new Date(procInfo.getSubmittedTime()) %></td>
+        <td><%= new Date(procInfo.getLastUpdate()) %></td>
+        <td><%= escapeXml(procInfo.isFailed() ? procInfo.getException().getMessage() : "") %></td>
       </tr>
     <% } %>
   </table>
 </div>
-<br>
+<br />
 <div class="container-fluid content">
   <div class="row">
     <div class="page-header">
       <h2>Procedure WAL State</h2>
     </div>
   </div>
-<div class="tabbable">
-  <ul class="nav nav-pills">
-    <li class="active">
-      <a href="#tab_WALFiles" data-toggle="tab">WAL files</a>
-    </li>
-    <li class="">
-      <a href="#tab_WALFilesCorrupted" data-toggle="tab">Corrupted WAL files</a>
-     </li>
-    <li class="">
-      <a href="#tab_WALRollTime" data-toggle="tab">WAL roll time</a>
-     </li>
-     <li class="">
-       <a href="#tab_SyncStats" data-toggle="tab">Sync stats</a>
-     </li>
-  </ul>
+  <div class="tabbable">
+    <ul class="nav nav-pills">
+      <li class="active">
+        <a href="#tab_WALFiles" data-toggle="tab">WAL files</a>
+      </li>
+      <li class="">
+        <a href="#tab_WALFilesCorrupted" data-toggle="tab">Corrupted WAL files</a>
+      </li>
+      <li class="">
+        <a href="#tab_WALRollTime" data-toggle="tab">WAL roll time</a>
+      </li>
+      <li class="">
+        <a href="#tab_SyncStats" data-toggle="tab">Sync stats</a>
+      </li>
+    </ul>
     <div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
       <div class="tab-pane active" id="tab_WALFiles">
         <% if (procedureWALFiles != null && procedureWALFiles.size() > 0) { %>
@@ -168,8 +170,8 @@
             <tr>
               <td> <%= pwf.getLogId() %></td>
               <td> <%= StringUtils.humanSize(pwf.getSize()) %> </td>
-              <td> <%= new Date(pwf.getTimestamp()) %></a></td>
-              <td> <%= escapeXml(pwf.toString()) %></t>
+              <td> <%= new Date(pwf.getTimestamp()) %> </td>
+              <td> <%= escapeXml(pwf.toString()) %> </td>
             </tr>
             <% } %>
           </table>
@@ -190,8 +192,8 @@
           <tr>
             <td> <%= cwf.getLogId() %></td>
             <td> <%= StringUtils.humanSize(cwf.getSize()) %> </td>
-            <td> <%= new Date(cwf.getTimestamp()) %></a></td>
-            <td> <%= escapeXml(cwf.toString()) %></t>
+            <td> <%= new Date(cwf.getTimestamp()) %> </td>
+            <td> <%= escapeXml(cwf.toString()) %> </td>
           </tr>
           <% } %>
           </table>
@@ -223,7 +225,7 @@
           <% for (int i = syncMetricsBuff.size() - 1; i >= 0; --i) { %>
           <%    WALProcedureStore.SyncMetrics syncMetrics = syncMetricsBuff.get(i); %>
           <tr>
-            <td> <%= new Date(syncMetrics.getTimestamp()) %></a></td>
+            <td> <%= new Date(syncMetrics.getTimestamp()) %></td>
             <td> <%= StringUtils.humanTimeDiff(syncMetrics.getSyncWaitMs()) %></td>
             <td> <%= syncMetrics.getSyncedEntries() %></td>
             <td> <%= StringUtils.humanSize(syncMetrics.getTotalSyncedBytes()) %></td>
@@ -235,6 +237,51 @@
       </div>
   </div>
 </div>
+<br />
+<div class="container-fluid content">
+  <div class="row">
+      <div class="page-header">
+          <h1>Locks</h1>
+      </div>
+  </div>
+  <% for (LockInfo lock : locks) { %>
+    <h2><%= lock.getResourceType() %>: <%= lock.getResourceName() %></h2>
+    <%
+      switch (lock.getLockType()) {
+      case EXCLUSIVE:
+    %>
+    <p>Lock type: EXCLUSIVE</p>
+    <p>Owner procedure ID: <%= lock.getExclusiveLockOwnerProcedure().getProcId() %></p>
+    <%
+        break;
+      case SHARED:
+    %>
+    <p>Lock type: SHARED</p>
+    <p>Number of shared locks: <%= lock.getSharedLockCount() %></p>
+    <%
+        break;
+      }
+
+      List<LockInfo.WaitingProcedure> waitingProcedures = lock.getWaitingProcedures();
+
+      if (!waitingProcedures.isEmpty()) {
+    %>
+	    <h3>Waiting procedures</h3>
+	    <table class="table table-striped" width="90%" >
+		    <tr>
+		      <th>Lock type</th>
+		      <th>Procedure ID</th>
+		    </tr>
+		    <% for (LockInfo.WaitingProcedure waitingProcedure : waitingProcedures) { %>
+		      <tr>
+	          <td><%= waitingProcedure.getLockType() %></td>
+	          <td><%= waitingProcedure.getProcedure().getProcId() %></td>
+		      </tr>
+		    <% } %>
+	    </table>
+    <% } %>
+  <% } %>
+</div>
 <script src="/static/js/jquery.min.js" type="text/javascript"></script>
 <script src="/static/js/bootstrap.min.js" type="text/javascript"></script>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
index 90f639b..75f75fc 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
@@ -94,7 +94,7 @@
             <ul class="nav navbar-nav">
                 <li><a href="/master-status">Home</a></li>
                 <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                <li><a href="/procedures.jsp">Procedures</a></li>
+                <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
                 <li><a href="/logs/">Local Logs</a></li>
                 <li><a href="/logLevel">Log Level</a></li>
                 <li><a href="/dump">Debug Dump</a></li>

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
index aa9a17f..58f74f4 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
@@ -81,7 +81,7 @@
             <ul class="nav navbar-nav">
                 <li><a href="/master-status">Home</a></li>
                 <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                <li><a href="/procedures.jsp">Procedures</a></li>
+                <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
                 <li><a href="/logs/">Local Logs</a></li>
                 <li><a href="/logLevel">Log Level</a></li>
                 <li><a href="/dump">Debug Dump</a></li>

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 0f8a289..0e1d1cf 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -158,7 +158,7 @@
             <ul class="nav navbar-nav">
                 <li><a href="/master-status">Home</a></li>
                 <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                <li><a href="/procedures.jsp">Procedures</a></li>
+                <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
                 <li><a href="/logs/">Local Logs</a></li>
                 <li><a href="/logLevel">Log Level</a></li>
                 <li><a href="/dump">Debug Dump</a></li>

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
index d21be3e..a485e8b 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
@@ -64,7 +64,7 @@
               <ul class="nav navbar-nav">
                   <li class="active"><a href="/master-status">Home</a></li>
                   <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                  <li><a href="/procedures.jsp">Procedures</a></li>
+                  <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
                   <li><a href="/logs/">Local Logs</a></li>
                   <li><a href="/logLevel">Log Level</a></li>
                   <li><a href="/dump">Debug Dump</a></li>

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp b/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
index 6cd6c92..a2e6733 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
@@ -60,7 +60,7 @@
                     <ul class="nav navbar-nav">
                         <li><a href="/master-status">Home</a></li>
                         <li><a href="/tablesDetailed.jsp">Table Details</a></li>
-                        <li><a href="/procedures.jsp">Procedures</a></li>
+                        <li><a href="/procedures.jsp">Procedures &amp; Locks</a></li>
                         <li><a href="/logs/">Local Logs</a></li>
                         <li><a href="/logLevel">Log Level</a></li>
                         <li><a href="/dump">Debug Dump</a></li>

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index 6b52e0c..3b80406 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -125,6 +126,8 @@ public class TestMasterObserver {
     private boolean postAbortProcedureCalled;
     private boolean preListProceduresCalled;
     private boolean postListProceduresCalled;
+    private boolean preListLocksCalled;
+    private boolean postListLocksCalled;
     private boolean preMoveCalled;
     private boolean postMoveCalled;
     private boolean preAssignCalled;
@@ -726,6 +729,25 @@ public class TestMasterObserver {
     }
 
     @Override
+    public void preListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+      preListLocksCalled = true;
+    }
+
+    @Override
+    public void postListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx, List<LockInfo> lockInfoList)
+        throws IOException {
+      postListLocksCalled = true;
+    }
+
+    public boolean wasListLocksCalled() {
+      return preListLocksCalled && postListLocksCalled;
+    }
+
+    public boolean wasPreListLocksCalledOnly() {
+      return preListLocksCalled && !postListLocksCalled;
+    }
+
+    @Override
     public void preMove(ObserverContext<MasterCoprocessorEnvironment> env,
         HRegionInfo region, ServerName srcServer, ServerName destServer)
     throws IOException {
@@ -2164,6 +2186,22 @@ public class TestMasterObserver {
       cp.wasListProceduresCalled());
   }
 
+  @Test (timeout=180000)
+  public void testListLocksOperation() throws Exception {
+    MiniHBaseCluster cluster = UTIL.getHBaseCluster();
+
+    HMaster master = cluster.getMaster();
+    MasterCoprocessorHost host = master.getMasterCoprocessorHost();
+    CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
+        CPMasterObserver.class.getName());
+    cp.resetStates();
+
+    master.listLocks();
+    assertTrue(
+      "Coprocessor should be called on list locks request",
+      cp.wasListLocksCalled());
+  }
+
   private void deleteTable(Admin admin, TableName tableName) throws Exception {
     // NOTE: We need a latch because admin is not sync,
     // so the postOp coprocessor method may be called after the admin operation returned.

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 683e9b3..ff6b88e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -39,16 +39,14 @@ import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.mockito.Mockito;
-
 import com.google.protobuf.Service;
 
 public class MockNoopMasterServices implements MasterServices, Server {
@@ -221,6 +219,11 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
+  public List<LockInfo> listLocks() throws IOException {
+    return null;
+  }
+
+  @Override
   public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
     return null;  //To change body of implemented methods use File | Settings | File Templates.
   }

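Note that the noop mock returns null from listLocks(), so any test that iterates the result would hit a NullPointerException. Where a harmless stub is preferable, an empty list is the safer default; a sketch only, not part of the patch, with the class name SafeMockMasterServices invented for illustration:

  import java.io.IOException;
  import java.util.Collections;
  import java.util.List;

  import org.apache.hadoop.hbase.master.MockNoopMasterServices;
  import org.apache.hadoop.hbase.procedure2.LockInfo;

  public class SafeMockMasterServices extends MockNoopMasterServices {
    @Override
    public List<LockInfo> listLocks() throws IOException {
      // Empty immutable list instead of null: callers can iterate safely.
      return Collections.emptyList();
    }
  }
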
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 356c84f..e23c90a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -18,21 +18,24 @@
 
 package org.apache.hadoop.hbase.master.procedure;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
+import static org.junit.Assert.*;
 import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
 import java.util.Arrays;
-
+import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.LockInfo.WaitingProcedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -899,5 +902,161 @@ public class TestMasterProcedureScheduler {
       sb.append(")");
     }
   }
+
+  private static LockProcedure createLockProcedure(LockProcedure.LockType lockType, long procId) throws Exception {
+    LockProcedure procedure = new LockProcedure();
+
+    Field typeField = LockProcedure.class.getDeclaredField("type");
+    typeField.setAccessible(true);
+    typeField.set(procedure, lockType);
+
+    Method setProcIdMethod = Procedure.class.getDeclaredMethod("setProcId", long.class);
+    setProcIdMethod.setAccessible(true);
+    setProcIdMethod.invoke(procedure, procId);
+
+    return procedure;
+  }
+
+  private static LockProcedure createExclusiveLockProcedure(long procId) throws Exception {
+    return createLockProcedure(LockProcedure.LockType.EXCLUSIVE, procId);
+  }
+
+  private static LockProcedure createSharedLockProcedure(long procId) throws Exception {
+    return createLockProcedure(LockProcedure.LockType.SHARED, procId);
+  }
+
+  private static void assertLockResource(LockInfo lock,
+      LockInfo.ResourceType resourceType, String resourceName)
+  {
+    assertEquals(resourceType, lock.getResourceType());
+    assertEquals(resourceName, lock.getResourceName());
+  }
+
+  private static void assertExclusiveLock(LockInfo lock, long procId)
+  {
+    assertEquals(LockInfo.LockType.EXCLUSIVE, lock.getLockType());
+    assertEquals(procId, lock.getExclusiveLockOwnerProcedure().getProcId());
+    assertEquals(0, lock.getSharedLockCount());
+  }
+
+  private static void assertSharedLock(LockInfo lock, int lockCount)
+  {
+    assertEquals(LockInfo.LockType.SHARED, lock.getLockType());
+    assertEquals(lockCount, lock.getSharedLockCount());
+  }
+
+  @Test
+  public void testListLocksServer() throws Exception {
+    LockProcedure procedure = createExclusiveLockProcedure(0);
+    queue.waitServerExclusiveLock(procedure, ServerName.valueOf("server1,1234,0"));
+
+    List<LockInfo> locks = queue.listLocks();
+    assertEquals(1, locks.size());
+
+    LockInfo serverLock = locks.get(0);
+    assertLockResource(serverLock, LockInfo.ResourceType.SERVER, "server1,1234,0");
+    assertExclusiveLock(serverLock, 0);
+    assertTrue(serverLock.getWaitingProcedures().isEmpty());
+  }
+
+  @Test
+  public void testListLocksNamespace() throws Exception {
+    LockProcedure procedure = createExclusiveLockProcedure(1);
+    queue.waitNamespaceExclusiveLock(procedure, "ns1");
+
+    List<LockInfo> locks = queue.listLocks();
+    assertEquals(2, locks.size());
+
+    LockInfo namespaceLock = locks.get(0);
+    assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns1");
+    assertExclusiveLock(namespaceLock, 1);
+    assertTrue(namespaceLock.getWaitingProcedures().isEmpty());
+
+    LockInfo tableLock = locks.get(1);
+    assertLockResource(tableLock, LockInfo.ResourceType.TABLE,
+        TableName.NAMESPACE_TABLE_NAME.getNameAsString());
+    assertSharedLock(tableLock, 1);
+    assertTrue(tableLock.getWaitingProcedures().isEmpty());
+  }
+
+  @Test
+  public void testListLocksTable() throws Exception {
+    LockProcedure procedure = createExclusiveLockProcedure(2);
+    queue.waitTableExclusiveLock(procedure, TableName.valueOf("ns2", "table2"));
+
+    List<LockInfo> locks = queue.listLocks();
+    assertEquals(2, locks.size());
+
+    LockInfo namespaceLock = locks.get(0);
+    assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns2");
+    assertSharedLock(namespaceLock, 1);
+    assertTrue(namespaceLock.getWaitingProcedures().isEmpty());
+
+    LockInfo tableLock = locks.get(1);
+    assertLockResource(tableLock, LockInfo.ResourceType.TABLE, "ns2:table2");
+    assertExclusiveLock(tableLock, 2);
+    assertTrue(tableLock.getWaitingProcedures().isEmpty());
+  }
+
+  @Test
+  public void testListLocksRegion() throws Exception {
+    LockProcedure procedure = createExclusiveLockProcedure(3);
+    HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf("ns3", "table3"));
+
+    queue.waitRegion(procedure, regionInfo);
+
+    List<LockInfo> locks = queue.listLocks();
+    assertEquals(3, locks.size());
+
+    LockInfo namespaceLock = locks.get(0);
+    assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns3");
+    assertSharedLock(namespaceLock, 1);
+    assertTrue(namespaceLock.getWaitingProcedures().isEmpty());
+
+    LockInfo tableLock = locks.get(1);
+    assertLockResource(tableLock, LockInfo.ResourceType.TABLE, "ns3:table3");
+    assertSharedLock(tableLock, 1);
+    assertTrue(tableLock.getWaitingProcedures().isEmpty());
+
+    LockInfo regionLock = locks.get(2);
+    assertLockResource(regionLock, LockInfo.ResourceType.REGION, regionInfo.getEncodedName());
+    assertExclusiveLock(regionLock, 3);
+    assertTrue(regionLock.getWaitingProcedures().isEmpty());
+  }
+
+  @Test
+  public void testListLocksWaiting() throws Exception {
+    LockProcedure procedure1 = createExclusiveLockProcedure(1);
+    queue.waitTableExclusiveLock(procedure1, TableName.valueOf("ns4", "table4"));
+
+    LockProcedure procedure2 = createSharedLockProcedure(2);
+    queue.waitTableSharedLock(procedure2, TableName.valueOf("ns4", "table4"));
+
+    LockProcedure procedure3 = createExclusiveLockProcedure(3);
+    queue.waitTableExclusiveLock(procedure3, TableName.valueOf("ns4", "table4"));
+
+    List<LockInfo> locks = queue.listLocks();
+    assertEquals(2, locks.size());
+
+    LockInfo namespaceLock = locks.get(0);
+    assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns4");
+    assertSharedLock(namespaceLock, 1);
+    assertTrue(namespaceLock.getWaitingProcedures().isEmpty());
+
+    LockInfo tableLock = locks.get(1);
+    assertLockResource(tableLock, LockInfo.ResourceType.TABLE, "ns4:table4");
+    assertExclusiveLock(tableLock, 1);
+
+    List<WaitingProcedure> waitingProcedures = tableLock.getWaitingProcedures();
+    assertEquals(2, waitingProcedures.size());
+
+    WaitingProcedure waitingProcedure1 = waitingProcedures.get(0);
+    assertEquals(LockInfo.LockType.SHARED, waitingProcedure1.getLockType());
+    assertEquals(2, waitingProcedure1.getProcedure().getProcId());
+
+    WaitingProcedure waitingProcedure2 = waitingProcedures.get(1);
+    assertEquals(LockInfo.LockType.EXCLUSIVE, waitingProcedure2.getLockType());
+    assertEquals(3, waitingProcedure2.getProcedure().getProcId());
+  }
 }
 

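Beyond exercising listLocks(), the tests above document the lock hierarchy: an exclusive table lock implicitly takes a shared lock on its namespace, and an exclusive namespace lock takes a shared lock on the hbase:namespace table. A minimal sketch of walking the listLocks() output with the same accessors the assertions use; LockDumper and dumpLocks are illustrative names:

  import java.util.List;

  import org.apache.hadoop.hbase.procedure2.LockInfo;
  import org.apache.hadoop.hbase.procedure2.LockInfo.WaitingProcedure;

  public class LockDumper {
    // Prints each held lock and any procedures queued behind it.
    static void dumpLocks(List<LockInfo> locks) {
      for (LockInfo lock : locks) {
        System.out.println(lock.getResourceType() + "(" + lock.getResourceName() + ")");
        if (lock.getLockType() == LockInfo.LockType.EXCLUSIVE) {
          System.out.println("  exclusive, owner procedure "
              + lock.getExclusiveLockOwnerProcedure().getProcId());
        } else {
          System.out.println("  shared, count " + lock.getSharedLockCount());
        }
        for (WaitingProcedure waiting : lock.getWaitingProcedures()) {
          System.out.println("  waiting: " + waiting.getLockType()
              + " procedure " + waiting.getProcedure().getProcId());
        }
      }
    }
  }
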
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
index f943ce4..c88c370 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
@@ -20,21 +20,24 @@ package org.apache.hadoop.hbase.protobuf;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ProcedureState;
 import org.apache.hadoop.hbase.ByteBufferKeyValue;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
@@ -336,4 +339,40 @@ public class TestProtobufUtil {
     Cell newOffheapKV = org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toCell(cell);
     assertTrue(CellComparator.COMPARATOR.compare(offheapKV, newOffheapKV) == 0);
   }
+
+  private static ProcedureInfo createProcedureInfo(long procId)
+  {
+    return new ProcedureInfo(procId, "java.lang.Object", null,
+        ProcedureState.RUNNABLE, -1, null, null, 0, 0, null);
+  }
+
+  private static void assertProcedureInfoEquals(ProcedureInfo expected,
+      ProcedureInfo result)
+  {
+    if (expected == result) {
+      return;
+    } else if (expected == null || result == null) {
+      fail();
+    }
+
+    assertEquals(expected.getProcId(), result.getProcId());
+  }
+
+  private static void assertLockInfoEquals(LockInfo expected, LockInfo result)
+  {
+    assertEquals(expected.getResourceType(), result.getResourceType());
+    assertEquals(expected.getResourceName(), result.getResourceName());
+    assertEquals(expected.getLockType(), result.getLockType());
+    assertProcedureInfoEquals(expected.getExclusiveLockOwnerProcedure(),
+        result.getExclusiveLockOwnerProcedure());
+    assertEquals(expected.getSharedLockCount(), result.getSharedLockCount());
+  }
+
+  private static void assertWaitingProcedureEquals(
+      LockInfo.WaitingProcedure expected, LockInfo.WaitingProcedure result)
+  {
+    assertEquals(expected.getLockType(), result.getLockType());
+    assertProcedureInfoEquals(expected.getProcedure(),
+        result.getProcedure());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java
new file mode 100644
index 0000000..da7c7c4
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.shaded.protobuf;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ProcedureState;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestProtobufUtil {
+  public TestProtobufUtil() {
+  }
+
+  private static ProcedureInfo createProcedureInfo(long procId)
+  {
+    return new ProcedureInfo(procId, "java.lang.Object", null,
+        ProcedureState.RUNNABLE, -1, null, null, 0, 0, null);
+  }
+
+  private static void assertProcedureInfoEquals(ProcedureInfo expected,
+      ProcedureInfo result)
+  {
+    if (expected == result) {
+      return;
+    } else if (expected == null || result == null) {
+      fail();
+    }
+
+    assertEquals(expected.getProcId(), result.getProcId());
+  }
+
+  private static void assertLockInfoEquals(LockInfo expected, LockInfo result)
+  {
+    assertEquals(expected.getResourceType(), result.getResourceType());
+    assertEquals(expected.getResourceName(), result.getResourceName());
+    assertEquals(expected.getLockType(), result.getLockType());
+    assertProcedureInfoEquals(expected.getExclusiveLockOwnerProcedure(),
+        result.getExclusiveLockOwnerProcedure());
+    assertEquals(expected.getSharedLockCount(), result.getSharedLockCount());
+  }
+
+  private static void assertWaitingProcedureEquals(
+      LockInfo.WaitingProcedure expected, LockInfo.WaitingProcedure result)
+  {
+    assertEquals(expected.getLockType(), result.getLockType());
+    assertProcedureInfoEquals(expected.getProcedure(),
+        result.getProcedure());
+  }
+
+  @Test
+  public void testServerLockInfo() {
+    LockInfo lock = new LockInfo();
+    lock.setResourceType(LockInfo.ResourceType.SERVER);
+    lock.setResourceName("server");
+    lock.setLockType(LockInfo.LockType.SHARED);
+    lock.setSharedLockCount(2);
+
+    LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
+    LockInfo lock2 = ProtobufUtil.toLockInfo(proto);
+
+    assertLockInfoEquals(lock, lock2);
+  }
+
+  @Test
+  public void testNamespaceLockInfo() {
+    LockInfo lock = new LockInfo();
+    lock.setResourceType(LockInfo.ResourceType.NAMESPACE);
+    lock.setResourceName("ns");
+    lock.setLockType(LockInfo.LockType.EXCLUSIVE);
+    lock.setExclusiveLockOwnerProcedure(createProcedureInfo(2));
+
+    LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
+    LockInfo lock2 = ProtobufUtil.toLockInfo(proto);
+
+    assertLockInfoEquals(lock, lock2);
+  }
+
+  @Test
+  public void testTableLockInfo() {
+    LockInfo lock = new LockInfo();
+    lock.setResourceType(LockInfo.ResourceType.TABLE);
+    lock.setResourceName("table");
+    lock.setLockType(LockInfo.LockType.SHARED);
+    lock.setSharedLockCount(2);
+
+    LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
+    LockInfo lock2 = ProtobufUtil.toLockInfo(proto);
+
+    assertLockInfoEquals(lock, lock2);
+  }
+
+  @Test
+  public void testRegionLockInfo() {
+    LockInfo lock = new LockInfo();
+    lock.setResourceType(LockInfo.ResourceType.REGION);
+    lock.setResourceName("region");
+    lock.setLockType(LockInfo.LockType.EXCLUSIVE);
+    lock.setExclusiveLockOwnerProcedure(createProcedureInfo(2));
+
+    LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
+    LockInfo lock2 = ProtobufUtil.toLockInfo(proto);
+
+    assertLockInfoEquals(lock, lock2);
+  }
+
+  @Test
+  public void testExclusiveWaitingLockInfo() {
+    LockInfo.WaitingProcedure waitingProcedure = new LockInfo.WaitingProcedure();
+    waitingProcedure.setLockType(LockInfo.LockType.EXCLUSIVE);
+    waitingProcedure.setProcedure(createProcedureInfo(1));
+
+    LockServiceProtos.WaitingProcedure proto = ProtobufUtil.toProtoWaitingProcedure(waitingProcedure);
+    LockInfo.WaitingProcedure waitingProcedure2 = ProtobufUtil.toWaitingProcedure(proto);
+
+    assertWaitingProcedureEquals(waitingProcedure, waitingProcedure2);
+  }
+
+  @Test
+  public void testSharedWaitingLockInfo() {
+    LockInfo.WaitingProcedure waitingProcedure = new LockInfo.WaitingProcedure();
+    waitingProcedure.setLockType(LockInfo.LockType.SHARED);
+    waitingProcedure.setProcedure(createProcedureInfo(2));
+
+    LockServiceProtos.WaitingProcedure proto = ProtobufUtil.toProtoWaitingProcedure(waitingProcedure);
+    LockInfo.WaitingProcedure waitingProcedure2 = ProtobufUtil.toWaitingProcedure(proto);
+
+    assertWaitingProcedureEquals(waitingProcedure, waitingProcedure2);
+  }
+}

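All six tests above follow the same round-trip pattern: build a LockInfo POJO, convert it to the shaded protobuf message with ProtobufUtil.toProtoLockInfo, convert back with ProtobufUtil.toLockInfo, and compare field by field. Condensed into one sketch; the resource name and count are arbitrary:

  import org.apache.hadoop.hbase.procedure2.LockInfo;
  import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;

  public class LockInfoRoundTrip {
    public static void main(String[] args) {
      LockInfo lock = new LockInfo();
      lock.setResourceType(LockInfo.ResourceType.TABLE);
      lock.setResourceName("ns:table");
      lock.setLockType(LockInfo.LockType.SHARED);
      lock.setSharedLockCount(3);

      // POJO -> shaded protobuf message (the form that goes over the wire) ...
      LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
      // ... and back to the POJO on the receiving side.
      LockInfo decoded = ProtobufUtil.toLockInfo(proto);

      assert decoded.getSharedLockCount() == 3;
    }
  }
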
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 6aaa130..41904be 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -1192,6 +1192,11 @@ module Hbase
       @admin.listProcedures()
     end
 
+    # List all locks
+    def list_locks()
+      @admin.listLocks();
+    end
+
     # Parse arguments and update HTableDescriptor accordingly
     def update_htd_from_arg(htd, arg)
       htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER]

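The Ruby wrapper simply forwards to the Java Admin instance, so the same listing is reachable from Java clients. A hedged client-side sketch, assuming Admin#listLocks() is the public client API this shell command wraps (the Ruby code above calls @admin.listLocks()); ListLocksClient is an illustrative name:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.procedure2.LockInfo;

  public class ListLocksClient {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Admin admin = connection.getAdmin()) {
        // Enhanced-for works whether listLocks() returns an array or a List.
        for (LockInfo lock : admin.listLocks()) {
          System.out.println(lock.getResourceType() + "(" + lock.getResourceName() + ")");
        }
      }
    }
  }
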
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 66480f9..fc55f94 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -436,10 +436,11 @@ Shell.load_command_group(
 
 Shell.load_command_group(
   'procedures',
-  :full_name => 'PROCEDURES MANAGEMENT',
+  :full_name => 'PROCEDURES & LOCKS MANAGEMENT',
   :commands => %w[
     abort_procedure
     list_procedures
+    list_locks
   ]
 )
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/main/ruby/shell/commands.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index 271a7d9..08f2e11 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -98,6 +98,11 @@ module Shell
         @formatter ||= ::Shell::Formatter::Console.new
       end
 
+      # for testing purposes to catch the output of the commands
+      def set_formatter(formatter)
+        @formatter = formatter
+      end
+
       def translate_hbase_exceptions(*args)
         yield
       rescue => e

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/main/ruby/shell/commands/list_locks.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_locks.rb b/hbase-shell/src/main/ruby/shell/commands/list_locks.rb
new file mode 100644
index 0000000..fca411b
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/list_locks.rb
@@ -0,0 +1,60 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class ListLocks < Command
+      def help
+        return <<-EOF
+List all locks in hbase. Examples:
+
+  hbase> list_locks
+EOF
+      end
+
+      def command()
+        list = admin.list_locks()
+
+        list.each do |lock|
+          formatter.output_strln("#{lock.resourceType}(#{lock.resourceName})")
+
+          case lock.lockType
+          when org.apache.hadoop.hbase.procedure2.LockInfo::LockType::EXCLUSIVE then
+            formatter.output_strln("Lock type: EXCLUSIVE, procedure: #{lock.exclusiveLockOwnerProcedure.procId}")
+          when org.apache.hadoop.hbase.procedure2.LockInfo::LockType::SHARED then
+            formatter.output_strln("Lock type: SHARED, count: #{lock.sharedLockCount}")
+          end
+
+          if lock.waitingProcedures.any?
+            formatter.output_strln("Waiting procedures:")
+            formatter.header([ "Lock type", "Procedure Id" ])
+
+            lock.waitingProcedures.each do |waitingProcedure|
+              formatter.row([ waitingProcedure.lockType.to_s, waitingProcedure.procedure.procId.to_s ]);
+            end
+
+            formatter.footer(lock.waitingProcedures.size)
+          end
+
+          formatter.output_strln("");
+        end
+      end
+    end
+  end
+end

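Based on the expected strings in the new list_locks_test.rb further down, a table held exclusively while its namespace is implicitly share-locked prints along these lines:

  hbase> list_locks
  NAMESPACE(ns2)
  Lock type: SHARED, count: 1

  TABLE(ns2:table2)
  Lock type: EXCLUSIVE, procedure: 2

When other procedures are queued behind a lock, a "Waiting procedures" table listing each waiter's lock type and procedure id follows the owner line.
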
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/main/ruby/shell/formatter.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/formatter.rb b/hbase-shell/src/main/ruby/shell/formatter.rb
index 2f800f6..aa81c86 100644
--- a/hbase-shell/src/main/ruby/shell/formatter.rb
+++ b/hbase-shell/src/main/ruby/shell/formatter.rb
@@ -17,6 +17,8 @@
 # limitations under the License.
 #
 
+require 'stringio'
+
 # Results formatter
 module Shell
   module Formatter
@@ -25,7 +27,7 @@ module Shell
       attr_reader :row_count
 
       def is_valid_io?(obj)
-        obj.instance_of?(IO) || obj == Kernel
+        obj.instance_of?(IO) || obj.instance_of?(StringIO) || obj == Kernel
       end
 
       def refresh_width()
@@ -166,6 +168,11 @@ module Shell
         output(@max_width, str)
       end
 
+      def output_strln(str)
+        output_str(str)
+        @out.puts
+      end
+
       def output(width, str)
         if str == nil
           str = ''

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/test/ruby/shell/list_locks_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
new file mode 100644
index 0000000..fe132db
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
@@ -0,0 +1,152 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_constants'
+require 'shell'
+
+class ListLocksTest < Test::Unit::TestCase
+  def setup
+    @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
+    @shell = Shell::Shell.new(@hbase)
+    @master = $TEST_CLUSTER.getHBaseClusterInterface.getMaster
+    @scheduler = @master.getMasterProcedureExecutor.getEnvironment.getProcedureScheduler
+
+    @string_io = StringIO.new
+
+    @list_locks = Shell::Commands::ListLocks.new(@shell)
+    @list_locks.set_formatter(Shell::Formatter::Base.new({ :output_stream => @string_io }))
+  end
+
+  def set_field(object, field_name, value)
+    field = object.getClass.getDeclaredField(field_name)
+    field.setAccessible(true)
+    field.set(object, value)
+  end
+
+  def create_lock(type, proc_id)
+    lock = org.apache.hadoop.hbase.master.locking.LockProcedure.new()
+    set_field(lock, "type", type)
+    lock.procId = proc_id
+
+    return lock
+  end
+
+  def create_exclusive_lock(proc_id)
+    return create_lock(org.apache.hadoop.hbase.master.locking.LockProcedure::LockType::EXCLUSIVE, proc_id)
+  end
+
+  def create_shared_lock(proc_id)
+    return create_lock(org.apache.hadoop.hbase.master.locking.LockProcedure::LockType::SHARED, proc_id)
+  end
+
+  define_test "list server locks" do
+    lock = create_exclusive_lock(0)
+
+    server_name = org.apache.hadoop.hbase.ServerName.valueOf("server1,1234,0")
+
+    @scheduler.waitServerExclusiveLock(lock, server_name)
+    @list_locks.command()
+    @scheduler.wakeServerExclusiveLock(lock, server_name)
+
+    assert_equal(
+      "SERVER(server1,1234,0)\n" <<
+      "Lock type: EXCLUSIVE, procedure: 0\n\n",
+      @string_io.string)
+  end
+
+  define_test "list namespace locks" do
+    lock = create_exclusive_lock(1)
+
+    @scheduler.waitNamespaceExclusiveLock(lock, "ns1")
+    @list_locks.command()
+    @scheduler.wakeNamespaceExclusiveLock(lock, "ns1")
+
+    assert_equal(
+      "NAMESPACE(ns1)\n" <<
+      "Lock type: EXCLUSIVE, procedure: 1\n\n" <<
+      "TABLE(hbase:namespace)\n" <<
+      "Lock type: SHARED, count: 1\n\n",
+      @string_io.string)
+  end
+
+  define_test "list table locks" do
+    lock = create_exclusive_lock(2)
+
+    table_name = org.apache.hadoop.hbase.TableName.valueOf("ns2", "table2")
+
+    @scheduler.waitTableExclusiveLock(lock, table_name)
+    @list_locks.command()
+    @scheduler.wakeTableExclusiveLock(lock, table_name)
+
+    assert_equal(
+      "NAMESPACE(ns2)\n" <<
+      "Lock type: SHARED, count: 1\n\n" <<
+      "TABLE(ns2:table2)\n" <<
+      "Lock type: EXCLUSIVE, procedure: 2\n\n",
+      @string_io.string)
+  end
+
+  define_test "list region locks" do
+    lock = create_exclusive_lock(3)
+
+    table_name = org.apache.hadoop.hbase.TableName.valueOf("ns3", "table3")
+    region_info = org.apache.hadoop.hbase.HRegionInfo.new(table_name)
+
+    @scheduler.waitRegion(lock, region_info)
+    @list_locks.command()
+    @scheduler.wakeRegion(lock, region_info)
+
+    assert_equal(
+      "NAMESPACE(ns3)\n" <<
+      "Lock type: SHARED, count: 1\n\n" <<
+      "TABLE(ns3:table3)\n" <<
+      "Lock type: SHARED, count: 1\n\n" <<
+      "REGION(" << region_info.getEncodedName << ")\n" <<
+      "Lock type: EXCLUSIVE, procedure: 3\n\n",
+      @string_io.string)
+  end
+
+  define_test "list waiting locks" do
+    table_name = org.apache.hadoop.hbase.TableName.valueOf("ns4", "table4")
+
+    lock1 = create_exclusive_lock(1)
+    set_field(lock1, "tableName", table_name)
+
+    lock2 = create_shared_lock(2)
+    set_field(lock2, "tableName", table_name)
+
+    @scheduler.waitTableExclusiveLock(lock1, table_name)
+    @scheduler.waitTableSharedLock(lock2, table_name)
+    @list_locks.command()
+    @scheduler.wakeTableExclusiveLock(lock1, table_name)
+    @scheduler.wakeTableSharedLock(lock2, table_name)
+
+    assert_equal(
+      "NAMESPACE(ns4)\n" <<
+      "Lock type: SHARED, count: 1\n\n" <<
+      "TABLE(ns4:table4)\n" <<
+      "Lock type: EXCLUSIVE, procedure: 1\n" <<
+      "Waiting procedures:\n" <<
+      "Lock type  Procedure Id\n" <<
+      " SHARED 2\n" <<
+      "1 row(s)\n\n",
+      @string_io.string)
+  end
+
+end


[3/4] hbase git commit: HBASE-15143 Procedure v2 - Web UI displaying queues

Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
index 6dbf9b2..99853a5 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
@@ -104,6 +104,114 @@ public final class LockServiceProtos {
     // @@protoc_insertion_point(enum_scope:hbase.pb.LockType)
   }
 
+  /**
+   * Protobuf enum {@code hbase.pb.ResourceType}
+   */
+  public enum ResourceType
+      implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <code>RESOURCE_TYPE_SERVER = 1;</code>
+     */
+    RESOURCE_TYPE_SERVER(1),
+    /**
+     * <code>RESOURCE_TYPE_NAMESPACE = 2;</code>
+     */
+    RESOURCE_TYPE_NAMESPACE(2),
+    /**
+     * <code>RESOURCE_TYPE_TABLE = 3;</code>
+     */
+    RESOURCE_TYPE_TABLE(3),
+    /**
+     * <code>RESOURCE_TYPE_REGION = 4;</code>
+     */
+    RESOURCE_TYPE_REGION(4),
+    ;
+
+    /**
+     * <code>RESOURCE_TYPE_SERVER = 1;</code>
+     */
+    public static final int RESOURCE_TYPE_SERVER_VALUE = 1;
+    /**
+     * <code>RESOURCE_TYPE_NAMESPACE = 2;</code>
+     */
+    public static final int RESOURCE_TYPE_NAMESPACE_VALUE = 2;
+    /**
+     * <code>RESOURCE_TYPE_TABLE = 3;</code>
+     */
+    public static final int RESOURCE_TYPE_TABLE_VALUE = 3;
+    /**
+     * <code>RESOURCE_TYPE_REGION = 4;</code>
+     */
+    public static final int RESOURCE_TYPE_REGION_VALUE = 4;
+
+
+    public final int getNumber() {
+      return value;
+    }
+
+    /**
+     * @deprecated Use {@link #forNumber(int)} instead.
+     */
+    @java.lang.Deprecated
+    public static ResourceType valueOf(int value) {
+      return forNumber(value);
+    }
+
+    public static ResourceType forNumber(int value) {
+      switch (value) {
+        case 1: return RESOURCE_TYPE_SERVER;
+        case 2: return RESOURCE_TYPE_NAMESPACE;
+        case 3: return RESOURCE_TYPE_TABLE;
+        case 4: return RESOURCE_TYPE_REGION;
+        default: return null;
+      }
+    }
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<ResourceType>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+        ResourceType> internalValueMap =
+          new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<ResourceType>() {
+            public ResourceType findValueByNumber(int number) {
+              return ResourceType.forNumber(number);
+            }
+          };
+
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(ordinal());
+    }
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.getDescriptor().getEnumTypes().get(1);
+    }
+
+    private static final ResourceType[] VALUES = values();
+
+    public static ResourceType valueOf(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
+
+    private final int value;
+
+    private ResourceType(int value) {
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:hbase.pb.ResourceType)
+  }
+
   public interface LockRequestOrBuilder extends
       // @@protoc_insertion_point(interface_extends:hbase.pb.LockRequest)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
@@ -4898,70 +5006,2193 @@ public final class LockServiceProtos {
 
   }
 
+  public interface WaitingProcedureOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.WaitingProcedure)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 1;</code>
+     */
+    boolean hasLockType();
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType();
+
+    /**
+     * <code>required .hbase.pb.Procedure procedure = 2;</code>
+     */
+    boolean hasProcedure();
+    /**
+     * <code>required .hbase.pb.Procedure procedure = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getProcedure();
+    /**
+     * <code>required .hbase.pb.Procedure procedure = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder();
+  }
   /**
-   * Protobuf service {@code hbase.pb.LockService}
+   * Protobuf type {@code hbase.pb.WaitingProcedure}
    */
-  public static abstract class LockService
-      implements org.apache.hadoop.hbase.shaded.com.google.protobuf.Service {
-    protected LockService() {}
+  public  static final class WaitingProcedure extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.WaitingProcedure)
+      WaitingProcedureOrBuilder {
+    // Use WaitingProcedure.newBuilder() to construct.
+    private WaitingProcedure(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private WaitingProcedure() {
+      lockType_ = 1;
+    }
 
-    public interface Interface {
-      /**
-       * <pre>
-       ** Acquire lock on namespace/table/region 
-       * </pre>
-       *
-       * <code>rpc RequestLock(.hbase.pb.LockRequest) returns (.hbase.pb.LockResponse);</code>
-       */
-      public abstract void requestLock(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest request,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse> done);
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private WaitingProcedure(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType value = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(1, rawValue);
+              } else {
+                bitField0_ |= 0x00000001;
+                lockType_ = rawValue;
+              }
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000002) == 0x00000002)) {
+                subBuilder = procedure_.toBuilder();
+              }
+              procedure_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(procedure_);
+                procedure_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000002;
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_WaitingProcedure_descriptor;
+    }
 
-      /**
-       * <pre>
-       ** Keep alive (or not) a previously acquired lock 
-       * </pre>
-       *
-       * <code>rpc LockHeartbeat(.hbase.pb.LockHeartbeatRequest) returns (.hbase.pb.LockHeartbeatResponse);</code>
-       */
-      public abstract void lockHeartbeat(
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-          org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest request,
-          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse> done);
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_WaitingProcedure_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder.class);
+    }
 
+    private int bitField0_;
+    public static final int LOCK_TYPE_FIELD_NUMBER = 1;
+    private int lockType_;
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 1;</code>
+     */
+    public boolean hasLockType() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType() {
+      org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(lockType_);
+      return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.EXCLUSIVE : result;
     }
 
-    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Service newReflectiveService(
-        final Interface impl) {
-      return new LockService() {
-        @java.lang.Override
-        public  void requestLock(
-            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-            org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest request,
-            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse> done) {
-          impl.requestLock(controller, request, done);
-        }
+    public static final int PROCEDURE_FIELD_NUMBER = 2;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure procedure_;
+    /**
+     * <code>required .hbase.pb.Procedure procedure = 2;</code>
+     */
+    public boolean hasProcedure() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>required .hbase.pb.Procedure procedure = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getProcedure() {
+      return procedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : procedure_;
+    }
+    /**
+     * <code>required .hbase.pb.Procedure procedure = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder() {
+      return procedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : procedure_;
+    }
 
-        @java.lang.Override
-        public  void lockHeartbeat(
-            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
-            org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatRequest request,
-            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockHeartbeatResponse> done) {
-          impl.lockHeartbeat(controller, request, done);
-        }
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
 
-      };
+      if (!hasLockType()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasProcedure()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!getProcedure().isInitialized()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      memoizedIsInitialized = 1;
+      return true;
     }
 
-    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService
-        newReflectiveBlockingService(final BlockingInterface impl) {
-      return new org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService() {
-        public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.ServiceDescriptor
-            getDescriptorForType() {
-          return getDescriptor();
-        }
-
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeEnum(1, lockType_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        output.writeMessage(2, getProcedure());
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeEnumSize(1, lockType_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(2, getProcedure());
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure other = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure) obj;
+
+      boolean result = true;
+      result = result && (hasLockType() == other.hasLockType());
+      if (hasLockType()) {
+        result = result && lockType_ == other.lockType_;
+      }
+      result = result && (hasProcedure() == other.hasProcedure());
+      if (hasProcedure()) {
+        result = result && getProcedure()
+            .equals(other.getProcedure());
+      }
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptor().hashCode();
+      if (hasLockType()) {
+        hash = (37 * hash) + LOCK_TYPE_FIELD_NUMBER;
+        hash = (53 * hash) + lockType_;
+      }
+      if (hasProcedure()) {
+        hash = (37 * hash) + PROCEDURE_FIELD_NUMBER;
+        hash = (53 * hash) + getProcedure().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.WaitingProcedure}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.WaitingProcedure)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_WaitingProcedure_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_WaitingProcedure_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getProcedureFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        lockType_ = 1;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        if (procedureBuilder_ == null) {
+          procedure_ = null;
+        } else {
+          procedureBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_WaitingProcedure_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure result = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.lockType_ = lockType_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        if (procedureBuilder_ == null) {
+          result.procedure_ = procedure_;
+        } else {
+          result.procedure_ = procedureBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.getDefaultInstance()) return this;
+        if (other.hasLockType()) {
+          setLockType(other.getLockType());
+        }
+        if (other.hasProcedure()) {
+          mergeProcedure(other.getProcedure());
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasLockType()) {
+          return false;
+        }
+        if (!hasProcedure()) {
+          return false;
+        }
+        if (!getProcedure().isInitialized()) {
+          return false;
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
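+      // One presence bit per field in bitField0_: has*() tests the bit,
+      // set*() sets it, and clear*() resets it along with the field's default.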
+      private int bitField0_;
+
+      private int lockType_ = 1;
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 1;</code>
+       */
+      public boolean hasLockType() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(lockType_);
+        return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.EXCLUSIVE : result;
+      }
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 1;</code>
+       */
+      public Builder setLockType(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        lockType_ = value.getNumber();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 1;</code>
+       */
+      public Builder clearLockType() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        lockType_ = 1;
+        onChanged();
+        return this;
+      }
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure procedure_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> procedureBuilder_;
+      /**
+       * <code>required .hbase.pb.Procedure procedure = 2;</code>
+       */
+      public boolean hasProcedure() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>required .hbase.pb.Procedure procedure = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getProcedure() {
+        if (procedureBuilder_ == null) {
+          return procedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : procedure_;
+        } else {
+          return procedureBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>required .hbase.pb.Procedure procedure = 2;</code>
+       */
+      public Builder setProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure value) {
+        if (procedureBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          procedure_ = value;
+          onChanged();
+        } else {
+          procedureBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.Procedure procedure = 2;</code>
+       */
+      public Builder setProcedure(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) {
+        if (procedureBuilder_ == null) {
+          procedure_ = builderForValue.build();
+          onChanged();
+        } else {
+          procedureBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.Procedure procedure = 2;</code>
+       */
+      public Builder mergeProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure value) {
+        if (procedureBuilder_ == null) {
+          if (((bitField0_ & 0x00000002) == 0x00000002) &&
+              procedure_ != null &&
+              procedure_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()) {
+            procedure_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.newBuilder(procedure_).mergeFrom(value).buildPartial();
+          } else {
+            procedure_ = value;
+          }
+          onChanged();
+        } else {
+          procedureBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000002;
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.Procedure procedure = 2;</code>
+       */
+      public Builder clearProcedure() {
+        if (procedureBuilder_ == null) {
+          procedure_ = null;
+          onChanged();
+        } else {
+          procedureBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000002);
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.Procedure procedure = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder getProcedureBuilder() {
+        bitField0_ |= 0x00000002;
+        onChanged();
+        return getProcedureFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>required .hbase.pb.Procedure procedure = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getProcedureOrBuilder() {
+        if (procedureBuilder_ != null) {
+          return procedureBuilder_.getMessageOrBuilder();
+        } else {
+          return procedure_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : procedure_;
+        }
+      }
+      /**
+       * <code>required .hbase.pb.Procedure procedure = 2;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> 
+          getProcedureFieldBuilder() {
+        if (procedureBuilder_ == null) {
+          procedureBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder>(
+                  getProcedure(),
+                  getParentForChildren(),
+                  isClean());
+          procedure_ = null;
+        }
+        return procedureBuilder_;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.WaitingProcedure)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.WaitingProcedure)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<WaitingProcedure>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<WaitingProcedure>() {
+      public WaitingProcedure parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new WaitingProcedure(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<WaitingProcedure> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<WaitingProcedure> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
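+  // A LockInfo snapshot describes one lock: the resource it covers, the lock
+  // mode, the exclusive owner procedure (if any), the count of shared-lock
+  // holders, and the procedures queued waiting on the lock.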
+  public interface LockInfoOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.LockInfo)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required .hbase.pb.ResourceType resource_type = 1;</code>
+     */
+    boolean hasResourceType();
+    /**
+     * <code>required .hbase.pb.ResourceType resource_type = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType getResourceType();
+
+    /**
+     * <code>optional string resource_name = 2;</code>
+     */
+    boolean hasResourceName();
+    /**
+     * <code>optional string resource_name = 2;</code>
+     */
+    java.lang.String getResourceName();
+    /**
+     * <code>optional string resource_name = 2;</code>
+     */
+    org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getResourceNameBytes();
+
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 3;</code>
+     */
+    boolean hasLockType();
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 3;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType();
+
+    /**
+     * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+     */
+    boolean hasExclusiveLockOwnerProcedure();
+    /**
+     * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getExclusiveLockOwnerProcedure();
+    /**
+     * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getExclusiveLockOwnerProcedureOrBuilder();
+
+    /**
+     * <code>optional int32 shared_lock_count = 5;</code>
+     */
+    boolean hasSharedLockCount();
+    /**
+     * <code>optional int32 shared_lock_count = 5;</code>
+     */
+    int getSharedLockCount();
+
+    /**
+     * <code>repeated .hbase.pb.WaitingProcedure waitingProcedures = 6;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure> 
+        getWaitingProceduresList();
+    /**
+     * <code>repeated .hbase.pb.WaitingProcedure waitingProcedures = 6;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure getWaitingProcedures(int index);
+    /**
+     * <code>repeated .hbase.pb.WaitingProcedure waitingProcedures = 6;</code>
+     */
+    int getWaitingProceduresCount();
+    /**
+     * <code>repeated .hbase.pb.WaitingProcedure waitingProcedures = 6;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder> 
+        getWaitingProceduresOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.WaitingProcedure waitingProcedures = 6;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder getWaitingProceduresOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.LockInfo}
+   */
+  public static final class LockInfo extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.LockInfo)
+      LockInfoOrBuilder {
+    // Use LockInfo.newBuilder() to construct.
+    private LockInfo(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private LockInfo() {
+      resourceType_ = 1;
+      resourceName_ = "";
+      lockType_ = 1;
+      sharedLockCount_ = 0;
+      waitingProcedures_ = java.util.Collections.emptyList();
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
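+    // Wire-format constructor: reads tag/value pairs until end of input (tag 0);
+    // unknown tags and unrecognized enum numbers are preserved in unknownFields.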
+    private LockInfo(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 8: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType value = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(1, rawValue);
+              } else {
+                bitField0_ |= 0x00000001;
+                resourceType_ = rawValue;
+              }
+              break;
+            }
+            case 18: {
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = input.readBytes();
+              bitField0_ |= 0x00000002;
+              resourceName_ = bs;
+              break;
+            }
+            case 24: {
+              int rawValue = input.readEnum();
+              org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType value = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(rawValue);
+              if (value == null) {
+                unknownFields.mergeVarintField(3, rawValue);
+              } else {
+                bitField0_ |= 0x00000004;
+                lockType_ = rawValue;
+              }
+              break;
+            }
+            case 34: {
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder subBuilder = null;
+              if (((bitField0_ & 0x00000008) == 0x00000008)) {
+                subBuilder = exclusiveLockOwnerProcedure_.toBuilder();
+              }
+              exclusiveLockOwnerProcedure_ = input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.PARSER, extensionRegistry);
+              if (subBuilder != null) {
+                subBuilder.mergeFrom(exclusiveLockOwnerProcedure_);
+                exclusiveLockOwnerProcedure_ = subBuilder.buildPartial();
+              }
+              bitField0_ |= 0x00000008;
+              break;
+            }
+            case 40: {
+              bitField0_ |= 0x00000010;
+              sharedLockCount_ = input.readInt32();
+              break;
+            }
+            case 50: {
+              if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+                waitingProcedures_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure>();
+                mutable_bitField0_ |= 0x00000020;
+              }
+              waitingProcedures_.add(
+                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
+          waitingProcedures_ = java.util.Collections.unmodifiableList(waitingProcedures_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockInfo_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockInfo_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder.class);
+    }
+
+    private int bitField0_;
+    public static final int RESOURCE_TYPE_FIELD_NUMBER = 1;
+    private int resourceType_;
+    /**
+     * <code>required .hbase.pb.ResourceType resource_type = 1;</code>
+     */
+    public boolean hasResourceType() {
+      return ((bitField0_ & 0x00000001) == 0x00000001);
+    }
+    /**
+     * <code>required .hbase.pb.ResourceType resource_type = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType getResourceType() {
+      org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType.valueOf(resourceType_);
+      return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType.RESOURCE_TYPE_SERVER : result;
+    }
+
+    public static final int RESOURCE_NAME_FIELD_NUMBER = 2;
+    private volatile java.lang.Object resourceName_;
+    /**
+     * <code>optional string resource_name = 2;</code>
+     */
+    public boolean hasResourceName() {
+      return ((bitField0_ & 0x00000002) == 0x00000002);
+    }
+    /**
+     * <code>optional string resource_name = 2;</code>
+     */
+    public java.lang.String getResourceName() {
+      java.lang.Object ref = resourceName_;
+      if (ref instanceof java.lang.String) {
+        return (java.lang.String) ref;
+      } else {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs = 
+            (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        java.lang.String s = bs.toStringUtf8();
+        if (bs.isValidUtf8()) {
+          resourceName_ = s;
+        }
+        return s;
+      }
+    }
+    /**
+     * <code>optional string resource_name = 2;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+        getResourceNameBytes() {
+      java.lang.Object ref = resourceName_;
+      if (ref instanceof java.lang.String) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                (java.lang.String) ref);
+        resourceName_ = b;
+        return b;
+      } else {
+        return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+      }
+    }
+
+    public static final int LOCK_TYPE_FIELD_NUMBER = 3;
+    private int lockType_;
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 3;</code>
+     */
+    public boolean hasLockType() {
+      return ((bitField0_ & 0x00000004) == 0x00000004);
+    }
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 3;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType() {
+      org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(lockType_);
+      return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.EXCLUSIVE : result;
+    }
+
+    public static final int EXCLUSIVE_LOCK_OWNER_PROCEDURE_FIELD_NUMBER = 4;
+    private org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure exclusiveLockOwnerProcedure_;
+    /**
+     * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+     */
+    public boolean hasExclusiveLockOwnerProcedure() {
+      return ((bitField0_ & 0x00000008) == 0x00000008);
+    }
+    /**
+     * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getExclusiveLockOwnerProcedure() {
+      return exclusiveLockOwnerProcedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : exclusiveLockOwnerProcedure_;
+    }
+    /**
+     * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getExclusiveLockOwnerProcedureOrBuilder() {
+      return exclusiveLockOwnerProcedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : exclusiveLockOwnerProcedure_;
+    }
+
+    public static final int SHARED_LOCK_COUNT_FIELD_NUMBER = 5;
+    private int sharedLockCount_;
+    /**
+     * <code>optional int32 shared_lock_count = 5;</code>
+     */
+    public boolean hasSharedLockCount() {
+      return ((bitField0_ & 0x00000010) == 0x00000010);
+    }
+    /**
+     * <code>optional int32 shared_lock_count = 5;</code>
+     */
+    public int getSharedLockCount() {
+      return sharedLockCount_;
+    }
+
+    public static final int WAITINGPROCEDURES_FIELD_NUMBER = 6;
+    private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure> waitingProcedures_;
+    /**
+     * <code>repeated .hbase.pb.WaitingProcedure waitingProcedures = 6;</code>
+     */
+    public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure> getWaitingProceduresList() {
+      return waitingProcedures_;
+    }
+    /**
+     * <code>repeated .hbase.pb.WaitingProcedure waitingProcedures = 6;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder> 
+        getWaitingProceduresOrBuilderList() {
+      return waitingProcedures_;
+    }
+    /**
+     * <code>repeated .hbase.pb.WaitingProcedure waitingProcedures = 6;</code>
+     */
+    public int getWaitingProceduresCount() {
+      return waitingProcedures_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.WaitingProcedure waitingProcedures = 6;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedure getWaitingProcedures(int index) {
+      return waitingProcedures_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.WaitingProcedure waitingProcedures = 6;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.WaitingProcedureOrBuilder getWaitingProceduresOrBuilder(
+        int index) {
+      return waitingProcedures_.get(index);
+    }
+
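+    // Cached result of isInitialized(): -1 = not yet computed, 0 = a required
+    // field is missing, 1 = all required fields (and nested messages) present.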
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      if (!hasResourceType()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (!hasLockType()) {
+        memoizedIsInitialized = 0;
+        return false;
+      }
+      if (hasExclusiveLockOwnerProcedure()) {
+        if (!getExclusiveLockOwnerProcedure().isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      for (int i = 0; i < getWaitingProceduresCount(); i++) {
+        if (!getWaitingProcedures(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        output.writeEnum(1, resourceType_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.writeString(output, 2, resourceName_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        output.writeEnum(3, lockType_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        output.writeMessage(4, getExclusiveLockOwnerProcedure());
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        output.writeInt32(5, sharedLockCount_);
+      }
+      for (int i = 0; i < waitingProcedures_.size(); i++) {
+        output.writeMessage(6, waitingProcedures_.get(i));
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      if (((bitField0_ & 0x00000001) == 0x00000001)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeEnumSize(1, resourceType_);
+      }
+      if (((bitField0_ & 0x00000002) == 0x00000002)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.computeStringSize(2, resourceName_);
+      }
+      if (((bitField0_ & 0x00000004) == 0x00000004)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeEnumSize(3, lockType_);
+      }
+      if (((bitField0_ & 0x00000008) == 0x00000008)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(4, getExclusiveLockOwnerProcedure());
+      }
+      if (((bitField0_ & 0x00000010) == 0x00000010)) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeInt32Size(5, sharedLockCount_);
+      }
+      for (int i = 0; i < waitingProcedures_.size(); i++) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(6, waitingProcedures_.get(i));
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+       return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo other = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo) obj;
+
+      boolean result = true;
+      result = result && (hasResourceType() == other.hasResourceType());
+      if (hasResourceType()) {
+        result = result && resourceType_ == other.resourceType_;
+      }
+      result = result && (hasResourceName() == other.hasResourceName());
+      if (hasResourceName()) {
+        result = result && getResourceName()
+            .equals(other.getResourceName());
+      }
+      result = result && (hasLockType() == other.hasLockType());
+      if (hasLockType()) {
+        result = result && lockType_ == other.lockType_;
+      }
+      result = result && (hasExclusiveLockOwnerProcedure() == other.hasExclusiveLockOwnerProcedure());
+      if (hasExclusiveLockOwnerProcedure()) {
+        result = result && getExclusiveLockOwnerProcedure()
+            .equals(other.getExclusiveLockOwnerProcedure());
+      }
+      result = result && (hasSharedLockCount() == other.hasSharedLockCount());
+      if (hasSharedLockCount()) {
+        result = result && (getSharedLockCount()
+            == other.getSharedLockCount());
+      }
+      result = result && getWaitingProceduresList()
+          .equals(other.getWaitingProceduresList());
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptor().hashCode();
+      if (hasResourceType()) {
+        hash = (37 * hash) + RESOURCE_TYPE_FIELD_NUMBER;
+        hash = (53 * hash) + resourceType_;
+      }
+      if (hasResourceName()) {
+        hash = (37 * hash) + RESOURCE_NAME_FIELD_NUMBER;
+        hash = (53 * hash) + getResourceName().hashCode();
+      }
+      if (hasLockType()) {
+        hash = (37 * hash) + LOCK_TYPE_FIELD_NUMBER;
+        hash = (53 * hash) + lockType_;
+      }
+      if (hasExclusiveLockOwnerProcedure()) {
+        hash = (37 * hash) + EXCLUSIVE_LOCK_OWNER_PROCEDURE_FIELD_NUMBER;
+        hash = (53 * hash) + getExclusiveLockOwnerProcedure().hashCode();
+      }
+      if (hasSharedLockCount()) {
+        hash = (37 * hash) + SHARED_LOCK_COUNT_FIELD_NUMBER;
+        hash = (53 * hash) + getSharedLockCount();
+      }
+      if (getWaitingProceduresCount() > 0) {
+        hash = (37 * hash) + WAITINGPROCEDURES_FIELD_NUMBER;
+        hash = (53 * hash) + getWaitingProceduresList().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
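+    // For example, a minimal well-formed LockInfo sets both required fields:
+    //   LockInfo info = LockInfo.newBuilder()
+    //       .setResourceType(ResourceType.RESOURCE_TYPE_SERVER)
+    //       .setLockType(LockType.EXCLUSIVE)
+    //       .build();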
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.LockInfo}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.LockInfo)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockInfo_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockInfo_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.class, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getExclusiveLockOwnerProcedureFieldBuilder();
+          getWaitingProceduresFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        resourceType_ = 1;
+        bitField0_ = (bitField0_ & ~0x00000001);
+        resourceName_ = "";
+        bitField0_ = (bitField0_ & ~0x00000002);
+        lockType_ = 1;
+        bitField0_ = (bitField0_ & ~0x00000004);
+        if (exclusiveLockOwnerProcedureBuilder_ == null) {
+          exclusiveLockOwnerProcedure_ = null;
+        } else {
+          exclusiveLockOwnerProcedureBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000008);
+        sharedLockCount_ = 0;
+        bitField0_ = (bitField0_ & ~0x00000010);
+        if (waitingProceduresBuilder_ == null) {
+          waitingProcedures_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000020);
+        } else {
+          waitingProceduresBuilder_.clear();
+        }
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.internal_static_hbase_pb_LockInfo_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
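+      // buildPartial() copies the builder state into a new message without the
+      // required-field check that build() performs via isInitialized().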
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo result = new org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo(this);
+        int from_bitField0_ = bitField0_;
+        int to_bitField0_ = 0;
+        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
+          to_bitField0_ |= 0x00000001;
+        }
+        result.resourceType_ = resourceType_;
+        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
+          to_bitField0_ |= 0x00000002;
+        }
+        result.resourceName_ = resourceName_;
+        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
+          to_bitField0_ |= 0x00000004;
+        }
+        result.lockType_ = lockType_;
+        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
+          to_bitField0_ |= 0x00000008;
+        }
+        if (exclusiveLockOwnerProcedureBuilder_ == null) {
+          result.exclusiveLockOwnerProcedure_ = exclusiveLockOwnerProcedure_;
+        } else {
+          result.exclusiveLockOwnerProcedure_ = exclusiveLockOwnerProcedureBuilder_.build();
+        }
+        if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
+          to_bitField0_ |= 0x00000010;
+        }
+        result.sharedLockCount_ = sharedLockCount_;
+        if (waitingProceduresBuilder_ == null) {
+          if (((bitField0_ & 0x00000020) == 0x00000020)) {
+            waitingProcedures_ = java.util.Collections.unmodifiableList(waitingProcedures_);
+            bitField0_ = (bitField0_ & ~0x00000020);
+          }
+          result.waitingProcedures_ = waitingProcedures_;
+        } else {
+          result.waitingProcedures_ = waitingProceduresBuilder_.build();
+        }
+        result.bitField0_ = to_bitField0_;
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.getDefaultInstance()) return this;
+        if (other.hasResourceType()) {
+          setResourceType(other.getResourceType());
+        }
+        if (other.hasResourceName()) {
+          bitField0_ |= 0x00000002;
+          resourceName_ = other.resourceName_;
+          onChanged();
+        }
+        if (other.hasLockType()) {
+          setLockType(other.getLockType());
+        }
+        if (other.hasExclusiveLockOwnerProcedure()) {
+          mergeExclusiveLockOwnerProcedure(other.getExclusiveLockOwnerProcedure());
+        }
+        if (other.hasSharedLockCount()) {
+          setSharedLockCount(other.getSharedLockCount());
+        }
+        if (waitingProceduresBuilder_ == null) {
+          if (!other.waitingProcedures_.isEmpty()) {
+            if (waitingProcedures_.isEmpty()) {
+              waitingProcedures_ = other.waitingProcedures_;
+              bitField0_ = (bitField0_ & ~0x00000020);
+            } else {
+              ensureWaitingProceduresIsMutable();
+              waitingProcedures_.addAll(other.waitingProcedures_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.waitingProcedures_.isEmpty()) {
+            if (waitingProceduresBuilder_.isEmpty()) {
+              waitingProceduresBuilder_.dispose();
+              waitingProceduresBuilder_ = null;
+              waitingProcedures_ = other.waitingProcedures_;
+              bitField0_ = (bitField0_ & ~0x00000020);
+              waitingProceduresBuilder_ = 
+                org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
+                   getWaitingProceduresFieldBuilder() : null;
+            } else {
+              waitingProceduresBuilder_.addAllMessages(other.waitingProcedures_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        if (!hasResourceType()) {
+          return false;
+        }
+        if (!hasLockType()) {
+          return false;
+        }
+        if (hasExclusiveLockOwnerProcedure()) {
+          if (!getExclusiveLockOwnerProcedure().isInitialized()) {
+            return false;
+          }
+        }
+        for (int i = 0; i < getWaitingProceduresCount(); i++) {
+          if (!getWaitingProcedures(i).isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private int resourceType_ = 1;
+      /**
+       * <code>required .hbase.pb.ResourceType resource_type = 1;</code>
+       */
+      public boolean hasResourceType() {
+        return ((bitField0_ & 0x00000001) == 0x00000001);
+      }
+      /**
+       * <code>required .hbase.pb.ResourceType resource_type = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType getResourceType() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType.valueOf(resourceType_);
+        return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType.RESOURCE_TYPE_SERVER : result;
+      }
+      /**
+       * <code>required .hbase.pb.ResourceType resource_type = 1;</code>
+       */
+      public Builder setResourceType(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.ResourceType value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000001;
+        resourceType_ = value.getNumber();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.ResourceType resource_type = 1;</code>
+       */
+      public Builder clearResourceType() {
+        bitField0_ = (bitField0_ & ~0x00000001);
+        resourceType_ = 1;
+        onChanged();
+        return this;
+      }
+
+      private java.lang.Object resourceName_ = "";
+      /**
+       * <code>optional string resource_name = 2;</code>
+       */
+      public boolean hasResourceName() {
+        return ((bitField0_ & 0x00000002) == 0x00000002);
+      }
+      /**
+       * <code>optional string resource_name = 2;</code>
+       */
+      public java.lang.String getResourceName() {
+        java.lang.Object ref = resourceName_;
+        if (!(ref instanceof java.lang.String)) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString bs =
+              (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+          java.lang.String s = bs.toStringUtf8();
+          if (bs.isValidUtf8()) {
+            resourceName_ = s;
+          }
+          return s;
+        } else {
+          return (java.lang.String) ref;
+        }
+      }
+      /**
+       * <code>optional string resource_name = 2;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString
+          getResourceNameBytes() {
+        java.lang.Object ref = resourceName_;
+        if (ref instanceof java.lang.String) {
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString b = 
+              org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString.copyFromUtf8(
+                  (java.lang.String) ref);
+          resourceName_ = b;
+          return b;
+        } else {
+          return (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ref;
+        }
+      }
+      /**
+       * <code>optional string resource_name = 2;</code>
+       */
+      public Builder setResourceName(
+          java.lang.String value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        resourceName_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string resource_name = 2;</code>
+       */
+      public Builder clearResourceName() {
+        bitField0_ = (bitField0_ & ~0x00000002);
+        resourceName_ = getDefaultInstance().getResourceName();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional string resource_name = 2;</code>
+       */
+      public Builder setResourceNameBytes(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000002;
+        resourceName_ = value;
+        onChanged();
+        return this;
+      }
+
+      private int lockType_ = 1;
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 3;</code>
+       */
+      public boolean hasLockType() {
+        return ((bitField0_ & 0x00000004) == 0x00000004);
+      }
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 3;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType result = org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.valueOf(lockType_);
+        return result == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType.EXCLUSIVE : result;
+      }
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 3;</code>
+       */
+      public Builder setLockType(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType value) {
+        if (value == null) {
+          throw new NullPointerException();
+        }
+        bitField0_ |= 0x00000004;
+        lockType_ = value.getNumber();
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>required .hbase.pb.LockType lock_type = 3;</code>
+       */
+      public Builder clearLockType() {
+        bitField0_ = (bitField0_ & ~0x00000004);
+        lockType_ = 1;
+        onChanged();
+        return this;
+      }
+
+      private org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure exclusiveLockOwnerProcedure_ = null;
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> exclusiveLockOwnerProcedureBuilder_;
+      /**
+       * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+       */
+      public boolean hasExclusiveLockOwnerProcedure() {
+        return ((bitField0_ & 0x00000008) == 0x00000008);
+      }
+      /**
+       * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure getExclusiveLockOwnerProcedure() {
+        if (exclusiveLockOwnerProcedureBuilder_ == null) {
+          return exclusiveLockOwnerProcedure_ == null ? org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : exclusiveLockOwnerProcedure_;
+        } else {
+          return exclusiveLockOwnerProcedureBuilder_.getMessage();
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+       */
+      public Builder setExclusiveLockOwnerProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure value) {
+        if (exclusiveLockOwnerProcedureBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          exclusiveLockOwnerProcedure_ = value;
+          onChanged();
+        } else {
+          exclusiveLockOwnerProcedureBuilder_.setMessage(value);
+        }
+        bitField0_ |= 0x00000008;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+       */
+      public Builder setExclusiveLockOwnerProcedure(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder builderForValue) {
+        if (exclusiveLockOwnerProcedureBuilder_ == null) {
+          exclusiveLockOwnerProcedure_ = builderForValue.build();
+          onChanged();
+        } else {
+          exclusiveLockOwnerProcedureBuilder_.setMessage(builderForValue.build());
+        }
+        bitField0_ |= 0x00000008;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+       */
+      public Builder mergeExclusiveLockOwnerProcedure(org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure value) {
+        if (exclusiveLockOwnerProcedureBuilder_ == null) {
+          if (((bitField0_ & 0x00000008) == 0x00000008) &&
+              exclusiveLockOwnerProcedure_ != null &&
+              exclusiveLockOwnerProcedure_ != org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance()) {
+            exclusiveLockOwnerProcedure_ =
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.newBuilder(exclusiveLockOwnerProcedure_).mergeFrom(value).buildPartial();
+          } else {
+            exclusiveLockOwnerProcedure_ = value;
+          }
+          onChanged();
+        } else {
+          exclusiveLockOwnerProcedureBuilder_.mergeFrom(value);
+        }
+        bitField0_ |= 0x00000008;
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+       */
+      public Builder clearExclusiveLockOwnerProcedure() {
+        if (exclusiveLockOwnerProcedureBuilder_ == null) {
+          exclusiveLockOwnerProcedure_ = null;
+          onChanged();
+        } else {
+          exclusiveLockOwnerProcedureBuilder_.clear();
+        }
+        bitField0_ = (bitField0_ & ~0x00000008);
+        return this;
+      }
+      /**
+       * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder getExclusiveLockOwnerProcedureBuilder() {
+        bitField0_ |= 0x00000008;
+        onChanged();
+        return getExclusiveLockOwnerProcedureFieldBuilder().getBuilder();
+      }
+      /**
+       * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder getExclusiveLockOwnerProcedureOrBuilder() {
+        if (exclusiveLockOwnerProcedureBuilder_ != null) {
+          return exclusiveLockOwnerProcedureBuilder_.getMessageOrBuilder();
+        } else {
+          return exclusiveLockOwnerProcedure_ == null ?
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.getDefaultInstance() : exclusiveLockOwnerProcedure_;
+        }
+      }
+      /**
+       * <code>optional .hbase.pb.Procedure exclusive_lock_owner_procedure = 4;</code>
+       */
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder> 
+          getExclusiveLockOwnerProcedureFieldBuilder() {
+        if (exclusiveLockOwnerProcedureBuilder_ == null) {
+          exclusiveLockOwnerProcedureBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.SingleFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureOrBuilder>(
+                  getExclusiveLockOwnerProcedure(),
+                  getParentForChildren(),
+                  isClean());
+          exclusiveLockOwnerProcedure_ = null;
+        }
+        return exclusiveLockOwnerProcedureBuilder_;
+      }
+
+      private int sharedLockCount_ ;
+      /**
+       * <code>optional int32 shared_lock_count = 5;</code>
+       */
+      public boolean hasSharedLockCount() {
+        return ((bitField0_ & 0x00000010) == 0x00000010);
+      }
+      /**
+       * <code>optional int32 shared_lock_count = 5;</code>
+       */
+      public int getSharedLockCount() {
+        return sharedLockCount_;
+      }
+      /**
+       * <code>optional int32 shared_lock_count = 5;</code>
+       */
+      public Builder setSharedLockCount(int value) {
+        bitField0_ |= 0x00000010;
+        sharedLockCount_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional int32 shared_lock_count = 5;</code>
+       */
+      public Builder clearSharedL

<TRUNCATED>

[2/4] hbase git commit: HBASE-15143 Procedure v2 - Web UI displaying queues

Posted by st...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 8ff19b2..e4ce4cb 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -62144,6 +62144,1133 @@ public final class MasterProtos {
 
   }
 
+  public interface ListLocksRequestOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.ListLocksRequest)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.ListLocksRequest}
+   */
+  public  static final class ListLocksRequest extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.ListLocksRequest)
+      ListLocksRequestOrBuilder {
+    // Use ListLocksRequest.newBuilder() to construct.
+    private ListLocksRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private ListLocksRequest() {
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private ListLocksRequest(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.Builder.class);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest) obj;
+
+      boolean result = true;
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptor().hashCode();
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.ListLocksRequest}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.ListLocksRequest)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequestOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest(this);
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.getDefaultInstance()) return this;
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.ListLocksRequest)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.ListLocksRequest)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListLocksRequest>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<ListLocksRequest>() {
+      public ListLocksRequest parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new ListLocksRequest(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListLocksRequest> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListLocksRequest> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
+  public interface ListLocksResponseOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.ListLocksResponse)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+     */
+    java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo> 
+        getLockList();
+    /**
+     * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo getLock(int index);
+    /**
+     * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+     */
+    int getLockCount();
+    /**
+     * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+     */
+    java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder> 
+        getLockOrBuilderList();
+    /**
+     * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder getLockOrBuilder(
+        int index);
+  }
+  /**
+   * Protobuf type {@code hbase.pb.ListLocksResponse}
+   */
+  public  static final class ListLocksResponse extends
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+      // @@protoc_insertion_point(message_implements:hbase.pb.ListLocksResponse)
+      ListLocksResponseOrBuilder {
+    // Use ListLocksResponse.newBuilder() to construct.
+    private ListLocksResponse(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+      super(builder);
+    }
+    private ListLocksResponse() {
+      lock_ = java.util.Collections.emptyList();
+    }
+
+    @java.lang.Override
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+    getUnknownFields() {
+      return this.unknownFields;
+    }
+    private ListLocksResponse(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      this();
+      int mutable_bitField0_ = 0;
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+      try {
+        boolean done = false;
+        while (!done) {
+          int tag = input.readTag();
+          switch (tag) {
+            case 0:
+              done = true;
+              break;
+            default: {
+              if (!parseUnknownField(input, unknownFields,
+                                     extensionRegistry, tag)) {
+                done = true;
+              }
+              break;
+            }
+            case 10: {
+              if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+                lock_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo>();
+                mutable_bitField0_ |= 0x00000001;
+              }
+              lock_.add(
+                  input.readMessage(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.PARSER, extensionRegistry));
+              break;
+            }
+          }
+        }
+      } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+        throw e.setUnfinishedMessage(this);
+      } catch (java.io.IOException e) {
+        throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+            e).setUnfinishedMessage(this);
+      } finally {
+        if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) {
+          lock_ = java.util.Collections.unmodifiableList(lock_);
+        }
+        this.unknownFields = unknownFields.build();
+        makeExtensionsImmutable();
+      }
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksResponse_descriptor;
+    }
+
+    protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksResponse_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.Builder.class);
+    }
+
+    public static final int LOCK_FIELD_NUMBER = 1;
+    private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo> lock_;
+    /**
+     * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+     */
+    public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo> getLockList() {
+      return lock_;
+    }
+    /**
+     * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+     */
+    public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder> 
+        getLockOrBuilderList() {
+      return lock_;
+    }
+    /**
+     * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+     */
+    public int getLockCount() {
+      return lock_.size();
+    }
+    /**
+     * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo getLock(int index) {
+      return lock_.get(index);
+    }
+    /**
+     * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+     */
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder getLockOrBuilder(
+        int index) {
+      return lock_.get(index);
+    }
+
+    private byte memoizedIsInitialized = -1;
+    public final boolean isInitialized() {
+      byte isInitialized = memoizedIsInitialized;
+      if (isInitialized == 1) return true;
+      if (isInitialized == 0) return false;
+
+      for (int i = 0; i < getLockCount(); i++) {
+        if (!getLock(i).isInitialized()) {
+          memoizedIsInitialized = 0;
+          return false;
+        }
+      }
+      memoizedIsInitialized = 1;
+      return true;
+    }
+
+    public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+                        throws java.io.IOException {
+      for (int i = 0; i < lock_.size(); i++) {
+        output.writeMessage(1, lock_.get(i));
+      }
+      unknownFields.writeTo(output);
+    }
+
+    public int getSerializedSize() {
+      int size = memoizedSize;
+      if (size != -1) return size;
+
+      size = 0;
+      for (int i = 0; i < lock_.size(); i++) {
+        size += org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream
+          .computeMessageSize(1, lock_.get(i));
+      }
+      size += unknownFields.getSerializedSize();
+      memoizedSize = size;
+      return size;
+    }
+
+    private static final long serialVersionUID = 0L;
+    @java.lang.Override
+    public boolean equals(final java.lang.Object obj) {
+      if (obj == this) {
+        return true;
+      }
+      if (!(obj instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse)) {
+        return super.equals(obj);
+      }
+      org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse other = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse) obj;
+
+      boolean result = true;
+      result = result && getLockList()
+          .equals(other.getLockList());
+      result = result && unknownFields.equals(other.unknownFields);
+      return result;
+    }
+
+    @java.lang.Override
+    public int hashCode() {
+      if (memoizedHashCode != 0) {
+        return memoizedHashCode;
+      }
+      int hash = 41;
+      hash = (19 * hash) + getDescriptor().hashCode();
+      if (getLockCount() > 0) {
+        hash = (37 * hash) + LOCK_FIELD_NUMBER;
+        hash = (53 * hash) + getLockList().hashCode();
+      }
+      hash = (29 * hash) + unknownFields.hashCode();
+      memoizedHashCode = hash;
+      return hash;
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom(byte[] data)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom(
+        byte[] data,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+      return PARSER.parseFrom(data, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseDelimitedFrom(java.io.InputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseDelimitedFrom(
+        java.io.InputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input);
+    }
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parseFrom(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+        throws java.io.IOException {
+      return org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+          .parseWithIOException(PARSER, input, extensionRegistry);
+    }
+
+    public Builder newBuilderForType() { return newBuilder(); }
+    public static Builder newBuilder() {
+      return DEFAULT_INSTANCE.toBuilder();
+    }
+    public static Builder newBuilder(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse prototype) {
+      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+    }
+    public Builder toBuilder() {
+      return this == DEFAULT_INSTANCE
+          ? new Builder() : new Builder().mergeFrom(this);
+    }
+
+    @java.lang.Override
+    protected Builder newBuilderForType(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+      Builder builder = new Builder(parent);
+      return builder;
+    }
+    /**
+     * Protobuf type {@code hbase.pb.ListLocksResponse}
+     */
+    public static final class Builder extends
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements
+        // @@protoc_insertion_point(builder_implements:hbase.pb.ListLocksResponse)
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponseOrBuilder {
+      public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptor() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksResponse_descriptor;
+      }
+
+      protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+          internalGetFieldAccessorTable() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksResponse_fieldAccessorTable
+            .ensureFieldAccessorsInitialized(
+                org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.Builder.class);
+      }
+
+      // Construct using org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.newBuilder()
+      private Builder() {
+        maybeForceBuilderInitialization();
+      }
+
+      private Builder(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+        super(parent);
+        maybeForceBuilderInitialization();
+      }
+      private void maybeForceBuilderInitialization() {
+        if (org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3
+                .alwaysUseFieldBuilders) {
+          getLockFieldBuilder();
+        }
+      }
+      public Builder clear() {
+        super.clear();
+        if (lockBuilder_ == null) {
+          lock_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+        } else {
+          lockBuilder_.clear();
+        }
+        return this;
+      }
+
+      public org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+          getDescriptorForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksResponse_descriptor;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse getDefaultInstanceForType() {
+        return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance();
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse build() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse result = buildPartial();
+        if (!result.isInitialized()) {
+          throw newUninitializedMessageException(result);
+        }
+        return result;
+      }
+
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse buildPartial() {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse result = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse(this);
+        int from_bitField0_ = bitField0_;
+        if (lockBuilder_ == null) {
+          if (((bitField0_ & 0x00000001) == 0x00000001)) {
+            lock_ = java.util.Collections.unmodifiableList(lock_);
+            bitField0_ = (bitField0_ & ~0x00000001);
+          }
+          result.lock_ = lock_;
+        } else {
+          result.lock_ = lockBuilder_.build();
+        }
+        onBuilt();
+        return result;
+      }
+
+      public Builder clone() {
+        return (Builder) super.clone();
+      }
+      public Builder setField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.setField(field, value);
+      }
+      public Builder clearField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field) {
+        return (Builder) super.clearField(field);
+      }
+      public Builder clearOneof(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+        return (Builder) super.clearOneof(oneof);
+      }
+      public Builder setRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          int index, Object value) {
+        return (Builder) super.setRepeatedField(field, index, value);
+      }
+      public Builder addRepeatedField(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.FieldDescriptor field,
+          Object value) {
+        return (Builder) super.addRepeatedField(field, value);
+      }
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message other) {
+        if (other instanceof org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse) {
+          return mergeFrom((org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse)other);
+        } else {
+          super.mergeFrom(other);
+          return this;
+        }
+      }
+
+      public Builder mergeFrom(org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse other) {
+        if (other == org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance()) return this;
+        if (lockBuilder_ == null) {
+          if (!other.lock_.isEmpty()) {
+            if (lock_.isEmpty()) {
+              lock_ = other.lock_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+            } else {
+              ensureLockIsMutable();
+              lock_.addAll(other.lock_);
+            }
+            onChanged();
+          }
+        } else {
+          if (!other.lock_.isEmpty()) {
+            if (lockBuilder_.isEmpty()) {
+              lockBuilder_.dispose();
+              lockBuilder_ = null;
+              lock_ = other.lock_;
+              bitField0_ = (bitField0_ & ~0x00000001);
+              lockBuilder_ = 
+                org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ?
+                   getLockFieldBuilder() : null;
+            } else {
+              lockBuilder_.addAllMessages(other.lock_);
+            }
+          }
+        }
+        this.mergeUnknownFields(other.unknownFields);
+        onChanged();
+        return this;
+      }
+
+      public final boolean isInitialized() {
+        for (int i = 0; i < getLockCount(); i++) {
+          if (!getLock(i).isInitialized()) {
+            return false;
+          }
+        }
+        return true;
+      }
+
+      public Builder mergeFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws java.io.IOException {
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse parsedMessage = null;
+        try {
+          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
+        } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+          parsedMessage = (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse) e.getUnfinishedMessage();
+          throw e.unwrapIOException();
+        } finally {
+          if (parsedMessage != null) {
+            mergeFrom(parsedMessage);
+          }
+        }
+        return this;
+      }
+      private int bitField0_;
+
+      private java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo> lock_ =
+        java.util.Collections.emptyList();
+      private void ensureLockIsMutable() {
+        if (!((bitField0_ & 0x00000001) == 0x00000001)) {
+          lock_ = new java.util.ArrayList<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo>(lock_);
+          bitField0_ |= 0x00000001;
+        }
+      }
+
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder> lockBuilder_;
+
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo> getLockList() {
+        if (lockBuilder_ == null) {
+          return java.util.Collections.unmodifiableList(lock_);
+        } else {
+          return lockBuilder_.getMessageList();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public int getLockCount() {
+        if (lockBuilder_ == null) {
+          return lock_.size();
+        } else {
+          return lockBuilder_.getCount();
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo getLock(int index) {
+        if (lockBuilder_ == null) {
+          return lock_.get(index);
+        } else {
+          return lockBuilder_.getMessage(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public Builder setLock(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo value) {
+        if (lockBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureLockIsMutable();
+          lock_.set(index, value);
+          onChanged();
+        } else {
+          lockBuilder_.setMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public Builder setLock(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder builderForValue) {
+        if (lockBuilder_ == null) {
+          ensureLockIsMutable();
+          lock_.set(index, builderForValue.build());
+          onChanged();
+        } else {
+          lockBuilder_.setMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public Builder addLock(org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo value) {
+        if (lockBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureLockIsMutable();
+          lock_.add(value);
+          onChanged();
+        } else {
+          lockBuilder_.addMessage(value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public Builder addLock(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo value) {
+        if (lockBuilder_ == null) {
+          if (value == null) {
+            throw new NullPointerException();
+          }
+          ensureLockIsMutable();
+          lock_.add(index, value);
+          onChanged();
+        } else {
+          lockBuilder_.addMessage(index, value);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public Builder addLock(
+          org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder builderForValue) {
+        if (lockBuilder_ == null) {
+          ensureLockIsMutable();
+          lock_.add(builderForValue.build());
+          onChanged();
+        } else {
+          lockBuilder_.addMessage(builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public Builder addLock(
+          int index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder builderForValue) {
+        if (lockBuilder_ == null) {
+          ensureLockIsMutable();
+          lock_.add(index, builderForValue.build());
+          onChanged();
+        } else {
+          lockBuilder_.addMessage(index, builderForValue.build());
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public Builder addAllLock(
+          java.lang.Iterable<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo> values) {
+        if (lockBuilder_ == null) {
+          ensureLockIsMutable();
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractMessageLite.Builder.addAll(
+              values, lock_);
+          onChanged();
+        } else {
+          lockBuilder_.addAllMessages(values);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public Builder clearLock() {
+        if (lockBuilder_ == null) {
+          lock_ = java.util.Collections.emptyList();
+          bitField0_ = (bitField0_ & ~0x00000001);
+          onChanged();
+        } else {
+          lockBuilder_.clear();
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public Builder removeLock(int index) {
+        if (lockBuilder_ == null) {
+          ensureLockIsMutable();
+          lock_.remove(index);
+          onChanged();
+        } else {
+          lockBuilder_.remove(index);
+        }
+        return this;
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder getLockBuilder(
+          int index) {
+        return getLockFieldBuilder().getBuilder(index);
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder getLockOrBuilder(
+          int index) {
+        if (lockBuilder_ == null) {
+          return lock_.get(index);
+        } else {
+          return lockBuilder_.getMessageOrBuilder(index);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public java.util.List<? extends org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder> 
+           getLockOrBuilderList() {
+        if (lockBuilder_ != null) {
+          return lockBuilder_.getMessageOrBuilderList();
+        } else {
+          return java.util.Collections.unmodifiableList(lock_);
+        }
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder addLockBuilder() {
+        return getLockFieldBuilder().addBuilder(
+            org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder addLockBuilder(
+          int index) {
+        return getLockFieldBuilder().addBuilder(
+            index, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.getDefaultInstance());
+      }
+      /**
+       * <code>repeated .hbase.pb.LockInfo lock = 1;</code>
+       */
+      public java.util.List<org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder> 
+           getLockBuilderList() {
+        return getLockFieldBuilder().getBuilderList();
+      }
+      private org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+          org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder> 
+          getLockFieldBuilder() {
+        if (lockBuilder_ == null) {
+          lockBuilder_ = new org.apache.hadoop.hbase.shaded.com.google.protobuf.RepeatedFieldBuilderV3<
+              org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfo.Builder, org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockInfoOrBuilder>(
+                  lock_,
+                  ((bitField0_ & 0x00000001) == 0x00000001),
+                  getParentForChildren(),
+                  isClean());
+          lock_ = null;
+        }
+        return lockBuilder_;
+      }
+      public final Builder setUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.setUnknownFields(unknownFields);
+      }
+
+      public final Builder mergeUnknownFields(
+          final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet unknownFields) {
+        return super.mergeUnknownFields(unknownFields);
+      }
+
+
+      // @@protoc_insertion_point(builder_scope:hbase.pb.ListLocksResponse)
+    }
+
+    // @@protoc_insertion_point(class_scope:hbase.pb.ListLocksResponse)
+    private static final org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse DEFAULT_INSTANCE;
+    static {
+      DEFAULT_INSTANCE = new org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse();
+    }
+
+    public static org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse getDefaultInstance() {
+      return DEFAULT_INSTANCE;
+    }
+
+    @java.lang.Deprecated public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListLocksResponse>
+        PARSER = new org.apache.hadoop.hbase.shaded.com.google.protobuf.AbstractParser<ListLocksResponse>() {
+      public ListLocksResponse parsePartialFrom(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+          return new ListLocksResponse(input, extensionRegistry);
+      }
+    };
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListLocksResponse> parser() {
+      return PARSER;
+    }
+
+    @java.lang.Override
+    public org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser<ListLocksResponse> getParserForType() {
+      return PARSER;
+    }
+
+    public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse getDefaultInstanceForType() {
+      return DEFAULT_INSTANCE;
+    }
+
+  }
+
   public interface SetQuotaRequestOrBuilder extends
       // @@protoc_insertion_point(interface_extends:hbase.pb.SetQuotaRequest)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
@@ -70841,6 +71968,14 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done);
 
       /**
+       * <code>rpc ListLocks(.hbase.pb.ListLocksRequest) returns (.hbase.pb.ListLocksResponse);</code>
+       */
+      public abstract void listLocks(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse> done);
+
+      /**
        * <pre>
        ** Add a replication peer 
        * </pre>
@@ -71446,6 +72581,14 @@ public final class MasterProtos {
         }
 
         @java.lang.Override
+        public  void listLocks(
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse> done) {
+          impl.listLocks(controller, request, done);
+        }
+
+        @java.lang.Override
         public  void addReplicationPeer(
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
             org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
@@ -71668,24 +72811,26 @@ public final class MasterProtos {
             case 59:
               return impl.listProcedures(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest)request);
             case 60:
-              return impl.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request);
+              return impl.listLocks(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest)request);
             case 61:
-              return impl.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request);
+              return impl.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request);
             case 62:
-              return impl.enableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)request);
+              return impl.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request);
             case 63:
-              return impl.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request);
+              return impl.enableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)request);
             case 64:
-              return impl.getReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)request);
+              return impl.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request);
             case 65:
-              return impl.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request);
+              return impl.getReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)request);
             case 66:
-              return impl.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request);
+              return impl.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request);
             case 67:
-              return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request);
+              return impl.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request);
             case 68:
-              return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
+              return impl.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request);
             case 69:
+              return impl.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request);
+            case 70:
               return impl.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request);
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -71822,24 +72967,26 @@ public final class MasterProtos {
             case 59:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
             case 60:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.getDefaultInstance();
             case 61:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
             case 62:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
             case 63:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
             case 64:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
             case 65:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
             case 66:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
             case 67:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
             case 68:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
             case 69:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+            case 70:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -71976,24 +73123,26 @@ public final class MasterProtos {
             case 59:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
             case 60:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance();
             case 61:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
             case 62:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
             case 63:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
             case 64:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
             case 65:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
             case 66:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
             case 67:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
             case 68:
-              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
             case 69:
+              return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+            case 70:
               return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
             default:
               throw new java.lang.AssertionError("Can't get here.");
@@ -72755,6 +73904,14 @@ public final class MasterProtos {
         org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse> done);
 
     /**
+     * <code>rpc ListLocks(.hbase.pb.ListLocksRequest) returns (.hbase.pb.ListLocksResponse);</code>
+     */
+    public abstract void listLocks(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+        org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request,
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse> done);
+
+    /**
      * <pre>
      ** Add a replication peer 
      * </pre>
@@ -73197,51 +74354,56 @@ public final class MasterProtos {
               done));
           return;
         case 60:
+          this.listLocks(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest)request,
+            org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse>specializeCallback(
+              done));
+          return;
+        case 61:
           this.addReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse>specializeCallback(
               done));
           return;
-        case 61:
+        case 62:
           this.removeReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse>specializeCallback(
               done));
           return;
-        case 62:
+        case 63:
           this.enableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse>specializeCallback(
               done));
           return;
-        case 63:
+        case 64:
           this.disableReplicationPeer(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse>specializeCallback(
               done));
           return;
-        case 64:
+        case 65:
           this.getReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse>specializeCallback(
               done));
           return;
-        case 65:
+        case 66:
           this.updateReplicationPeerConfig(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse>specializeCallback(
               done));
           return;
-        case 66:
+        case 67:
           this.listReplicationPeers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse>specializeCallback(
               done));
           return;
-        case 67:
+        case 68:
           this.listDrainingRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse>specializeCallback(
               done));
           return;
-        case 68:
+        case 69:
           this.drainRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse>specializeCallback(
               done));
           return;
-        case 69:
+        case 70:
           this.removeDrainFromRegionServers(controller, (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest)request,
             org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse>specializeCallback(
               done));
@@ -73381,24 +74543,26 @@ public final class MasterProtos {
         case 59:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest.getDefaultInstance();
         case 60:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.getDefaultInstance();
         case 61:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest.getDefaultInstance();
         case 62:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest.getDefaultInstance();
         case 63:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest.getDefaultInstance();
         case 64:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest.getDefaultInstance();
         case 65:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest.getDefaultInstance();
         case 66:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest.getDefaultInstance();
         case 67:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest.getDefaultInstance();
         case 68:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest.getDefaultInstance();
         case 69:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest.getDefaultInstance();
+        case 70:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -73535,24 +74699,26 @@ public final class MasterProtos {
         case 59:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance();
         case 60:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance();
         case 61:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance();
         case 62:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance();
         case 63:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance();
         case 64:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance();
         case 65:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance();
         case 66:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance();
         case 67:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance();
         case 68:
-          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance();
         case 69:
+          return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance();
+        case 70:
           return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance();
         default:
           throw new java.lang.AssertionError("Can't get here.");
@@ -74475,12 +75641,27 @@ public final class MasterProtos {
             org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse.getDefaultInstance()));
       }
 
+      public  void listLocks(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request,
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse> done) {
+        channel.callMethod(
+          getDescriptor().getMethods().get(60),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance(),
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcUtil.generalizeCallback(
+            done,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.class,
+            org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance()));
+      }
+
       public  void addReplicationPeer(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(60),
+          getDescriptor().getMethods().get(61),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance(),
@@ -74495,7 +75676,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(61),
+          getDescriptor().getMethods().get(62),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance(),
@@ -74510,7 +75691,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(62),
+          getDescriptor().getMethods().get(63),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance(),
@@ -74525,7 +75706,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(63),
+          getDescriptor().getMethods().get(64),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance(),
@@ -74540,7 +75721,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(64),
+          getDescriptor().getMethods().get(65),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance(),
@@ -74555,7 +75736,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(65),
+          getDescriptor().getMethods().get(66),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse.getDefaultInstance(),
@@ -74570,7 +75751,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(66),
+          getDescriptor().getMethods().get(67),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.ListReplicationPeersResponse.getDefaultInstance(),
@@ -74585,7 +75766,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(67),
+          getDescriptor().getMethods().get(68),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse.getDefaultInstance(),
@@ -74600,7 +75781,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(68),
+          getDescriptor().getMethods().get(69),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse.getDefaultInstance(),
@@ -74615,7 +75796,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest request,
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse> done) {
         channel.callMethod(
-          getDescriptor().getMethods().get(69),
+          getDescriptor().getMethods().get(70),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse.getDefaultInstance(),
@@ -74932,6 +76113,11 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
 
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse listLocks(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
+
       public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse addReplicationPeer(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request)
@@ -75710,12 +76896,24 @@ public final class MasterProtos {
       }
 
 
+      public org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse listLocks(
+          org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest request)
+          throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
+        return (org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse) channel.callBlockingMethod(
+          getDescriptor().getMethods().get(60),
+          controller,
+          request,
+          org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse.getDefaultInstance());
+      }
+
+
       public org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse addReplicationPeer(
           org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController controller,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(60),
+          getDescriptor().getMethods().get(61),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse.getDefaultInstance());
@@ -75727,7 +76925,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(61),
+          getDescriptor().getMethods().get(62),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse.getDefaultInstance());
@@ -75739,7 +76937,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(62),
+          getDescriptor().getMethods().get(63),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse.getDefaultInstance());
@@ -75751,7 +76949,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(63),
+          getDescriptor().getMethods().get(64),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse.getDefaultInstance());
@@ -75763,7 +76961,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(64),
+          getDescriptor().getMethods().get(65),
           controller,
           request,
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse.getDefaultInstance());
@@ -75775,7 +76973,7 @@ public final class MasterProtos {
           org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest request)
           throws org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException {
         return (org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse) channel.callBlockingMethod(
-          getDescriptor().getMethods().get(65),
+          getDescriptor().getMethods().get(66),
           controlle

<TRUNCATED>
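
The renumbering in the hunks above is mechanical: ListLocks is inserted as
method 60 of MasterService, so the switch arm and descriptor index of every
later RPC (the replication-peer and drain-region-server calls) shift up by
one. A minimal sketch of the invariant those switch statements rely on: the
arm number is just the method's position in the service descriptor. The
assertion below is illustrative only, not part of the commit.

  import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

  final class MethodIndexSketch {
    public static void main(String[] args) {
      // callMethod()/getRequestPrototype() switch on this index, which is
      // simply where the rpc appears in Master.proto's MasterService.
      Descriptors.MethodDescriptor listLocks =
          MasterProtos.MasterService.getDescriptor().findMethodByName("ListLocks");
      assert listLocks.getIndex() == 60 : "ListLocks was inserted as method 60";
    }
  }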

[4/4] hbase git commit: HBASE-15143 Procedure v2 - Web UI displaying queues

Posted by st...@apache.org.
HBASE-15143 Procedure v2 - Web UI displaying queues

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25575064
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25575064
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25575064

Branch: refs/heads/master
Commit: 25575064154fe1cc7ff8970e8f15a3cff648f37a
Parents: 1367519
Author: Balazs Meszaros <ba...@cloudera.com>
Authored: Mon Feb 13 13:50:56 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Tue Apr 25 09:39:28 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/Admin.java   |    9 +
 .../hbase/client/ConnectionImplementation.java  |   11 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   42 +-
 .../client/ShortCircuitMasterConnection.java    |    6 +
 .../hbase/shaded/protobuf/ProtobufUtil.java     |  183 +-
 .../hadoop/hbase/procedure2/LockInfo.java       |  128 +
 .../hadoop/hbase/procedure2/LockAndQueue.java   |   21 +-
 .../hadoop/hbase/procedure2/LockStatus.java     |    1 +
 .../hbase/procedure2/ProcedureScheduler.java    |    7 +
 .../hadoop/hbase/procedure2/ProcedureUtil.java  |    4 +-
 .../procedure2/SimpleProcedureScheduler.java    |   10 +-
 .../protobuf/generated/LockServiceProtos.java   | 2423 +++++++++++++++++-
 .../shaded/protobuf/generated/MasterProtos.java | 2152 ++++++++++++----
 .../src/main/protobuf/LockService.proto         |   22 +
 .../src/main/protobuf/Master.proto              |   11 +
 .../hbase/tmpl/master/MasterStatusTmpl.jamon    |    2 +-
 .../hbase/coprocessor/MasterObserver.java       |   19 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   37 +-
 .../hbase/master/MasterCoprocessorHost.java     |   21 +
 .../hadoop/hbase/master/MasterRpcServices.java  |  147 +-
 .../hadoop/hbase/master/MasterServices.java     |    9 +-
 .../hbase/master/locking/LockProcedure.java     |    8 +-
 .../procedure/MasterProcedureScheduler.java     |  119 +-
 .../hbase-webapps/master/procedures.jsp         |  127 +-
 .../resources/hbase-webapps/master/snapshot.jsp |    2 +-
 .../hbase-webapps/master/snapshotsStats.jsp     |    2 +-
 .../resources/hbase-webapps/master/table.jsp    |    2 +-
 .../hbase-webapps/master/tablesDetailed.jsp     |    2 +-
 .../main/resources/hbase-webapps/master/zk.jsp  |    2 +-
 .../hbase/coprocessor/TestMasterObserver.java   |   38 +
 .../hbase/master/MockNoopMasterServices.java    |    9 +-
 .../procedure/TestMasterProcedureScheduler.java |  169 +-
 .../hadoop/hbase/protobuf/TestProtobufUtil.java |   41 +-
 .../hbase/shaded/protobuf/TestProtobufUtil.java |  151 ++
 hbase-shell/src/main/ruby/hbase/admin.rb        |    5 +
 hbase-shell/src/main/ruby/shell.rb              |    3 +-
 hbase-shell/src/main/ruby/shell/commands.rb     |    5 +
 .../src/main/ruby/shell/commands/list_locks.rb  |   60 +
 hbase-shell/src/main/ruby/shell/formatter.rb    |    9 +-
 .../src/test/ruby/shell/list_locks_test.rb      |  152 ++
 40 files changed, 5409 insertions(+), 762 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index f2fc9a5..3e767d2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.replication.TableCFs;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -1250,6 +1251,14 @@ public interface Admin extends Abortable, Closeable {
       throws IOException;
 
   /**
+   * List the locks currently held, or waited on, by master procedures.
+   * @return the list of locks
+   * @throws IOException if a remote or network exception occurs
+   */
+  LockInfo[] listLocks()
+      throws IOException;
+
+  /**
    * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file.
    *
    * Note that the actual rolling of the log writer is asynchronous and may not be complete when
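
For context, a minimal client-side sketch of the new Admin API above; the
connection bootstrap is the usual ConnectionFactory one and is an assumption
here, not part of this diff:

  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.procedure2.LockInfo;

  public class ListLocksExample {
    public static void main(String[] args) throws Exception {
      try (Connection connection =
               ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = connection.getAdmin()) {
        // New in this commit: one RPC returning every lock the master's
        // procedure framework currently holds or has queued.
        for (LockInfo lock : admin.listLocks()) {
          System.out.println(lock);
        }
      }
    }
  }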

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index 99feb14..6859cb3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -25,8 +25,6 @@ import static org.apache.hadoop.hbase.client.MetricsConnection.CLIENT_SIDE_METRI
 import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsentEx;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InterruptedIOException;
@@ -120,6 +118,8 @@ import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.zookeeper.KeeperException;
 
+import com.google.common.annotations.VisibleForTesting;
+
 import edu.umd.cs.findbugs.annotations.Nullable;
 
 /**
@@ -1283,6 +1283,13 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
       }
 
       @Override
+      public MasterProtos.ListLocksResponse listLocks(
+          RpcController controller,
+          MasterProtos.ListLocksRequest request) throws ServiceException {
+        return stub.listLocks(controller, request);
+      }
+
+      @Override
       public MasterProtos.AddColumnResponse addColumn(
           RpcController controller,
           MasterProtos.AddColumnRequest request) throws ServiceException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index e55a95d..7e79c20 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -25,7 +25,6 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -80,6 +79,7 @@ import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -110,6 +110,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringP
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
@@ -151,6 +152,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedur
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
@@ -191,7 +194,6 @@ import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
-import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
 import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
@@ -201,7 +203,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.zookeeper.KeeperException;
 
 import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Lists;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Message;
 import com.google.protobuf.RpcController;
@@ -2096,26 +2097,33 @@ public class HBaseAdmin implements Admin {
             getRpcController(), ListProceduresRequest.newBuilder().build()).getProcedureList();
         ProcedureInfo[] procInfoList = new ProcedureInfo[procList.size()];
         for (int i = 0; i < procList.size(); i++) {
-          procInfoList[i] = convert(procList.get(i));
+          procInfoList[i] = ProtobufUtil.toProcedureInfo(procList.get(i));
         }
         return procInfoList;
       }
     });
   }
 
-  private static ProcedureInfo convert(final ProcedureProtos.Procedure procProto) {
-    NonceKey nonceKey = null;
-    if (procProto.getNonce() != HConstants.NO_NONCE) {
-      nonceKey = new NonceKey(procProto.getNonceGroup(), procProto.getNonce());
-    }
-    org.apache.hadoop.hbase.ProcedureState procedureState =
-        org.apache.hadoop.hbase.ProcedureState.valueOf(procProto.getState().name());
-    return new ProcedureInfo(procProto.getProcId(), procProto.getClassName(), procProto.getOwner(),
-        procedureState, procProto.hasParentId() ? procProto.getParentId() : -1, nonceKey,
-            procProto.hasException()?
-                ForeignExceptionUtil.toIOException(procProto.getException()): null,
-            procProto.getLastUpdate(), procProto.getSubmittedTime(),
-            procProto.hasResult()? procProto.getResult().toByteArray() : null);
+  @Override
+  public LockInfo[] listLocks() throws IOException {
+    return executeCallable(new MasterCallable<LockInfo[]>(getConnection(),
+        getRpcControllerFactory()) {
+      @Override
+      protected LockInfo[] rpcCall() throws Exception {
+        ListLocksRequest request = ListLocksRequest.newBuilder().build();
+        ListLocksResponse response = master.listLocks(getRpcController(), request);
+        List<LockServiceProtos.LockInfo> locksProto = response.getLockList();
+
+        LockInfo[] locks = new LockInfo[locksProto.size()];
+
+        for (int i = 0; i < locks.length; i++) {
+          LockServiceProtos.LockInfo lockProto = locksProto.get(i);
+          locks[i] = ProtobufUtil.toLockInfo(lockProto);
+        }
+
+        return locks;
+      }
+    });
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
index 72b2a15..e3b5b12 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ShortCircuitMasterConnection.java
@@ -191,6 +191,12 @@ public class ShortCircuitMasterConnection implements MasterKeepAliveConnection {
   }
 
   @Override
+  public ListLocksResponse listLocks(RpcController controller,
+      ListLocksRequest request) throws ServiceException {
+    return stub.listLocks(controller, request);
+  }
+
+  @Override
   public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller,
       ListNamespaceDescriptorsRequest request) throws ServiceException {
     return stub.listNamespaceDescriptors(controller, request);
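
Both delegation paths above (ConnectionImplementation and
ShortCircuitMasterConnection) reduce to a single blocking stub call; a sketch
of that hop plus the proto-to-POJO conversion HBaseAdmin performs. Passing a
null RpcController is a simplification for illustration:

  import java.util.List;

  import org.apache.hadoop.hbase.procedure2.LockInfo;
  import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
  import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
  import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;

  final class ListLocksStubSketch {
    /** One blocking hop to the master, then per-element conversion. */
    static LockInfo[] fetchLocks(MasterProtos.MasterService.BlockingInterface master)
        throws ServiceException {
      MasterProtos.ListLocksResponse response = master.listLocks(
          null, MasterProtos.ListLocksRequest.newBuilder().build());
      List<LockServiceProtos.LockInfo> protos = response.getLockList();
      LockInfo[] locks = new LockInfo[protos.size()];
      for (int i = 0; i < protos.size(); i++) {
        locks[i] = ProtobufUtil.toLockInfo(protos.get(i));
      }
      return locks;
    }
  }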

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index e969ded..04ce040 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -53,6 +53,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ProcedureState;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -82,6 +84,7 @@ import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.LimitInputStream;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
 import org.apache.hadoop.hbase.quotas.QuotaScope;
 import org.apache.hadoop.hbase.quotas.QuotaType;
@@ -145,11 +148,14 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.Procedure;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerStartupRequest;
@@ -166,7 +172,9 @@ import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.DynamicClassLoader;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
+import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.Methods;
+import org.apache.hadoop.hbase.util.NonceKey;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hadoop.ipc.RemoteException;
 
@@ -3262,4 +3270,177 @@ public final class ProtobufUtil {
     int port = Addressing.parsePort(str);
     return ServerName.valueOf(hostname, port, -1L);
   }
-}
\ No newline at end of file
+
+  /**
+   * Converts the given {@link ProcedureInfo} into a Protocol Buffers Procedure
+   * instance.
+   * @return the protobuf Procedure equivalent of the given {@link ProcedureInfo}
+   */
+  public static ProcedureProtos.Procedure toProtoProcedure(ProcedureInfo procedure) {
+    ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder();
+
+    builder.setClassName(procedure.getProcName());
+    builder.setProcId(procedure.getProcId());
+    builder.setSubmittedTime(procedure.getSubmittedTime());
+    builder.setState(ProcedureProtos.ProcedureState.valueOf(procedure.getProcState().name()));
+    builder.setLastUpdate(procedure.getLastUpdate());
+
+    if (procedure.hasParentId()) {
+      builder.setParentId(procedure.getParentId());
+    }
+
+    if (procedure.hasOwner()) {
+      builder.setOwner(procedure.getProcOwner());
+    }
+
+    if (procedure.isFailed()) {
+      builder.setException(ForeignExceptionUtil.toProtoForeignException(procedure.getException()));
+    }
+
+    if (procedure.hasResultData()) {
+      builder.setResult(UnsafeByteOperations.unsafeWrap(procedure.getResult()));
+    }
+
+    return builder.build();
+  }
+
+  /**
+   * Converts the given Protocol Buffers Procedure into a {@link ProcedureInfo}.
+   * @return the ProcedureInfo instance
+   */
+  public static ProcedureInfo toProcedureInfo(ProcedureProtos.Procedure procedureProto) {
+    NonceKey nonceKey = null;
+
+    if (procedureProto.getNonce() != HConstants.NO_NONCE) {
+      nonceKey = new NonceKey(procedureProto.getNonceGroup(), procedureProto.getNonce());
+    }
+
+    return new ProcedureInfo(procedureProto.getProcId(), procedureProto.getClassName(),
+        procedureProto.hasOwner() ? procedureProto.getOwner() : null,
+        ProcedureState.valueOf(procedureProto.getState().name()),
+        procedureProto.hasParentId() ? procedureProto.getParentId() : -1, nonceKey,
+        procedureProto.hasException() ?
+          ForeignExceptionUtil.toIOException(procedureProto.getException()) : null,
+        procedureProto.getLastUpdate(), procedureProto.getSubmittedTime(),
+        procedureProto.hasResult() ? procedureProto.getResult().toByteArray() : null);
+  }
+
+  public static LockServiceProtos.ResourceType toProtoResourceType(
+      LockInfo.ResourceType resourceType) {
+    switch (resourceType) {
+    case SERVER:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_SERVER;
+    case NAMESPACE:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_NAMESPACE;
+    case TABLE:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_TABLE;
+    case REGION:
+      return LockServiceProtos.ResourceType.RESOURCE_TYPE_REGION;
+    default:
+      throw new IllegalArgumentException("Unknown resource type: " + resourceType);
+    }
+  }
+
+  public static LockInfo.ResourceType toResourceType(
+      LockServiceProtos.ResourceType resourceTypeProto) {
+    switch (resourceTypeProto) {
+    case RESOURCE_TYPE_SERVER:
+      return LockInfo.ResourceType.SERVER;
+    case RESOURCE_TYPE_NAMESPACE:
+      return LockInfo.ResourceType.NAMESPACE;
+    case RESOURCE_TYPE_TABLE:
+      return LockInfo.ResourceType.TABLE;
+    case RESOURCE_TYPE_REGION:
+      return LockInfo.ResourceType.REGION;
+    default:
+      throw new IllegalArgumentException("Unknown resource type: " + resourceTypeProto);
+    }
+  }
+
+  public static LockServiceProtos.LockType toProtoLockType(
+      LockInfo.LockType lockType) {
+    return LockServiceProtos.LockType.valueOf(lockType.name());
+  }
+
+  public static LockInfo.LockType toLockType(
+      LockServiceProtos.LockType lockTypeProto) {
+    return LockInfo.LockType.valueOf(lockTypeProto.name());
+  }
+
+  public static LockServiceProtos.WaitingProcedure toProtoWaitingProcedure(
+      LockInfo.WaitingProcedure waitingProcedure) {
+    LockServiceProtos.WaitingProcedure.Builder builder =
+        LockServiceProtos.WaitingProcedure.newBuilder();
+
+    ProcedureProtos.Procedure procedureProto =
+        toProtoProcedure(waitingProcedure.getProcedure());
+
+    builder
+        .setLockType(toProtoLockType(waitingProcedure.getLockType()))
+        .setProcedure(procedureProto);
+
+    return builder.build();
+  }
+
+  public static LockInfo.WaitingProcedure toWaitingProcedure(
+      LockServiceProtos.WaitingProcedure waitingProcedureProto) {
+    LockInfo.WaitingProcedure waiting = new LockInfo.WaitingProcedure();
+
+    waiting.setLockType(toLockType(waitingProcedureProto.getLockType()));
+
+    ProcedureInfo procedure =
+        toProcedureInfo(waitingProcedureProto.getProcedure());
+    waiting.setProcedure(procedure);
+
+    return waiting;
+  }
+
+  public static LockServiceProtos.LockInfo toProtoLockInfo(LockInfo lock) {
+    LockServiceProtos.LockInfo.Builder builder = LockServiceProtos.LockInfo.newBuilder();
+
+    builder
+        .setResourceType(toProtoResourceType(lock.getResourceType()))
+        .setResourceName(lock.getResourceName())
+        .setLockType(toProtoLockType(lock.getLockType()));
+
+    ProcedureInfo exclusiveLockOwnerProcedure = lock.getExclusiveLockOwnerProcedure();
+
+    if (exclusiveLockOwnerProcedure != null) {
+      Procedure exclusiveLockOwnerProcedureProto =
+          toProtoProcedure(exclusiveLockOwnerProcedure);
+      builder.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedureProto);
+    }
+
+    builder.setSharedLockCount(lock.getSharedLockCount());
+
+    for (LockInfo.WaitingProcedure waitingProcedure : lock.getWaitingProcedures()) {
+      builder.addWaitingProcedures(toProtoWaitingProcedure(waitingProcedure));
+    }
+
+    return builder.build();
+  }
+
+  public static LockInfo toLockInfo(LockServiceProtos.LockInfo lockProto) {
+    LockInfo lock = new LockInfo();
+
+    lock.setResourceType(toResourceType(lockProto.getResourceType()));
+    lock.setResourceName(lockProto.getResourceName());
+    lock.setLockType(toLockType(lockProto.getLockType()));
+
+    if (lockProto.hasExclusiveLockOwnerProcedure()) {
+      ProcedureInfo exclusiveLockOwnerProcedure =
+          toProcedureInfo(lockProto.getExclusiveLockOwnerProcedure());
+
+      lock.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedure);
+    }
+
+    lock.setSharedLockCount(lockProto.getSharedLockCount());
+
+    for (LockServiceProtos.WaitingProcedure waitingProcedureProto :
+        lockProto.getWaitingProceduresList()) {
+      lock.addWaitingProcedure(toWaitingProcedure(waitingProcedureProto));
+    }
+
+    return lock;
+  }
+}

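For context, a minimal round-trip sketch using the two procedure converters added above (not part of the patch; assumes a ProcedureInfo named info is already in hand and the usual ProtobufUtil imports):

    // Hypothetical usage: convert to the shaded protobuf form and back.
    ProcedureProtos.Procedure proto = ProtobufUtil.toProtoProcedure(info);
    // Optional fields (parent id, owner, exception, result) are set only when present.
    ProcedureInfo roundTripped = ProtobufUtil.toProcedureInfo(proto);
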
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-common/src/main/java/org/apache/hadoop/hbase/procedure2/LockInfo.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/procedure2/LockInfo.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/procedure2/LockInfo.java
new file mode 100644
index 0000000..30ecee8
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/procedure2/LockInfo.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.procedure2;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+@InterfaceAudience.Public
+public class LockInfo {
+  @InterfaceAudience.Public
+  public enum ResourceType {
+    SERVER, NAMESPACE, TABLE, REGION
+  }
+
+  @InterfaceAudience.Public
+  public enum LockType {
+    EXCLUSIVE, SHARED
+  }
+
+  @InterfaceAudience.Public
+  public static class WaitingProcedure {
+    private LockType lockType;
+    private ProcedureInfo procedure;
+
+    public WaitingProcedure() {
+    }
+
+    public LockType getLockType() {
+      return lockType;
+    }
+
+    public void setLockType(LockType lockType) {
+      this.lockType = lockType;
+    }
+
+    public ProcedureInfo getProcedure() {
+      return procedure;
+    }
+
+    public void setProcedure(ProcedureInfo procedure) {
+      this.procedure = procedure;
+    }
+  }
+
+  private ResourceType resourceType;
+  private String resourceName;
+  private LockType lockType;
+  private ProcedureInfo exclusiveLockOwnerProcedure;
+  private int sharedLockCount;
+  private final List<WaitingProcedure> waitingProcedures;
+
+  public LockInfo() {
+    waitingProcedures = new ArrayList<>();
+  }
+
+  public ResourceType getResourceType() {
+    return resourceType;
+  }
+
+  public void setResourceType(ResourceType resourceType) {
+    this.resourceType = resourceType;
+  }
+
+  public String getResourceName() {
+    return resourceName;
+  }
+
+  public void setResourceName(String resourceName) {
+    this.resourceName = resourceName;
+  }
+
+  public LockType getLockType() {
+    return lockType;
+  }
+
+  public void setLockType(LockType lockType) {
+    this.lockType = lockType;
+  }
+
+  public ProcedureInfo getExclusiveLockOwnerProcedure() {
+    return exclusiveLockOwnerProcedure;
+  }
+
+  public void setExclusiveLockOwnerProcedure(
+      ProcedureInfo exclusiveLockOwnerProcedure) {
+    this.exclusiveLockOwnerProcedure = exclusiveLockOwnerProcedure;
+  }
+
+  public int getSharedLockCount() {
+    return sharedLockCount;
+  }
+
+  public void setSharedLockCount(int sharedLockCount) {
+    this.sharedLockCount = sharedLockCount;
+  }
+
+  public List<WaitingProcedure> getWaitingProcedures() {
+    return waitingProcedures;
+  }
+
+  public void setWaitingProcedures(List<WaitingProcedure> waitingProcedures) {
+    this.waitingProcedures.clear();
+    this.waitingProcedures.addAll(waitingProcedures);
+  }
+
+  public void addWaitingProcedure(WaitingProcedure waitingProcedure) {
+    waitingProcedures.add(waitingProcedure);
+  }
+}

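For context, a minimal sketch of populating the new POJO (not part of the patch; ownerProc is an assumed ProcedureInfo in scope):

    // Hypothetical usage: describe an exclusive lock held on a table.
    LockInfo lock = new LockInfo();
    lock.setResourceType(LockInfo.ResourceType.TABLE);
    lock.setResourceName("ns:example_table");
    lock.setLockType(LockInfo.LockType.EXCLUSIVE);
    lock.setExclusiveLockOwnerProcedure(ownerProc); // assumed ProcedureInfo
    lock.setSharedLockCount(0);
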
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
index e11c23c..2c307b7 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java
@@ -43,7 +43,7 @@ package org.apache.hadoop.hbase.procedure2;
  * We do not use ReentrantReadWriteLock directly because of its high memory overhead.
  */
 public class LockAndQueue extends ProcedureDeque implements LockStatus {
-  private long exclusiveLockProcIdOwner = Long.MIN_VALUE;
+  private Procedure<?> exclusiveLockOwnerProcedure = null;
   private int sharedLock = 0;
 
   // ======================================================================
@@ -57,12 +57,12 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
 
   @Override
   public boolean hasExclusiveLock() {
-    return this.exclusiveLockProcIdOwner != Long.MIN_VALUE;
+    return this.exclusiveLockOwnerProcedure != null;
   }
 
   @Override
   public boolean isLockOwner(long procId) {
-    return exclusiveLockProcIdOwner == procId;
+    return getExclusiveLockProcIdOwner() == procId;
   }
 
   @Override
@@ -76,8 +76,17 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
   }
 
   @Override
+  public Procedure<?> getExclusiveLockOwnerProcedure() {
+    return exclusiveLockOwnerProcedure;
+  }
+
+  @Override
   public long getExclusiveLockProcIdOwner() {
-    return exclusiveLockProcIdOwner;
+    if (exclusiveLockOwnerProcedure == null) {
+      return Long.MIN_VALUE;
+    } else {
+      return exclusiveLockOwnerProcedure.getProcId();
+    }
   }
 
   @Override
@@ -101,7 +110,7 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
 
   public boolean tryExclusiveLock(final Procedure proc) {
     if (isLocked()) return hasLockAccess(proc);
-    exclusiveLockProcIdOwner = proc.getProcId();
+    exclusiveLockOwnerProcedure = proc;
     return true;
   }
 
@@ -110,7 +119,7 @@ public class LockAndQueue extends ProcedureDeque implements LockStatus {
    */
   public boolean releaseExclusiveLock(final Procedure proc) {
     if (isLockOwner(proc.getProcId())) {
-      exclusiveLockProcIdOwner = Long.MIN_VALUE;
+      exclusiveLockOwnerProcedure = null;
       return true;
     }
     return false;

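For context, a minimal sketch of the changed ownership bookkeeping (not part of the patch; proc is an assumed Procedure<?> in scope):

    // Hypothetical usage: the owner Procedure itself is now retrievable,
    // not just its id.
    LockAndQueue lockAndQueue = new LockAndQueue();
    if (lockAndQueue.tryExclusiveLock(proc)) {
      assert lockAndQueue.getExclusiveLockOwnerProcedure() == proc;
      assert lockAndQueue.isLockOwner(proc.getProcId());
      lockAndQueue.releaseExclusiveLock(proc); // true only for the owner
    }
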
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java
index 9f2aae7..f32ef76 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java
@@ -29,6 +29,7 @@ public interface LockStatus {
   boolean isLockOwner(long procId);
   boolean hasParentLock(final Procedure proc);
   boolean hasLockAccess(final Procedure proc);
+  Procedure<?> getExclusiveLockOwnerProcedure();
   long getExclusiveLockProcIdOwner();
   int getSharedLockCount();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
index 617532b..b5295e7 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.procedure2;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -121,6 +122,12 @@ public interface ProcedureScheduler {
   boolean waitEvent(ProcedureEvent event, Procedure procedure);
 
   /**
+   * Lists the locks currently held or queued against resources.
+   * @return one {@link LockInfo} per locked resource
+   */
+  List<LockInfo> listLocks();
+
+  /**
    * Returns the number of elements in this queue.
    * @return the number of elements in this queue.
    */

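For context, a minimal sketch of consuming the new API (not part of the patch; scheduler is an assumed ProcedureScheduler implementation):

    // Hypothetical usage: dump the current lock queues.
    for (LockInfo lock : scheduler.listLocks()) {
      System.out.println(lock.getResourceType() + " " + lock.getResourceName()
          + " " + lock.getLockType());
    }
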
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
index b4222c7..7ce7568 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hbase.procedure2;
 
-import com.google.common.base.Preconditions;
-
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Modifier;
@@ -33,6 +31,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
 import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.NonceKey;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Helper to convert to/from ProcedureProtos
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java
index 788f4ff..176a900 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java
@@ -18,10 +18,13 @@
 
 package org.apache.hadoop.hbase.procedure2;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.util.Collections;
+import java.util.List;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Simple scheduler for procedures
  */
@@ -73,4 +76,9 @@ public class SimpleProcedureScheduler extends AbstractProcedureScheduler {
   @Override
   public void completionCleanup(Procedure proc) {
   }
+
+  @Override
+  public List<LockInfo> listLocks() {
+    return Collections.emptyList();
+  }
 }
\ No newline at end of file