You are viewing a plain text version of this content. The canonical (HTML) link was removed during plain-text conversion.
Posted to commits@hbase.apache.org by sy...@apache.org on 2017/04/26 22:52:39 UTC
[32/40] hbase git commit: HBASE-15143 Procedure v2 - Web UI displaying queues
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-protocol-shaded/src/main/protobuf/LockService.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index 0df7f2e..1898e68 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -25,6 +25,7 @@ option java_generate_equals_and_hash = true;
option optimize_for = SPEED;
import "HBase.proto";
+import "Procedure.proto";
enum LockType {
EXCLUSIVE = 1;
@@ -70,6 +71,27 @@ message LockProcedureData {
optional bool is_master_lock = 6 [default = false];
}
+enum ResourceType {
+ RESOURCE_TYPE_SERVER = 1;
+ RESOURCE_TYPE_NAMESPACE = 2;
+ RESOURCE_TYPE_TABLE = 3;
+ RESOURCE_TYPE_REGION = 4;
+}
+
+message WaitingProcedure {
+ required LockType lock_type = 1;
+ required Procedure procedure = 2;
+}
+
+message LockInfo {
+ required ResourceType resource_type = 1;
+ optional string resource_name = 2;
+ required LockType lock_type = 3;
+ optional Procedure exclusive_lock_owner_procedure = 4;
+ optional int32 shared_lock_count = 5;
+ repeated WaitingProcedure waitingProcedures = 6;
+}
+
service LockService {
/** Acquire lock on namespace/table/region */
rpc RequestLock(LockRequest) returns(LockResponse);
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-protocol-shaded/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index d7d51e2..0c3da02 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -30,6 +30,7 @@ import "HBase.proto";
import "Client.proto";
import "ClusterStatus.proto";
import "ErrorHandling.proto";
+import "LockService.proto";
import "Procedure.proto";
import "Quota.proto";
import "Replication.proto";
@@ -534,6 +535,13 @@ message ListProceduresResponse {
repeated Procedure procedure = 1;
}
+message ListLocksRequest {
+}
+
+message ListLocksResponse {
+ repeated LockInfo lock = 1;
+}
+
message SetQuotaRequest {
optional string user_name = 1;
optional string user_group = 2;
@@ -888,6 +896,9 @@ service MasterService {
rpc ListProcedures(ListProceduresRequest)
returns(ListProceduresResponse);
+ rpc ListLocks(ListLocksRequest)
+ returns(ListLocksResponse);
+
/** Add a replication peer */
rpc AddReplicationPeer(AddReplicationPeerRequest)
returns(AddReplicationPeerResponse);
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 36d5112..e1a47c5 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -125,7 +125,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
<ul class="nav navbar-nav">
<li class="active"><a href="/">Home</a></li>
<li><a href="/tablesDetailed.jsp">Table Details</a></li>
- <li><a href="/procedures.jsp">Procedures</a></li>
+ <li><a href="/procedures.jsp">Procedures & Locks</a></li>
<li><a href="/logs/">Local Logs</a></li>
<li><a href="/logLevel">Log Level</a></li>
<li><a href="/dump">Debug Dump</a></li>
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index aab852c..ad8aa14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -983,6 +984,24 @@ public interface MasterObserver extends Coprocessor {
List<ProcedureInfo> procInfoList) throws IOException {}
/**
+ * Called before a listLocks request has been processed.
+ * @param ctx the environment to interact with the framework and master
+ * @throws IOException if something went wrong
+ */
+ default void preListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx)
+ throws IOException {}
+
+ /**
+ * Called after a listLocks request has been processed.
+ * @param ctx the environment to interact with the framework and master
+ * @param lockInfoList the list of locks about to be returned
+ * @throws IOException if something went wrong
+ */
+ default void postListLocks(
+ ObserverContext<MasterCoprocessorEnvironment> ctx,
+ List<LockInfo> lockInfoList) throws IOException {}
+
+ /**
* Called prior to moving a given region from one region server to another.
* @param ctx the environment to interact with the framework and master
* @param region the HRegionInfo
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index f9670e1..e4ba285 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -65,7 +65,6 @@ import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.ProcedureInfo;
-import org.apache.hadoop.hbase.RegionStateListener;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
@@ -113,6 +112,7 @@ import org.apache.hadoop.hbase.master.procedure.DisableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.EnableTableProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureConstants;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MergeTableRegionsProcedure;
import org.apache.hadoop.hbase.master.procedure.ModifyColumnFamilyProcedure;
@@ -128,6 +128,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
@@ -979,7 +980,7 @@ public class HMaster extends HRegionServer implements MasterServices {
void initQuotaManager() throws IOException {
MasterQuotaManager quotaManager = new MasterQuotaManager(this);
- this.assignmentManager.setRegionStateListener((RegionStateListener)quotaManager);
+ this.assignmentManager.setRegionStateListener(quotaManager);
quotaManager.start();
this.quotaManager = quotaManager;
}
@@ -1141,8 +1142,8 @@ public class HMaster extends HRegionServer implements MasterServices {
procedureStore = new WALProcedureStore(conf, walDir.getFileSystem(conf), walDir,
new MasterProcedureEnv.WALStoreLeaseRecovery(this));
procedureStore.registerListener(new MasterProcedureEnv.MasterProcedureStoreListener(this));
- procedureExecutor = new ProcedureExecutor(conf, procEnv, procedureStore,
- procEnv.getProcedureScheduler());
+ MasterProcedureScheduler procedureScheduler = procEnv.getProcedureScheduler();
+ procedureExecutor = new ProcedureExecutor<>(conf, procEnv, procedureStore, procedureScheduler);
configurationManager.registerObserver(procEnv);
final int numThreads = conf.getInt(MasterProcedureConstants.MASTER_PROCEDURE_THREADS,
@@ -2909,6 +2910,34 @@ public class HMaster extends HRegionServer implements MasterServices {
return procInfoList;
}
+ private Map<Long, ProcedureInfo> getProcedureInfos() {
+ final List<ProcedureInfo> list = procedureExecutor.listProcedures();
+ final Map<Long, ProcedureInfo> map = new HashMap<>();
+
+ for (ProcedureInfo procedureInfo : list) {
+ map.put(procedureInfo.getProcId(), procedureInfo);
+ }
+
+ return map;
+ }
+
+ @Override
+ public List<LockInfo> listLocks() throws IOException {
+ if (cpHost != null) {
+ cpHost.preListLocks();
+ }
+
+ MasterProcedureScheduler procedureScheduler = procedureExecutor.getEnvironment().getProcedureScheduler();
+
+ final List<LockInfo> lockInfoList = procedureScheduler.listLocks();
+
+ if (cpHost != null) {
+ cpHost.postListLocks(lockInfoList);
+ }
+
+ return lockInfoList;
+ }
+
/**
* Returns the list of table descriptors that match the specified request
* @param namespace the namespace to query, or null if querying for all
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 8a7a387..2f5e66e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.security.User;
@@ -706,6 +707,26 @@ public class MasterCoprocessorHost
});
}
+ public boolean preListLocks() throws IOException {
+ return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+ @Override
+ public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+ throws IOException {
+ oserver.preListLocks(ctx);
+ }
+ });
+ }
+
+ public void postListLocks(final List<LockInfo> lockInfoList) throws IOException {
+ execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
+ @Override
+ public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
+ throws IOException {
+ oserver.postListLocks(ctx, lockInfoList);
+ }
+ });
+ }
+
public boolean preMove(final HRegionInfo region, final ServerName srcServer,
final ServerName destServer) throws IOException {
return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 9af8f45..40c4a71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -56,8 +56,8 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.regionserver.RSRpcServices;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
@@ -86,129 +86,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockH
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListProceduresResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.*;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionServerReportRequest;
@@ -1108,7 +987,7 @@ public class MasterRpcServices extends RSRpcServices
}
master.getMasterProcedureExecutor().removeResult(request.getProcId());
} else {
- Procedure proc = v.getSecond();
+ Procedure<?> proc = v.getSecond();
if (proc == null) {
builder.setState(GetProcedureResultResponse.State.NOT_FOUND);
} else {
@@ -1160,7 +1039,7 @@ public class MasterRpcServices extends RSRpcServices
try {
final ListProceduresResponse.Builder response = ListProceduresResponse.newBuilder();
for (ProcedureInfo p: master.listProcedures()) {
- response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
+ response.addProcedure(ProtobufUtil.toProtoProcedure(p));
}
return response.build();
} catch (IOException e) {
@@ -1169,6 +1048,23 @@ public class MasterRpcServices extends RSRpcServices
}
@Override
+ public ListLocksResponse listLocks(
+ RpcController controller,
+ ListLocksRequest request) throws ServiceException {
+ try {
+ final ListLocksResponse.Builder builder = ListLocksResponse.newBuilder();
+
+ for (LockInfo lockInfo: master.listLocks()) {
+ builder.addLock(ProtobufUtil.toProtoLockInfo(lockInfo));
+ }
+
+ return builder.build();
+ } catch (IOException e) {
+ throw new ServiceException(e);
+ }
+ }
+
+ @Override
public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(RpcController c,
ListTableDescriptorsByNamespaceRequest request) throws ServiceException {
try {
@@ -1459,7 +1355,6 @@ public class MasterRpcServices extends RSRpcServices
throw new UnknownRegionException(Bytes.toString(regionName));
}
- if (pair == null) throw new UnknownRegionException(Bytes.toString(regionName));
HRegionInfo hri = pair.getFirst();
if (master.cpHost != null) {
if (master.cpHost.preUnassign(hri, force)) {
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 66758f8..4924d72 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -39,12 +39,12 @@ import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-
import com.google.protobuf.Service;
/**
@@ -353,6 +353,13 @@ public interface MasterServices extends Server {
public List<ProcedureInfo> listProcedures() throws IOException;
/**
+ * List locks
+ * @return lock list
+ * @throws IOException
+ */
+ public List<LockInfo> listLocks() throws IOException;
+
+ /**
* Get list of table descriptors by namespace
* @param name namespace name
* @return descriptors
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
index 3cad51c..512f7e2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
@@ -204,6 +204,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
* WALs.
* @return false, so procedure framework doesn't mark this procedure as failure.
*/
+ @Override
protected boolean setTimeoutFailure(final MasterProcedureEnv env) {
synchronized (event) {
if (LOG.isDebugEnabled()) LOG.debug("Timeout failure " + this.event);
@@ -231,7 +232,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
}
@Override
- protected Procedure[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
+ protected Procedure<?>[] execute(final MasterProcedureEnv env) throws ProcedureSuspendedException {
// Local master locks don't store any state, so on recovery, simply finish this procedure
// immediately.
if (recoveredMasterLock) return null;
@@ -334,6 +335,7 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
setState(ProcedureProtos.ProcedureState.RUNNABLE);
}
+ @Override
protected void toStringClassDetails(final StringBuilder builder) {
super.toStringClassDetails(builder);
if (regionInfos != null) {
@@ -350,6 +352,10 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
builder.append(", type=").append(type);
}
+ public LockType getType() {
+ return type;
+ }
+
private LockInterface setupLock() throws IllegalArgumentException {
if (regionInfos != null) {
return setupRegionLock();
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 48a0b62..b0baf85 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -21,27 +21,34 @@ package org.apache.hadoop.hbase.master.procedure;
import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ProcedureInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
import org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler;
+import org.apache.hadoop.hbase.procedure2.LockAndQueue;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.LockStatus;
import org.apache.hadoop.hbase.procedure2.Procedure;
-import org.apache.hadoop.hbase.procedure2.LockAndQueue;
import org.apache.hadoop.hbase.procedure2.ProcedureDeque;
-import org.apache.hadoop.hbase.util.AvlUtil.AvlKeyComparator;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
import org.apache.hadoop.hbase.util.AvlUtil.AvlIterableList;
+import org.apache.hadoop.hbase.util.AvlUtil.AvlKeyComparator;
import org.apache.hadoop.hbase.util.AvlUtil.AvlLinkedNode;
import org.apache.hadoop.hbase.util.AvlUtil.AvlTree;
import org.apache.hadoop.hbase.util.AvlUtil.AvlTreeIterator;
@@ -226,7 +233,111 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
return pollResult;
}
- @VisibleForTesting
+ private LockInfo createLockInfo(LockInfo.ResourceType resourceType,
+ String resourceName, LockAndQueue queue) {
+ LockInfo info = new LockInfo();
+
+ info.setResourceType(resourceType);
+ info.setResourceName(resourceName);
+
+ if (queue.hasExclusiveLock()) {
+ info.setLockType(LockInfo.LockType.EXCLUSIVE);
+
+ Procedure<?> exclusiveLockOwnerProcedure = queue.getExclusiveLockOwnerProcedure();
+ ProcedureInfo exclusiveLockOwnerProcedureInfo =
+ ProcedureUtil.convertToProcedureInfo(exclusiveLockOwnerProcedure);
+ info.setExclusiveLockOwnerProcedure(exclusiveLockOwnerProcedureInfo);
+ } else if (queue.getSharedLockCount() > 0) {
+ info.setLockType(LockInfo.LockType.SHARED);
+ info.setSharedLockCount(queue.getSharedLockCount());
+ }
+
+ for (Procedure<?> procedure : queue) {
+ if (!(procedure instanceof LockProcedure)) {
+ continue;
+ }
+
+ LockProcedure lockProcedure = (LockProcedure)procedure;
+ LockInfo.WaitingProcedure waitingProcedure = new LockInfo.WaitingProcedure();
+
+ switch (lockProcedure.getType()) {
+ case EXCLUSIVE:
+ waitingProcedure.setLockType(LockInfo.LockType.EXCLUSIVE);
+ break;
+ case SHARED:
+ waitingProcedure.setLockType(LockInfo.LockType.SHARED);
+ break;
+ }
+
+ ProcedureInfo procedureInfo = ProcedureUtil.convertToProcedureInfo(lockProcedure);
+ waitingProcedure.setProcedure(procedureInfo);
+
+ info.addWaitingProcedure(waitingProcedure);
+ }
+
+ return info;
+ }
+
+ @Override
+ public List<LockInfo> listLocks() {
+ schedLock();
+
+ try {
+ List<LockInfo> lockInfos = new ArrayList<>();
+
+ for (Entry<ServerName, LockAndQueue> entry : locking.serverLocks
+ .entrySet()) {
+ String serverName = entry.getKey().getServerName();
+ LockAndQueue queue = entry.getValue();
+
+ if (queue.isLocked()) {
+ LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.SERVER,
+ serverName, queue);
+ lockInfos.add(lockInfo);
+ }
+ }
+
+ for (Entry<String, LockAndQueue> entry : locking.namespaceLocks
+ .entrySet()) {
+ String namespaceName = entry.getKey();
+ LockAndQueue queue = entry.getValue();
+
+ if (queue.isLocked()) {
+ LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.NAMESPACE,
+ namespaceName, queue);
+ lockInfos.add(lockInfo);
+ }
+ }
+
+ for (Entry<TableName, LockAndQueue> entry : locking.tableLocks
+ .entrySet()) {
+ String tableName = entry.getKey().getNameAsString();
+ LockAndQueue queue = entry.getValue();
+
+ if (queue.isLocked()) {
+ LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.TABLE,
+ tableName, queue);
+ lockInfos.add(lockInfo);
+ }
+ }
+
+ for (Entry<String, LockAndQueue> entry : locking.regionLocks.entrySet()) {
+ String regionName = entry.getKey();
+ LockAndQueue queue = entry.getValue();
+
+ if (queue.isLocked()) {
+ LockInfo lockInfo = createLockInfo(LockInfo.ResourceType.REGION,
+ regionName, queue);
+ lockInfos.add(lockInfo);
+ }
+ }
+
+ return lockInfos;
+ } finally {
+ schedUnlock();
+ }
+ }
+
@Override
public void clear() {
schedLock();
@@ -390,6 +501,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
super(serverName, serverLock);
}
+ @Override
public boolean requireExclusiveLock(Procedure proc) {
ServerProcedureInterface spi = (ServerProcedureInterface)proc;
switch (spi.getServerOperationType()) {
@@ -437,6 +549,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
return true;
}
+ @Override
public boolean requireExclusiveLock(Procedure proc) {
return requireTableExclusiveLock((TableProcedureInterface)proc);
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
index c841e61..b686114 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/procedures.jsp
@@ -27,9 +27,10 @@
import="java.util.Set"
import="org.apache.hadoop.conf.Configuration"
import="org.apache.hadoop.hbase.HBaseConfiguration"
- import="org.apache.hadoop.hbase.ProcedureInfo"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv"
+ import="org.apache.hadoop.hbase.ProcedureInfo"
+ import="org.apache.hadoop.hbase.procedure2.LockInfo"
import="org.apache.hadoop.hbase.procedure2.ProcedureExecutor"
import="org.apache.hadoop.hbase.procedure2.store.wal.ProcedureWALFile"
import="org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore"
@@ -55,6 +56,8 @@
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
});
+
+ List<LockInfo> locks = master.listLocks();
%>
<!--[if IE]>
<!DOCTYPE html>
@@ -62,15 +65,15 @@
<?xml version="1.0" encoding="UTF-8" ?>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
- <meta charset="utf-8">
+ <meta charset="utf-8" />
<title>HBase Master Procedures: <%= master.getServerName() %></title>
- <meta name="viewport" content="width=device-width, initial-scale=1.0">
- <meta name="description" content="">
- <meta name="author" content="">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+ <meta name="description" content="" />
+ <meta name="author" content="" />
- <link href="/static/css/bootstrap.min.css" rel="stylesheet">
- <link href="/static/css/bootstrap-theme.min.css" rel="stylesheet">
- <link href="/static/css/hbase.css" rel="stylesheet">
+ <link href="/static/css/bootstrap.min.css" rel="stylesheet" />
+ <link href="/static/css/bootstrap-theme.min.css" rel="stylesheet" />
+ <link href="/static/css/hbase.css" rel="stylesheet" />
</head>
<body>
<div class="navbar navbar-fixed-top navbar-default">
@@ -87,7 +90,7 @@
<ul class="nav navbar-nav">
<li><a href="/master-status">Home</a></li>
<li><a href="/tablesDetailed.jsp">Table Details</a></li>
- <li><a href="/procedures.jsp">Procedures</a></li>
+ <li><a href="/procedures.jsp">Procedures & Locks</a></li>
<li><a href="/logs/">Local Logs</a></li>
<li><a href="/logLevel">Log Level</a></li>
<li><a href="/dump">Debug Dump</a></li>
@@ -116,43 +119,42 @@
<th>Last Update</th>
<th>Errors</th>
</tr>
- <tr>
- <% for (ProcedureInfo procInfo : procedures) { %>
+ <% for (ProcedureInfo procInfo : procedures) { %>
<tr>
- <td><%= procInfo.getProcId() %></a></td>
- <td><%= procInfo.hasParentId() ? procInfo.getParentId() : "" %></a></td>
- <td><%= escapeXml(procInfo.getProcState().toString()) %></a></td>
- <td><%= escapeXml(procInfo.getProcOwner()) %></a></td>
- <td><%= escapeXml(procInfo.getProcName()) %></a></td>
- <td><%= new Date(procInfo.getSubmittedTime()) %></a></td>
- <td><%= new Date(procInfo.getLastUpdate()) %></a></td>
- <td><%= escapeXml(procInfo.isFailed() ? procInfo.getException().getMessage() : "") %></a></td>
+ <td><%= procInfo.getProcId() %></td>
+ <td><%= procInfo.hasParentId() ? procInfo.getParentId() : "" %></td>
+ <td><%= escapeXml(procInfo.getProcState().toString()) %></td>
+ <td><%= escapeXml(procInfo.getProcOwner()) %></td>
+ <td><%= escapeXml(procInfo.getProcName()) %></td>
+ <td><%= new Date(procInfo.getSubmittedTime()) %></td>
+ <td><%= new Date(procInfo.getLastUpdate()) %></td>
+ <td><%= escapeXml(procInfo.isFailed() ? procInfo.getException().getMessage() : "") %></td>
</tr>
<% } %>
</table>
</div>
-<br>
+<br />
<div class="container-fluid content">
<div class="row">
<div class="page-header">
<h2>Procedure WAL State</h2>
</div>
</div>
-<div class="tabbable">
- <ul class="nav nav-pills">
- <li class="active">
- <a href="#tab_WALFiles" data-toggle="tab">WAL files</a>
- </li>
- <li class="">
- <a href="#tab_WALFilesCorrupted" data-toggle="tab">Corrupted WAL files</a>
- </li>
- <li class="">
- <a href="#tab_WALRollTime" data-toggle="tab">WAL roll time</a>
- </li>
- <li class="">
- <a href="#tab_SyncStats" data-toggle="tab">Sync stats</a>
- </li>
- </ul>
+ <div class="tabbable">
+ <ul class="nav nav-pills">
+ <li class="active">
+ <a href="#tab_WALFiles" data-toggle="tab">WAL files</a>
+ </li>
+ <li class="">
+ <a href="#tab_WALFilesCorrupted" data-toggle="tab">Corrupted WAL files</a>
+ </li>
+ <li class="">
+ <a href="#tab_WALRollTime" data-toggle="tab">WAL roll time</a>
+ </li>
+ <li class="">
+ <a href="#tab_SyncStats" data-toggle="tab">Sync stats</a>
+ </li>
+ </ul>
<div class="tab-content" style="padding-bottom: 9px; border-bottom: 1px solid #ddd;">
<div class="tab-pane active" id="tab_WALFiles">
<% if (procedureWALFiles != null && procedureWALFiles.size() > 0) { %>
@@ -168,8 +170,8 @@
<tr>
<td> <%= pwf.getLogId() %></td>
<td> <%= StringUtils.humanSize(pwf.getSize()) %> </td>
- <td> <%= new Date(pwf.getTimestamp()) %></a></td>
- <td> <%= escapeXml(pwf.toString()) %></t>
+ <td> <%= new Date(pwf.getTimestamp()) %> </td>
+ <td> <%= escapeXml(pwf.toString()) %> </td>
</tr>
<% } %>
</table>
@@ -190,8 +192,8 @@
<tr>
<td> <%= cwf.getLogId() %></td>
<td> <%= StringUtils.humanSize(cwf.getSize()) %> </td>
- <td> <%= new Date(cwf.getTimestamp()) %></a></td>
- <td> <%= escapeXml(cwf.toString()) %></t>
+ <td> <%= new Date(cwf.getTimestamp()) %> </td>
+ <td> <%= escapeXml(cwf.toString()) %> </td>
</tr>
<% } %>
</table>
@@ -223,7 +225,7 @@
<% for (int i = syncMetricsBuff.size() - 1; i >= 0; --i) { %>
<% WALProcedureStore.SyncMetrics syncMetrics = syncMetricsBuff.get(i); %>
<tr>
- <td> <%= new Date(syncMetrics.getTimestamp()) %></a></td>
+ <td> <%= new Date(syncMetrics.getTimestamp()) %></td>
<td> <%= StringUtils.humanTimeDiff(syncMetrics.getSyncWaitMs()) %></td>
<td> <%= syncMetrics.getSyncedEntries() %></td>
<td> <%= StringUtils.humanSize(syncMetrics.getTotalSyncedBytes()) %></td>
@@ -235,6 +237,51 @@
</div>
</div>
</div>
+<br />
+<div class="container-fluid content">
+ <div class="row">
+ <div class="page-header">
+ <h1>Locks</h1>
+ </div>
+ </div>
+ <% for (LockInfo lock : locks) { %>
+ <h2><%= lock.getResourceType() %>: <%= lock.getResourceName() %></h2>
+ <%
+ switch (lock.getLockType()) {
+ case EXCLUSIVE:
+ %>
+ <p>Lock type: EXCLUSIVE</p>
+ <p>Owner procedure ID: <%= lock.getExclusiveLockOwnerProcedure().getProcId() %></p>
+ <%
+ break;
+ case SHARED:
+ %>
+ <p>Lock type: SHARED</p>
+ <p>Number of shared locks: <%= lock.getSharedLockCount() %></p>
+ <%
+ break;
+ }
+
+ List<LockInfo.WaitingProcedure> waitingProcedures = lock.getWaitingProcedures();
+
+ if (!waitingProcedures.isEmpty()) {
+ %>
+ <h3>Waiting procedures</h3>
+ <table class="table table-striped" width="90%" >
+ <tr>
+ <th>Lock type</th>
+ <th>Procedure ID</th>
+ </tr>
+ <% for (LockInfo.WaitingProcedure waitingProcedure : waitingProcedures) { %>
+ <tr>
+ <td><%= waitingProcedure.getLockType() %></td>
+ <td><%= waitingProcedure.getProcedure().getProcId() %></td>
+ </tr>
+ <% } %>
+ </table>
+ <% } %>
+ <% } %>
+</div>
<script src="/static/js/jquery.min.js" type="text/javascript"></script>
<script src="/static/js/bootstrap.min.js" type="text/javascript"></script>
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
index 90f639b..75f75fc 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshot.jsp
@@ -94,7 +94,7 @@
<ul class="nav navbar-nav">
<li><a href="/master-status">Home</a></li>
<li><a href="/tablesDetailed.jsp">Table Details</a></li>
- <li><a href="/procedures.jsp">Procedures</a></li>
+ <li><a href="/procedures.jsp">Procedures & Locks</a></li>
<li><a href="/logs/">Local Logs</a></li>
<li><a href="/logLevel">Log Level</a></li>
<li><a href="/dump">Debug Dump</a></li>
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
index aa9a17f..58f74f4 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/snapshotsStats.jsp
@@ -81,7 +81,7 @@
<ul class="nav navbar-nav">
<li><a href="/master-status">Home</a></li>
<li><a href="/tablesDetailed.jsp">Table Details</a></li>
- <li><a href="/procedures.jsp">Procedures</a></li>
+ <li><a href="/procedures.jsp">Procedures & Locks</a></li>
<li><a href="/logs/">Local Logs</a></li>
<li><a href="/logLevel">Log Level</a></li>
<li><a href="/dump">Debug Dump</a></li>
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 0f8a289..0e1d1cf 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -158,7 +158,7 @@
<ul class="nav navbar-nav">
<li><a href="/master-status">Home</a></li>
<li><a href="/tablesDetailed.jsp">Table Details</a></li>
- <li><a href="/procedures.jsp">Procedures</a></li>
+ <li><a href="/procedures.jsp">Procedures & Locks</a></li>
<li><a href="/logs/">Local Logs</a></li>
<li><a href="/logLevel">Log Level</a></li>
<li><a href="/dump">Debug Dump</a></li>
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
index d21be3e..a485e8b 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/tablesDetailed.jsp
@@ -64,7 +64,7 @@
<ul class="nav navbar-nav">
<li class="active"><a href="/master-status">Home</a></li>
<li><a href="/tablesDetailed.jsp">Table Details</a></li>
- <li><a href="/procedures.jsp">Procedures</a></li>
+ <li><a href="/procedures.jsp">Procedures & Locks</a></li>
<li><a href="/logs/">Local Logs</a></li>
<li><a href="/logLevel">Log Level</a></li>
<li><a href="/dump">Debug Dump</a></li>
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp b/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
index 6cd6c92..a2e6733 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/zk.jsp
@@ -60,7 +60,7 @@
<ul class="nav navbar-nav">
<li><a href="/master-status">Home</a></li>
<li><a href="/tablesDetailed.jsp">Table Details</a></li>
- <li><a href="/procedures.jsp">Procedures</a></li>
+ <li><a href="/procedures.jsp">Procedures & Locks</a></li>
<li><a href="/logs/">Local Logs</a></li>
<li><a href="/logLevel">Log Level</a></li>
<li><a href="/dump">Debug Dump</a></li>
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index 6b52e0c..3b80406 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
import org.apache.hadoop.hbase.master.locking.LockProcedure;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
import org.apache.hadoop.hbase.regionserver.HRegionServer;
@@ -125,6 +126,8 @@ public class TestMasterObserver {
private boolean postAbortProcedureCalled;
private boolean preListProceduresCalled;
private boolean postListProceduresCalled;
+ private boolean preListLocksCalled;
+ private boolean postListLocksCalled;
private boolean preMoveCalled;
private boolean postMoveCalled;
private boolean preAssignCalled;
@@ -726,6 +729,25 @@ public class TestMasterObserver {
}
@Override
+ public void preListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException {
+ preListLocksCalled = true;
+ }
+
+ @Override
+ public void postListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx, List<LockInfo> lockInfoList)
+ throws IOException {
+ postListLocksCalled = true;
+ }
+
+ public boolean wasListLocksCalled() {
+ return preListLocksCalled && postListLocksCalled;
+ }
+
+ public boolean wasPreListLocksCalledOnly() {
+ return preListLocksCalled && !postListLocksCalled;
+ }
+
+ @Override
public void preMove(ObserverContext<MasterCoprocessorEnvironment> env,
HRegionInfo region, ServerName srcServer, ServerName destServer)
throws IOException {
@@ -2164,6 +2186,22 @@ public class TestMasterObserver {
cp.wasListProceduresCalled());
}
+ @Test (timeout=180000)
+ public void testListLocksOperation() throws Exception {
+ MiniHBaseCluster cluster = UTIL.getHBaseCluster();
+
+ HMaster master = cluster.getMaster();
+ MasterCoprocessorHost host = master.getMasterCoprocessorHost();
+ CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
+ CPMasterObserver.class.getName());
+ cp.resetStates();
+
+ master.listLocks();
+ assertTrue(
+ "Coprocessor should be called on list locks request",
+ cp.wasListLocksCalled());
+ }
+
private void deleteTable(Admin admin, TableName tableName) throws Exception {
// NOTE: We need a latch because admin is not sync,
// so the postOp coprocessor method may be called after the admin operation returned.
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 683e9b3..ff6b88e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -39,16 +39,14 @@ import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
import org.apache.hadoop.hbase.replication.ReplicationException;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
-import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-import org.mockito.Mockito;
-
import com.google.protobuf.Service;
public class MockNoopMasterServices implements MasterServices, Server {
@@ -221,6 +219,11 @@ public class MockNoopMasterServices implements MasterServices, Server {
}
@Override
+ public List<LockInfo> listLocks() throws IOException {
+ return null;
+ }
+
+ @Override
public List<HTableDescriptor> listTableDescriptorsByNamespace(String name) throws IOException {
return null; //To change body of implemented methods use File | Settings | File Templates.
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
index 356c84f..e23c90a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/procedure/TestMasterProcedureScheduler.java
@@ -18,21 +18,24 @@
package org.apache.hadoop.hbase.master.procedure;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
+import static org.junit.Assert.*;
import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Method;
import java.util.Arrays;
-
+import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.master.locking.LockProcedure;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.LockInfo.WaitingProcedure;
import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -899,5 +902,161 @@ public class TestMasterProcedureScheduler {
sb.append(")");
}
}
+
+ private static LockProcedure createLockProcedure(LockProcedure.LockType lockType, long procId) throws Exception {
+ LockProcedure procedure = new LockProcedure();
+
+ Field typeField = LockProcedure.class.getDeclaredField("type");
+ typeField.setAccessible(true);
+ typeField.set(procedure, lockType);
+
+ Method setProcIdMethod = Procedure.class.getDeclaredMethod("setProcId", long.class);
+ setProcIdMethod.setAccessible(true);
+ setProcIdMethod.invoke(procedure, procId);
+
+ return procedure;
+ }
+
+ private static LockProcedure createExclusiveLockProcedure(long procId) throws Exception {
+ return createLockProcedure(LockProcedure.LockType.EXCLUSIVE, procId);
+ }
+
+ private static LockProcedure createSharedLockProcedure(long procId) throws Exception {
+ return createLockProcedure(LockProcedure.LockType.SHARED, procId);
+ }
+
+ private static void assertLockResource(LockInfo lock,
+ LockInfo.ResourceType resourceType, String resourceName)
+ {
+ assertEquals(resourceType, lock.getResourceType());
+ assertEquals(resourceName, lock.getResourceName());
+ }
+
+ private static void assertExclusiveLock(LockInfo lock, long procId)
+ {
+ assertEquals(LockInfo.LockType.EXCLUSIVE, lock.getLockType());
+ assertEquals(procId, lock.getExclusiveLockOwnerProcedure().getProcId());
+ assertEquals(0, lock.getSharedLockCount());
+ }
+
+ private static void assertSharedLock(LockInfo lock, int lockCount)
+ {
+ assertEquals(LockInfo.LockType.SHARED, lock.getLockType());
+ assertEquals(lockCount, lock.getSharedLockCount());
+ }
+
+ @Test
+ public void testListLocksServer() throws Exception {
+ LockProcedure procedure = createExclusiveLockProcedure(0);
+ queue.waitServerExclusiveLock(procedure, ServerName.valueOf("server1,1234,0"));
+
+ List<LockInfo> locks = queue.listLocks();
+ assertEquals(1, locks.size());
+
+ LockInfo serverLock = locks.get(0);
+ assertLockResource(serverLock, LockInfo.ResourceType.SERVER, "server1,1234,0");
+ assertExclusiveLock(serverLock, 0);
+ assertTrue(serverLock.getWaitingProcedures().isEmpty());
+ }
+
+ @Test
+ public void testListLocksNamespace() throws Exception {
+ LockProcedure procedure = createExclusiveLockProcedure(1);
+ queue.waitNamespaceExclusiveLock(procedure, "ns1");
+
+ List<LockInfo> locks = queue.listLocks();
+ assertEquals(2, locks.size());
+
+ LockInfo namespaceLock = locks.get(0);
+ assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns1");
+ assertExclusiveLock(namespaceLock, 1);
+ assertTrue(namespaceLock.getWaitingProcedures().isEmpty());
+
+ LockInfo tableLock = locks.get(1);
+ assertLockResource(tableLock, LockInfo.ResourceType.TABLE,
+ TableName.NAMESPACE_TABLE_NAME.getNameAsString());
+ assertSharedLock(tableLock, 1);
+ assertTrue(tableLock.getWaitingProcedures().isEmpty());
+ }
+
+ @Test
+ public void testListLocksTable() throws Exception {
+ LockProcedure procedure = createExclusiveLockProcedure(2);
+ queue.waitTableExclusiveLock(procedure, TableName.valueOf("ns2", "table2"));
+
+ List<LockInfo> locks = queue.listLocks();
+ assertEquals(2, locks.size());
+
+ LockInfo namespaceLock = locks.get(0);
+ assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns2");
+ assertSharedLock(namespaceLock, 1);
+ assertTrue(namespaceLock.getWaitingProcedures().isEmpty());
+
+ LockInfo tableLock = locks.get(1);
+ assertLockResource(tableLock, LockInfo.ResourceType.TABLE, "ns2:table2");
+ assertExclusiveLock(tableLock, 2);
+ assertTrue(tableLock.getWaitingProcedures().isEmpty());
+ }
+
+ @Test
+ public void testListLocksRegion() throws Exception {
+ LockProcedure procedure = createExclusiveLockProcedure(3);
+ HRegionInfo regionInfo = new HRegionInfo(TableName.valueOf("ns3", "table3"));
+
+ queue.waitRegion(procedure, regionInfo);
+
+ List<LockInfo> locks = queue.listLocks();
+ assertEquals(3, locks.size());
+
+ LockInfo namespaceLock = locks.get(0);
+ assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns3");
+ assertSharedLock(namespaceLock, 1);
+ assertTrue(namespaceLock.getWaitingProcedures().isEmpty());
+
+ LockInfo tableLock = locks.get(1);
+ assertLockResource(tableLock, LockInfo.ResourceType.TABLE, "ns3:table3");
+ assertSharedLock(tableLock, 1);
+ assertTrue(tableLock.getWaitingProcedures().isEmpty());
+
+ LockInfo regionLock = locks.get(2);
+ assertLockResource(regionLock, LockInfo.ResourceType.REGION, regionInfo.getEncodedName());
+ assertExclusiveLock(regionLock, 3);
+ assertTrue(regionLock.getWaitingProcedures().isEmpty());
+ }
+
+ @Test
+ public void testListLocksWaiting() throws Exception {
+ LockProcedure procedure1 = createExclusiveLockProcedure(1);
+ queue.waitTableExclusiveLock(procedure1, TableName.valueOf("ns4", "table4"));
+
+ LockProcedure procedure2 = createSharedLockProcedure(2);
+ queue.waitTableSharedLock(procedure2, TableName.valueOf("ns4", "table4"));
+
+ LockProcedure procedure3 = createExclusiveLockProcedure(3);
+ queue.waitTableExclusiveLock(procedure3, TableName.valueOf("ns4", "table4"));
+
+ List<LockInfo> locks = queue.listLocks();
+ assertEquals(2, locks.size());
+
+ LockInfo namespaceLock = locks.get(0);
+ assertLockResource(namespaceLock, LockInfo.ResourceType.NAMESPACE, "ns4");
+ assertSharedLock(namespaceLock, 1);
+ assertTrue(namespaceLock.getWaitingProcedures().isEmpty());
+
+ LockInfo tableLock = locks.get(1);
+ assertLockResource(tableLock, LockInfo.ResourceType.TABLE, "ns4:table4");
+ assertExclusiveLock(tableLock, 1);
+
+ List<WaitingProcedure> waitingProcedures = tableLock.getWaitingProcedures();
+ assertEquals(2, waitingProcedures.size());
+
+ WaitingProcedure waitingProcedure1 = waitingProcedures.get(0);
+ assertEquals(LockInfo.LockType.SHARED, waitingProcedure1.getLockType());
+ assertEquals(2, waitingProcedure1.getProcedure().getProcId());
+
+ WaitingProcedure waitingProcedure2 = waitingProcedures.get(1);
+ assertEquals(LockInfo.LockType.EXCLUSIVE, waitingProcedure2.getLockType());
+ assertEquals(3, waitingProcedure2.getProcedure().getProcId());
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
index f943ce4..c88c370 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/protobuf/TestProtobufUtil.java
@@ -20,21 +20,24 @@ package org.apache.hadoop.hbase.protobuf;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ProcedureState;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Column;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
@@ -336,4 +339,40 @@ public class TestProtobufUtil {
Cell newOffheapKV = org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil.toCell(cell);
assertTrue(CellComparator.COMPARATOR.compare(offheapKV, newOffheapKV) == 0);
}
+
+ private static ProcedureInfo createProcedureInfo(long procId)
+ {
+ return new ProcedureInfo(procId, "java.lang.Object", null,
+ ProcedureState.RUNNABLE, -1, null, null, 0, 0, null);
+ }
+
+ private static void assertProcedureInfoEquals(ProcedureInfo expected,
+ ProcedureInfo result)
+ {
+ if (expected == result) {
+ return;
+ } else if (expected == null || result == null) {
+ fail();
+ }
+
+ assertEquals(expected.getProcId(), result.getProcId());
+ }
+
+ private static void assertLockInfoEquals(LockInfo expected, LockInfo result)
+ {
+ assertEquals(expected.getResourceType(), result.getResourceType());
+ assertEquals(expected.getResourceName(), result.getResourceName());
+ assertEquals(expected.getLockType(), result.getLockType());
+ assertProcedureInfoEquals(expected.getExclusiveLockOwnerProcedure(),
+ result.getExclusiveLockOwnerProcedure());
+ assertEquals(expected.getSharedLockCount(), result.getSharedLockCount());
+ }
+
+ private static void assertWaitingProcedureEquals(
+ LockInfo.WaitingProcedure expected, LockInfo.WaitingProcedure result)
+ {
+ assertEquals(expected.getLockType(), result.getLockType());
+ assertProcedureInfoEquals(expected.getProcedure(),
+ result.getProcedure());
+ }
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java
new file mode 100644
index 0000000..da7c7c4
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java
@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.shaded.protobuf;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.hbase.ProcedureInfo;
+import org.apache.hadoop.hbase.ProcedureState;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(SmallTests.class)
+public class TestProtobufUtil {
+ public TestProtobufUtil() {
+ }
+
+ private static ProcedureInfo createProcedureInfo(long procId)
+ {
+ return new ProcedureInfo(procId, "java.lang.Object", null,
+ ProcedureState.RUNNABLE, -1, null, null, 0, 0, null);
+ }
+
+ private static void assertProcedureInfoEquals(ProcedureInfo expected,
+ ProcedureInfo result)
+ {
+ if (expected == result) {
+ return;
+ } else if (expected == null || result == null) {
+ fail();
+ }
+
+ assertEquals(expected.getProcId(), result.getProcId());
+ }
+
+ private static void assertLockInfoEquals(LockInfo expected, LockInfo result)
+ {
+ assertEquals(expected.getResourceType(), result.getResourceType());
+ assertEquals(expected.getResourceName(), result.getResourceName());
+ assertEquals(expected.getLockType(), result.getLockType());
+ assertProcedureInfoEquals(expected.getExclusiveLockOwnerProcedure(),
+ result.getExclusiveLockOwnerProcedure());
+ assertEquals(expected.getSharedLockCount(), result.getSharedLockCount());
+ }
+
+ private static void assertWaitingProcedureEquals(
+ LockInfo.WaitingProcedure expected, LockInfo.WaitingProcedure result)
+ {
+ assertEquals(expected.getLockType(), result.getLockType());
+ assertProcedureInfoEquals(expected.getProcedure(),
+ result.getProcedure());
+ }
+
+ @Test
+ public void testServerLockInfo() {
+ LockInfo lock = new LockInfo();
+ lock.setResourceType(LockInfo.ResourceType.SERVER);
+ lock.setResourceName("server");
+ lock.setLockType(LockInfo.LockType.SHARED);
+ lock.setSharedLockCount(2);
+
+ LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
+ LockInfo lock2 = ProtobufUtil.toLockInfo(proto);
+
+ assertLockInfoEquals(lock, lock2);
+ }
+
+ @Test
+ public void testNamespaceLockInfo() {
+ LockInfo lock = new LockInfo();
+ lock.setResourceType(LockInfo.ResourceType.NAMESPACE);
+ lock.setResourceName("ns");
+ lock.setLockType(LockInfo.LockType.EXCLUSIVE);
+ lock.setExclusiveLockOwnerProcedure(createProcedureInfo(2));
+
+ LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
+ LockInfo lock2 = ProtobufUtil.toLockInfo(proto);
+
+ assertLockInfoEquals(lock, lock2);
+ }
+
+ @Test
+ public void testTableLockInfo() {
+ LockInfo lock = new LockInfo();
+ lock.setResourceType(LockInfo.ResourceType.TABLE);
+ lock.setResourceName("table");
+ lock.setLockType(LockInfo.LockType.SHARED);
+ lock.setSharedLockCount(2);
+
+ LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
+ LockInfo lock2 = ProtobufUtil.toLockInfo(proto);
+
+ assertLockInfoEquals(lock, lock2);
+ }
+
+ @Test
+ public void testRegionLockInfo() {
+ LockInfo lock = new LockInfo();
+ lock.setResourceType(LockInfo.ResourceType.REGION);
+ lock.setResourceName("region");
+ lock.setLockType(LockInfo.LockType.EXCLUSIVE);
+ lock.setExclusiveLockOwnerProcedure(createProcedureInfo(2));
+
+ LockServiceProtos.LockInfo proto = ProtobufUtil.toProtoLockInfo(lock);
+ LockInfo lock2 = ProtobufUtil.toLockInfo(proto);
+
+ assertLockInfoEquals(lock, lock2);
+ }
+
+ @Test
+ public void testExclusiveWaitingLockInfo() {
+ LockInfo.WaitingProcedure waitingProcedure = new LockInfo.WaitingProcedure();
+ waitingProcedure.setLockType(LockInfo.LockType.EXCLUSIVE);
+ waitingProcedure.setProcedure(createProcedureInfo(1));
+
+ LockServiceProtos.WaitingProcedure proto = ProtobufUtil.toProtoWaitingProcedure(waitingProcedure);
+ LockInfo.WaitingProcedure waitingProcedure2 = ProtobufUtil.toWaitingProcedure(proto);
+
+ assertWaitingProcedureEquals(waitingProcedure, waitingProcedure2);
+ }
+
+ @Test
+ public void testSharedWaitingLockInfo() {
+ LockInfo.WaitingProcedure waitingProcedure = new LockInfo.WaitingProcedure();
+ waitingProcedure.setLockType(LockInfo.LockType.SHARED);
+ waitingProcedure.setProcedure(createProcedureInfo(2));
+
+ LockServiceProtos.WaitingProcedure proto = ProtobufUtil.toProtoWaitingProcedure(waitingProcedure);
+ LockInfo.WaitingProcedure waitingProcedure2 = ProtobufUtil.toWaitingProcedure(proto);
+
+ assertWaitingProcedureEquals(waitingProcedure, waitingProcedure2);
+ }
+}
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/main/ruby/hbase/admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index 6aaa130..41904be 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -1192,6 +1192,11 @@ module Hbase
@admin.listProcedures()
end
+ # List all locks
+ def list_locks()
+ @admin.listLocks();
+ end
+
# Parse arguments and update HTableDescriptor accordingly
def update_htd_from_arg(htd, arg)
htd.setOwnerString(arg.delete(OWNER)) if arg[OWNER]
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 66480f9..fc55f94 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -436,10 +436,11 @@ Shell.load_command_group(
Shell.load_command_group(
'procedures',
- :full_name => 'PROCEDURES MANAGEMENT',
+ :full_name => 'PROCEDURES & LOCKS MANAGEMENT',
:commands => %w[
abort_procedure
list_procedures
+ list_locks
]
)
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/main/ruby/shell/commands.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands.rb b/hbase-shell/src/main/ruby/shell/commands.rb
index 271a7d9..08f2e11 100644
--- a/hbase-shell/src/main/ruby/shell/commands.rb
+++ b/hbase-shell/src/main/ruby/shell/commands.rb
@@ -98,6 +98,11 @@ module Shell
@formatter ||= ::Shell::Formatter::Console.new
end
+ # for testing purposes to capture the output of the commands
+ def set_formatter(formatter)
+ @formatter = formatter
+ end
+
def translate_hbase_exceptions(*args)
yield
rescue => e
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/main/ruby/shell/commands/list_locks.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_locks.rb b/hbase-shell/src/main/ruby/shell/commands/list_locks.rb
new file mode 100644
index 0000000..fca411b
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/list_locks.rb
@@ -0,0 +1,60 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+ module Commands
+ class ListLocks < Command
+ def help
+ return <<-EOF
+List all locks in hbase. Examples:
+
+ hbase> list_locks
+EOF
+ end
+
+ def command()
+ list = admin.list_locks()
+
+ list.each do |lock|
+ formatter.output_strln("#{lock.resourceType}(#{lock.resourceName})")
+
+ case lock.lockType
+ when org.apache.hadoop.hbase.procedure2.LockInfo::LockType::EXCLUSIVE then
+ formatter.output_strln("Lock type: EXCLUSIVE, procedure: #{lock.exclusiveLockOwnerProcedure.procId}")
+ when org.apache.hadoop.hbase.procedure2.LockInfo::LockType::SHARED then
+ formatter.output_strln("Lock type: SHARED, count: #{lock.sharedLockCount}")
+ end
+
+ if lock.waitingProcedures.any?
+ formatter.output_strln("Waiting procedures:")
+ formatter.header([ "Lock type", "Procedure Id" ])
+
+ lock.waitingProcedures.each do |waitingProcedure|
+ formatter.row([ waitingProcedure.lockType.to_s, waitingProcedure.procedure.procId.to_s ]);
+ end
+
+ formatter.footer(lock.waitingProcedures.size)
+ end
+
+ formatter.output_strln("");
+ end
+ end
+ end
+ end
+end
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/main/ruby/shell/formatter.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/formatter.rb b/hbase-shell/src/main/ruby/shell/formatter.rb
index 2f800f6..aa81c86 100644
--- a/hbase-shell/src/main/ruby/shell/formatter.rb
+++ b/hbase-shell/src/main/ruby/shell/formatter.rb
@@ -17,6 +17,8 @@
# limitations under the License.
#
+require 'stringio'
+
# Results formatter
module Shell
module Formatter
@@ -25,7 +27,7 @@ module Shell
attr_reader :row_count
def is_valid_io?(obj)
- obj.instance_of?(IO) || obj == Kernel
+ obj.instance_of?(IO) || obj.instance_of?(StringIO) || obj == Kernel
end
def refresh_width()
@@ -166,6 +168,11 @@ module Shell
output(@max_width, str)
end
+ def output_strln(str)
+ output_str(str)
+ @out.puts
+ end
+
def output(width, str)
if str == nil
str = ''
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-shell/src/test/ruby/shell/list_locks_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/shell/list_locks_test.rb b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
new file mode 100644
index 0000000..fe132db
--- /dev/null
+++ b/hbase-shell/src/test/ruby/shell/list_locks_test.rb
@@ -0,0 +1,152 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+require 'hbase_constants'
+require 'shell'
+
+class ListLocksTest < Test::Unit::TestCase
+ def setup
+ @hbase = ::Hbase::Hbase.new($TEST_CLUSTER.getConfiguration)
+ @shell = Shell::Shell.new(@hbase)
+ @master = $TEST_CLUSTER.getHBaseClusterInterface.getMaster
+ @scheduler = @master.getMasterProcedureExecutor.getEnvironment.getProcedureScheduler
+
+ @string_io = StringIO.new
+
+ @list_locks = Shell::Commands::ListLocks.new(@shell)
+ @list_locks.set_formatter(Shell::Formatter::Base.new({ :output_stream => @string_io }))
+ end
+
+ def set_field(object, field_name, value)
+ field = object.getClass.getDeclaredField(field_name)
+ field.setAccessible(true)
+ field.set(object, value)
+ end
+
+ def create_lock(type, proc_id)
+ lock = org.apache.hadoop.hbase.master.locking.LockProcedure.new()
+ set_field(lock, "type", type)
+ lock.procId = proc_id
+
+ return lock
+ end
+
+ def create_exclusive_lock(proc_id)
+ return create_lock(org.apache.hadoop.hbase.master.locking.LockProcedure::LockType::EXCLUSIVE, proc_id)
+ end
+
+ def create_shared_lock(proc_id)
+ return create_lock(org.apache.hadoop.hbase.master.locking.LockProcedure::LockType::SHARED, proc_id)
+ end
+
+ define_test "list server locks" do
+ lock = create_exclusive_lock(0)
+
+ server_name = org.apache.hadoop.hbase.ServerName.valueOf("server1,1234,0")
+
+ @scheduler.waitServerExclusiveLock(lock, server_name)
+ @list_locks.command()
+ @scheduler.wakeServerExclusiveLock(lock, server_name)
+
+ assert_equal(
+ "SERVER(server1,1234,0)\n" <<
+ "Lock type: EXCLUSIVE, procedure: 0\n\n",
+ @string_io.string)
+ end
+
+ define_test "list namespace locks" do
+ lock = create_exclusive_lock(1)
+
+ @scheduler.waitNamespaceExclusiveLock(lock, "ns1")
+ @list_locks.command()
+ @scheduler.wakeNamespaceExclusiveLock(lock, "ns1")
+
+ assert_equal(
+ "NAMESPACE(ns1)\n" <<
+ "Lock type: EXCLUSIVE, procedure: 1\n\n" <<
+ "TABLE(hbase:namespace)\n" <<
+ "Lock type: SHARED, count: 1\n\n",
+ @string_io.string)
+ end
+
+ define_test "list table locks" do
+ lock = create_exclusive_lock(2)
+
+ table_name = org.apache.hadoop.hbase.TableName.valueOf("ns2", "table2")
+
+ @scheduler.waitTableExclusiveLock(lock, table_name)
+ @list_locks.command()
+ @scheduler.wakeTableExclusiveLock(lock, table_name)
+
+ assert_equal(
+ "NAMESPACE(ns2)\n" <<
+ "Lock type: SHARED, count: 1\n\n" <<
+ "TABLE(ns2:table2)\n" <<
+ "Lock type: EXCLUSIVE, procedure: 2\n\n",
+ @string_io.string)
+ end
+
+ define_test "list region locks" do
+ lock = create_exclusive_lock(3)
+
+ table_name = org.apache.hadoop.hbase.TableName.valueOf("ns3", "table3")
+ region_info = org.apache.hadoop.hbase.HRegionInfo.new(table_name)
+
+ @scheduler.waitRegion(lock, region_info)
+ @list_locks.command()
+ @scheduler.wakeRegion(lock, region_info)
+
+ assert_equal(
+ "NAMESPACE(ns3)\n" <<
+ "Lock type: SHARED, count: 1\n\n" <<
+ "TABLE(ns3:table3)\n" <<
+ "Lock type: SHARED, count: 1\n\n" <<
+ "REGION(" << region_info.getEncodedName << ")\n" <<
+ "Lock type: EXCLUSIVE, procedure: 3\n\n",
+ @string_io.string)
+ end
+
+ define_test "list waiting locks" do
+ table_name = org.apache.hadoop.hbase.TableName.valueOf("ns4", "table4")
+
+ lock1 = create_exclusive_lock(1)
+ set_field(lock1, "tableName", table_name)
+
+ lock2 = create_shared_lock(2)
+ set_field(lock2, "tableName", table_name)
+
+ @scheduler.waitTableExclusiveLock(lock1, table_name)
+ @scheduler.waitTableSharedLock(lock2, table_name)
+ @list_locks.command()
+ @scheduler.wakeTableExclusiveLock(lock1, table_name)
+ @scheduler.wakeTableSharedLock(lock2, table_name)
+
+ assert_equal(
+ "NAMESPACE(ns4)\n" <<
+ "Lock type: SHARED, count: 1\n\n" <<
+ "TABLE(ns4:table4)\n" <<
+ "Lock type: EXCLUSIVE, procedure: 1\n" <<
+ "Waiting procedures:\n" <<
+ "Lock type Procedure Id\n" <<
+ " SHARED 2\n" <<
+ "1 row(s)\n\n",
+ @string_io.string)
+ end
+
+end