Posted to commits@hbase.apache.org by bu...@apache.org on 2017/09/11 07:42:51 UTC

[24/50] [abbrv] hbase git commit: HBASE-18106 Redo ProcedureInfo and LockInfo

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java
index b1d0669..4882168 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.hbase.procedure2;
 
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
@@ -336,11 +334,13 @@ public class TestYieldProcedures {
     }
 
     @Override
-    protected void serializeStateData(final OutputStream stream) throws IOException {
+    protected void serializeStateData(ProcedureStateSerializer serializer)
+        throws IOException {
     }
 
     @Override
-    protected void deserializeStateData(final InputStream stream) throws IOException {
+    protected void deserializeStateData(ProcedureStateSerializer serializer)
+        throws IOException {
     }
   }
 
@@ -353,6 +353,7 @@ public class TestYieldProcedures {
 
     public TestScheduler() {}
 
+    @Override
     public void addFront(final Procedure proc) {
       addFrontCalls++;
       super.addFront(proc);

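The hunk above shows the core API change of HBASE-18106: serializeStateData and deserializeStateData now take a ProcedureStateSerializer instead of raw streams. As a point of reference, a minimal sketch of a stateful procedure written against the new contract might look like the following; the CounterProcedure name and the execute/rollback/abort stubs are illustrative only, and the serializer is assumed to expose the two methods used throughout this patch, serialize(Message) and deserialize(Class):

    import java.io.IOException;
    import org.apache.hadoop.hbase.procedure2.Procedure;
    import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
    import org.apache.hadoop.hbase.shaded.com.google.protobuf.Int64Value;

    public class CounterProcedure extends Procedure<Void> {
      private long counter;

      @Override
      protected void serializeStateData(ProcedureStateSerializer serializer)
          throws IOException {
        // Each serialize() call appends one protobuf message to the procedure state.
        serializer.serialize(Int64Value.newBuilder().setValue(counter).build());
      }

      @Override
      protected void deserializeStateData(ProcedureStateSerializer serializer)
          throws IOException {
        // Messages are read back in the order they were written.
        counter = serializer.deserialize(Int64Value.class).getValue();
      }

      // Minimal stubs; a real procedure would do its work in execute().
      @Override
      protected Procedure[] execute(Void env) { return null; }
      @Override
      protected void rollback(Void env) { }
      @Override
      protected boolean abort(Void env) { return false; }
    }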
http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
index 9b8c46f..44c8e12 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseCommonTestingUtility;
 import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.LoadCounter;
 import org.apache.hadoop.hbase.procedure2.ProcedureTestingUtility.TestProcedure;
@@ -43,9 +44,9 @@ import org.apache.hadoop.hbase.procedure2.SequentialProcedure;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStoreTracker;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Int64Value;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.apache.hadoop.hbase.testclassification.MasterTests;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.IOUtils;
 
 import org.junit.After;
@@ -514,7 +515,7 @@ public class TestWALProcedureStore {
     storeRestart(loader);
     assertTrue(procStore.getCorruptedLogs() != null);
     assertEquals(1, procStore.getCorruptedLogs().size());
-    assertEquals(85, loader.getLoadedCount());
+    assertEquals(87, loader.getLoadedCount());
     assertEquals(0, loader.getCorruptedCount());
   }
 
@@ -911,22 +912,22 @@ public class TestWALProcedureStore {
     protected boolean abort(Void env) { return false; }
 
     @Override
-    protected void serializeStateData(final OutputStream stream) throws IOException {
+    protected void serializeStateData(ProcedureStateSerializer serializer)
+        throws IOException {
       long procId = getProcId();
       if (procId % 2 == 0) {
-        stream.write(Bytes.toBytes(procId));
+        Int64Value.Builder builder = Int64Value.newBuilder().setValue(procId);
+        serializer.serialize(builder.build());
       }
     }
 
     @Override
-    protected void deserializeStateData(InputStream stream) throws IOException {
+    protected void deserializeStateData(ProcedureStateSerializer serializer)
+        throws IOException {
       long procId = getProcId();
       if (procId % 2 == 0) {
-        byte[] bProcId = new byte[8];
-        assertEquals(8, stream.read(bProcId));
-        assertEquals(procId, Bytes.toLong(bProcId));
-      } else {
-        assertEquals(0, stream.available());
+        Int64Value value = serializer.deserialize(Int64Value.class);
+        assertEquals(procId, value.getValue());
       }
     }
   }

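For orientation, the serializer the updated test codes against behaves like a typed append/read cursor over protobuf messages. A sketch of how such a serializer could be backed by google.protobuf.Any — which is how this commit ultimately persists state (see the Procedure.proto hunk below) — follows; the AnyBackedSerializer class is hypothetical, not part of the patch:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
    import org.apache.hadoop.hbase.shaded.com.google.protobuf.Any;
    import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
    import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;

    /** Hypothetical in-memory serializer backed by a list of Any messages. */
    public class AnyBackedSerializer implements ProcedureStateSerializer {
      private final List<Any> state = new ArrayList<>();
      private int readIndex = 0;

      @Override
      public void serialize(Message message) throws IOException {
        // Any.pack() records the message bytes together with its type URL.
        state.add(Any.pack(message));
      }

      @Override
      public <M extends Message> M deserialize(Class<M> clazz) throws IOException {
        try {
          // unpack() verifies the stored type URL against the requested class.
          return state.get(readIndex++).unpack(clazz);
        } catch (InvalidProtocolBufferException e) {
          throw new IOException(e);
        }
      }
    }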
http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index 1898e68..567dee7 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -71,25 +71,20 @@ message LockProcedureData {
   optional bool is_master_lock = 6 [default = false];
 }
 
-enum ResourceType {
-  RESOURCE_TYPE_SERVER = 1;
-  RESOURCE_TYPE_NAMESPACE = 2;
-  RESOURCE_TYPE_TABLE = 3;
-  RESOURCE_TYPE_REGION = 4;
+enum LockedResourceType {
+  SERVER = 1;
+  NAMESPACE = 2;
+  TABLE = 3;
+  REGION = 4;
 }
 
-message WaitingProcedure {
-  required LockType lock_type = 1;
-  required Procedure procedure = 2;
-}
-
-message LockInfo {
-  required ResourceType resource_type = 1;
+message LockedResource {
+  required LockedResourceType resource_type = 1;
   optional string resource_name = 2;
   required LockType lock_type = 3;
   optional Procedure exclusive_lock_owner_procedure = 4;
   optional int32 shared_lock_count = 5;
-  repeated WaitingProcedure waitingProcedures = 6;
+  repeated Procedure waitingProcedures = 6;
 }
 
 service LockService {

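As a quick illustration of the reshaped schema, building one of the new LockedResource messages from Java might look like the sketch below; the resource name is a placeholder, and only the required fields plus two optional ones are set:

    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockedResource;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockedResourceType;

    static LockedResource exampleTableLock() {
      return LockedResource.newBuilder()
          .setResourceType(LockedResourceType.TABLE)  // required
          .setResourceName("ns:example")              // optional; placeholder name
          .setLockType(LockType.EXCLUSIVE)            // required
          .setSharedLockCount(0)                      // optional
          .build();
    }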
http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-protocol-shaded/src/main/protobuf/Master.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index 33f9bf3..2c1694e 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -544,18 +544,18 @@ message AbortProcedureResponse {
   required bool is_procedure_aborted = 1;
 }
 
-message ListProceduresRequest {
+message GetProceduresRequest {
 }
 
-message ListProceduresResponse {
+message GetProceduresResponse {
   repeated Procedure procedure = 1;
 }
 
-message ListLocksRequest {
+message GetLocksRequest {
 }
 
-message ListLocksResponse {
-  repeated LockInfo lock = 1;
+message GetLocksResponse {
+  repeated LockedResource lock = 1;
 }
 
 message SetQuotaRequest {
@@ -917,11 +917,11 @@ service MasterService {
     returns(AbortProcedureResponse);
 
   /** returns a list of procedures */
-  rpc ListProcedures(ListProceduresRequest)
-    returns(ListProceduresResponse);
+  rpc GetProcedures(GetProceduresRequest)
+    returns(GetProceduresResponse);
 
-  rpc ListLocks(ListLocksRequest)
-    returns(ListLocksResponse);
+  rpc GetLocks(GetLocksRequest)
+    returns(GetLocksResponse);
 
   /** Add a replication peer */
   rpc AddReplicationPeer(AddReplicationPeerRequest)

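Client code that spoke the old ListProcedures RPC follows the rename mechanically. A hedged sketch of invoking the renamed endpoint through the generated blocking stub; the stub parameter is assumed to come from an existing RPC channel, and a null RpcController is passed as is common with these generated interfaces:

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;

    static GetProceduresResponse fetchProcedures(MasterService.BlockingInterface stub)
        throws ServiceException {
      // Formerly listProcedures(ListProceduresRequest); renamed by this patch.
      return stub.getProcedures(null, GetProceduresRequest.getDefaultInstance());
    }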
http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-protocol-shaded/src/main/protobuf/Procedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Procedure.proto b/hbase-protocol-shaded/src/main/protobuf/Procedure.proto
index 1a3ecf5..c13a37e 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Procedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Procedure.proto
@@ -23,6 +23,7 @@ option java_generic_services = true;
 option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;
 
+import "google/protobuf/any.proto";
 import "ErrorHandling.proto";
 
 enum ProcedureState {
@@ -55,7 +56,8 @@ message Procedure {
   // user state/results
   optional ForeignExceptionMessage exception = 10;
   optional bytes result = 11;           // opaque (user) result structure
-  optional bytes state_data = 12;       // opaque (user) procedure internal-state
+  optional bytes state_data = 12;       // opaque (user) procedure internal-state - OBSOLETE
+  repeated google.protobuf.Any state_message = 15; // opaque (user) procedure internal-state
 
   // Nonce to prevent same procedure submit by multiple times
   optional uint64 nonce_group = 13 [default = 0];

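The state_data bytes field is kept only for reading old records; new state travels as repeated google.protobuf.Any. A small sketch of the pack/unpack round trip that the Any type provides, with Int64Value standing in for an arbitrary state message:

    import org.apache.hadoop.hbase.shaded.com.google.protobuf.Any;
    import org.apache.hadoop.hbase.shaded.com.google.protobuf.Int64Value;
    import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;

    static Any packState(long value) {
      // Wraps the message and records its type URL in the Any envelope.
      return Any.pack(Int64Value.newBuilder().setValue(value).build());
    }

    static long unpackState(Any any) throws InvalidProtocolBufferException {
      // Fails if the recorded type URL does not match Int64Value.
      return any.unpack(Int64Value.class).getValue();
    }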
http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
index e3f5be5..f0cefe4 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupAdminServer.java
@@ -38,17 +38,16 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.master.HMaster;
-import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
-import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
 import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.RegionState;
 import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
 import org.apache.hadoop.hbase.master.locking.LockManager;
-import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.net.Address;
-
+import org.apache.hadoop.hbase.procedure2.LockType;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 
@@ -253,7 +252,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
     for (TableName table: tables) {
       LOG.info("Unassigning region(s) from " + table + " for table move to " + targetGroupName);
       LockManager.MasterLock lock = master.getLockManager().createMasterLock(table,
-              LockProcedure.LockType.EXCLUSIVE, this.getClass().getName() + ": RSGroup: table move");
+              LockType.EXCLUSIVE, this.getClass().getName() + ": RSGroup: table move");
       try {
         try {
           lock.acquire();
@@ -420,7 +419,7 @@ public class RSGroupAdminServer implements RSGroupAdmin {
     }
     for (TableName table: tables) {
       LockManager.MasterLock lock = master.getLockManager().createMasterLock(table,
-          LockProcedure.LockType.EXCLUSIVE, this.getClass().getName() + ": RSGroup: table move");
+          LockType.EXCLUSIVE, this.getClass().getName() + ": RSGroup: table move");
       try {
         try {
           lock.acquire();

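The lock-acquisition pattern at these call sites is unchanged apart from the enum's new home in the procedure2 package. A condensed sketch, assuming master is an HMaster reference and that acquire() may throw InterruptedException as at the call sites above:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.master.HMaster;
    import org.apache.hadoop.hbase.master.locking.LockManager;
    import org.apache.hadoop.hbase.procedure2.LockType;

    static void withTableLock(HMaster master, TableName table) throws InterruptedException {
      LockManager.MasterLock lock = master.getLockManager().createMasterLock(
          table, LockType.EXCLUSIVE, "example: table maintenance");
      try {
        lock.acquire();
        // ... do work while holding the exclusive table lock ...
      } finally {
        lock.release();
      }
    }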
http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index 8e368ba..f1cf49d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -43,7 +42,9 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.net.Address;
-import org.apache.hadoop.hbase.procedure2.LockInfo;
+import org.apache.hadoop.hbase.procedure2.LockType;
+import org.apache.hadoop.hbase.procedure2.LockedResource;
+import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas;
@@ -970,38 +971,38 @@ public interface MasterObserver extends Coprocessor {
       throws IOException {}
 
   /**
-   * Called before a listProcedures request has been processed.
+   * Called before a getProcedures request has been processed.
    * @param ctx the environment to interact with the framework and master
    */
-  default void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx)
+  default void preGetProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx)
       throws IOException {}
 
   /**
-   * Called after a listProcedures request has been processed.
+   * Called after a getProcedures request has been processed.
    * @param ctx the environment to interact with the framework and master
-   * @param procInfoList the list of procedures about to be returned
+   * @param procList the list of procedures about to be returned
    */
-  default void postListProcedures(
+  default void postGetProcedures(
       ObserverContext<MasterCoprocessorEnvironment> ctx,
-      List<ProcedureInfo> procInfoList) throws IOException {}
+      List<Procedure<?>> procList) throws IOException {}
 
   /**
-   * Called before a listLocks request has been processed.
+   * Called before a getLocks request has been processed.
    * @param ctx the environment to interact with the framework and master
    * @throws IOException if something went wrong
    */
-  default void preListLocks(ObserverContext<MasterCoprocessorEnvironment> ctx)
+  default void preGetLocks(ObserverContext<MasterCoprocessorEnvironment> ctx)
       throws IOException {}
 
   /**
-   * Called after a listLocks request has been processed.
+   * Called after a getLocks request has been processed.
    * @param ctx the environment to interact with the framework and master
-   * @param lockInfoList the list of locks about to be returned
+   * @param lockedResources the list of locks about to be returned
    * @throws IOException if something went wrong
    */
-  default void postListLocks(
+  default void postGetLocks(
       ObserverContext<MasterCoprocessorEnvironment> ctx,
-      List<LockInfo> lockInfoList) throws IOException {}
+      List<LockedResource> lockedResources) throws IOException {}
 
   /**
    * Called prior to moving a given region from one region server to another.
@@ -1890,7 +1891,7 @@ public interface MasterObserver extends Coprocessor {
    * @param ctx the environment to interact with the framework and master
    */
   default void preRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
-      TableName tableName, HRegionInfo[] regionInfos, LockProcedure.LockType type,
+      TableName tableName, HRegionInfo[] regionInfos, LockType type,
       String description) throws IOException {}
 
   /**
@@ -1898,7 +1899,7 @@ public interface MasterObserver extends Coprocessor {
    * @param ctx the environment to interact with the framework and master
    */
   default void postRequestLock(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace,
-      TableName tableName, HRegionInfo[] regionInfos, LockProcedure.LockType type,
+      TableName tableName, HRegionInfo[] regionInfos, LockType type,
       String description) throws IOException {}
 
   /**

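Coprocessors that track these hooks update the same way. A hypothetical observer that logs the renamed post-hooks; the start/stop stubs are included in case the Coprocessor interface in this version requires them:

    import java.io.IOException;
    import java.util.List;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.CoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.MasterObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.procedure2.LockedResource;
    import org.apache.hadoop.hbase.procedure2.Procedure;

    /** Hypothetical observer that logs the renamed hooks. */
    public class AuditMasterObserver implements MasterObserver {
      private static final Log LOG = LogFactory.getLog(AuditMasterObserver.class);

      @Override
      public void postGetProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx,
          List<Procedure<?>> procList) throws IOException {
        LOG.info("getProcedures returned " + procList.size() + " procedure(s)");
      }

      @Override
      public void postGetLocks(ObserverContext<MasterCoprocessorEnvironment> ctx,
          List<LockedResource> lockedResources) throws IOException {
        LOG.info("getLocks returned " + lockedResources.size() + " lock(s)");
      }

      @Override
      public void start(CoprocessorEnvironment env) throws IOException { }

      @Override
      public void stop(CoprocessorEnvironment env) throws IOException { }
    }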
http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
index c4438bb..c4c1495 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ExpiredMobFileCleanerChore.java
@@ -29,10 +29,10 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.master.locking.LockManager;
-import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.mob.ExpiredMobFileCleaner;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.procedure2.LockType;
 
 /**
  * The Class ExpiredMobFileCleanerChore for running cleaner regularly to remove the expired
@@ -68,7 +68,7 @@ public class ExpiredMobFileCleanerChore extends ScheduledChore {
             // clean only for mob-enabled column.
             // obtain a read table lock before cleaning, synchronize with MobFileCompactionChore.
             final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
-                MobUtils.getTableLockName(htd.getTableName()), LockProcedure.LockType.SHARED,
+                MobUtils.getTableLockName(htd.getTableName()), LockType.SHARED,
                 this.getClass().getSimpleName() + ": Cleaning expired mob files");
             try {
               lock.acquire();

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index ad304ae..0c79c58 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -68,7 +68,6 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.PleaseHoldException;
-import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -138,11 +137,10 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
 import org.apache.hadoop.hbase.monitoring.TaskMonitor;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 import org.apache.hadoop.hbase.procedure.flush.MasterFlushTableProcedureManager;
-import org.apache.hadoop.hbase.procedure2.LockInfo;
+import org.apache.hadoop.hbase.procedure2.LockedResource;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
-import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
@@ -3051,41 +3049,35 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
-  public List<ProcedureInfo> listProcedures() throws IOException {
+  public List<Procedure<?>> getProcedures() throws IOException {
     if (cpHost != null) {
-      cpHost.preListProcedures();
+      cpHost.preGetProcedures();
     }
 
-    final List<Procedure> procList = this.procedureExecutor.listProcedures();
-    final List<ProcedureInfo> procInfoList = new ArrayList<>(procList.size());
-
-    for (Procedure proc : procList) {
-      ProcedureInfo procInfo = ProcedureUtil.convertToProcedureInfo(proc);
-      procInfoList.add(procInfo);
-    }
+    final List<Procedure<?>> procList = this.procedureExecutor.getProcedures();
 
     if (cpHost != null) {
-      cpHost.postListProcedures(procInfoList);
+      cpHost.postGetProcedures(procList);
     }
 
-    return procInfoList;
+    return procList;
   }
 
   @Override
-  public List<LockInfo> listLocks() throws IOException {
+  public List<LockedResource> getLocks() throws IOException {
     if (cpHost != null) {
-      cpHost.preListLocks();
+      cpHost.preGetLocks();
     }
 
     MasterProcedureScheduler procedureScheduler = procedureExecutor.getEnvironment().getProcedureScheduler();
 
-    final List<LockInfo> lockInfoList = procedureScheduler.listLocks();
+    final List<LockedResource> lockedResources = procedureScheduler.getLocks();
 
     if (cpHost != null) {
-      cpHost.postListLocks(lockInfoList);
+      cpHost.postGetLocks(lockedResources);
     }
 
-    return lockInfoList;
+    return lockedResources;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
index 6c43fc0..eaa4f5f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterCoprocessorHost.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaMutationAnnotation;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
@@ -54,7 +53,9 @@ import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
 import org.apache.hadoop.hbase.net.Address;
-import org.apache.hadoop.hbase.procedure2.LockInfo;
+import org.apache.hadoop.hbase.procedure2.LockType;
+import org.apache.hadoop.hbase.procedure2.LockedResource;
+import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.User;
@@ -691,42 +692,42 @@ public class MasterCoprocessorHost
     });
   }
 
-  public boolean preListProcedures() throws IOException {
+  public boolean preGetProcedures() throws IOException {
     return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
       @Override
       public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
           throws IOException {
-        oserver.preListProcedures(ctx);
+        oserver.preGetProcedures(ctx);
       }
     });
   }
 
-  public void postListProcedures(final List<ProcedureInfo> procInfoList) throws IOException {
+  public void postGetProcedures(final List<Procedure<?>> procInfoList) throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
       @Override
       public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
           throws IOException {
-        oserver.postListProcedures(ctx, procInfoList);
+        oserver.postGetProcedures(ctx, procInfoList);
       }
     });
   }
 
-  public boolean preListLocks() throws IOException {
+  public boolean preGetLocks() throws IOException {
     return execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
       @Override
       public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
           throws IOException {
-        oserver.preListLocks(ctx);
+        oserver.preGetLocks(ctx);
       }
     });
   }
 
-  public void postListLocks(final List<LockInfo> lockInfoList) throws IOException {
+  public void postGetLocks(final List<LockedResource> lockedResources) throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
       @Override
       public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
           throws IOException {
-        oserver.postListLocks(ctx, lockInfoList);
+        oserver.postGetLocks(ctx, lockedResources);
       }
     });
   }
@@ -1837,7 +1838,7 @@ public class MasterCoprocessorHost
   }
 
   public void preRequestLock(String namespace, TableName tableName, HRegionInfo[] regionInfos,
-      LockProcedure.LockType type, String description) throws IOException {
+      LockType type, String description) throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
       @Override
       public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)
@@ -1848,7 +1849,7 @@ public class MasterCoprocessorHost
   }
 
   public void postRequestLock(String namespace, TableName tableName, HRegionInfo[] regionInfos,
-      LockProcedure.LockType type, String description) throws IOException {
+      LockType type, String description) throws IOException {
     execOperation(coprocessors.isEmpty() ? null : new CoprocessorOperation() {
       @Override
       public void call(MasterObserver oserver, ObserverContext<MasterCoprocessorEnvironment> ctx)

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
index d092efe..52b88db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterMobCompactionThread.java
@@ -35,8 +35,8 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.master.locking.LockManager;
-import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.procedure2.LockType;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
@@ -120,7 +120,7 @@ public class MasterMobCompactionThread {
     public void run() {
       // These locks are on dummy table names, and only used for compaction/mob file cleaning.
       final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
-          MobUtils.getTableLockName(tableName), LockProcedure.LockType.EXCLUSIVE,
+          MobUtils.getTableLockName(tableName), LockType.EXCLUSIVE,
           this.getClass().getName() + ": mob compaction");
       try {
         for (ColumnFamilyDescriptor hcd : hcds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 3ec2c45..971fa3b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
-import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.ServerLoad;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -60,8 +59,10 @@ import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
 import org.apache.hadoop.hbase.mob.MobUtils;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
-import org.apache.hadoop.hbase.procedure2.LockInfo;
+import org.apache.hadoop.hbase.procedure2.LockType;
+import org.apache.hadoop.hbase.procedure2.LockedResource;
 import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.quotas.QuotaObserverChore;
 import org.apache.hadoop.hbase.quotas.QuotaUtil;
@@ -82,8 +83,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegi
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.*;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameStringPair;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
@@ -92,8 +95,111 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockH
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockService;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.*;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse.Capability;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.BalanceResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateNamespaceResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DrainRegionServersResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetLocksResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProceduresResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableStateResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListDrainingRegionServersResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampForRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MajorCompactionTimestampResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MasterService;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyTableResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MoveRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.OfflineRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RemoveDrainFromRegionServersResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCatalogScanResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.RunCleanerChoreResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SecurityCapabilitiesResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest;
@@ -108,6 +214,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest;
@@ -116,10 +224,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRe
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.NamespaceQuotaSnapshot;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetQuotaStatesResponse.TableQuotaSnapshot;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.GetSpaceQuotaRegionSizesResponse.RegionSizes;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse;
@@ -1078,13 +1186,13 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
-  public ListProceduresResponse listProcedures(
+  public GetProceduresResponse getProcedures(
       RpcController rpcController,
-      ListProceduresRequest request) throws ServiceException {
+      GetProceduresRequest request) throws ServiceException {
     try {
-      final ListProceduresResponse.Builder response = ListProceduresResponse.newBuilder();
-      for (ProcedureInfo p: master.listProcedures()) {
-        response.addProcedure(ProtobufUtil.toProtoProcedure(p));
+      final GetProceduresResponse.Builder response = GetProceduresResponse.newBuilder();
+      for (Procedure<?> p: master.getProcedures()) {
+        response.addProcedure(ProcedureUtil.convertToProtoProcedure(p));
       }
       return response.build();
     } catch (IOException e) {
@@ -1093,14 +1201,14 @@ public class MasterRpcServices extends RSRpcServices
   }
 
   @Override
-  public ListLocksResponse listLocks(
+  public GetLocksResponse getLocks(
       RpcController controller,
-      ListLocksRequest request) throws ServiceException {
+      GetLocksRequest request) throws ServiceException {
     try {
-      final ListLocksResponse.Builder builder = ListLocksResponse.newBuilder();
+      final GetLocksResponse.Builder builder = GetLocksResponse.newBuilder();
 
-      for (LockInfo lockInfo: master.listLocks()) {
-        builder.addLock(ProtobufUtil.toProtoLockInfo(lockInfo));
+      for (LockedResource lockedResource: master.getLocks()) {
+        builder.addLock(ProcedureUtil.convertToProtoLockedResource(lockedResource));
       }
 
       return builder.build();
@@ -1655,28 +1763,28 @@ public class MasterRpcServices extends RSRpcServices
     SecurityCapabilitiesResponse.Builder response = SecurityCapabilitiesResponse.newBuilder();
     try {
       master.checkInitialized();
-      Set<Capability> capabilities = new HashSet<>();
+      Set<SecurityCapabilitiesResponse.Capability> capabilities = new HashSet<>();
       // Authentication
       if (User.isHBaseSecurityEnabled(master.getConfiguration())) {
-        capabilities.add(Capability.SECURE_AUTHENTICATION);
+        capabilities.add(SecurityCapabilitiesResponse.Capability.SECURE_AUTHENTICATION);
       } else {
-        capabilities.add(Capability.SIMPLE_AUTHENTICATION);
+        capabilities.add(SecurityCapabilitiesResponse.Capability.SIMPLE_AUTHENTICATION);
       }
       // The AccessController can provide AUTHORIZATION and CELL_AUTHORIZATION
       if (master.cpHost != null &&
             master.cpHost.findCoprocessor(AccessController.class.getName()) != null) {
         if (AccessController.isAuthorizationSupported(master.getConfiguration())) {
-          capabilities.add(Capability.AUTHORIZATION);
+          capabilities.add(SecurityCapabilitiesResponse.Capability.AUTHORIZATION);
         }
         if (AccessController.isCellAuthorizationSupported(master.getConfiguration())) {
-          capabilities.add(Capability.CELL_AUTHORIZATION);
+          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_AUTHORIZATION);
         }
       }
       // The VisibilityController can provide CELL_VISIBILITY
       if (master.cpHost != null &&
             master.cpHost.findCoprocessor(VisibilityController.class.getName()) != null) {
         if (VisibilityController.isCellAuthorizationSupported(master.getConfiguration())) {
-          capabilities.add(Capability.CELL_VISIBILITY);
+          capabilities.add(SecurityCapabilitiesResponse.Capability.CELL_VISIBILITY);
         }
       }
       response.addAllCapabilities(capabilities);
@@ -1846,7 +1954,7 @@ public class MasterRpcServices extends RSRpcServices
         throw new IllegalArgumentException("Empty description");
       }
       NonceProcedureRunnable npr;
-      LockProcedure.LockType type = LockProcedure.LockType.valueOf(request.getLockType().name());
+      LockType type = LockType.valueOf(request.getLockType().name());
       if (request.getRegionInfoCount() > 0) {
         final HRegionInfo[] regionInfos = new HRegionInfo[request.getRegionInfoCount()];
         for (int i = 0; i < request.getRegionInfoCount(); ++i) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index cde9e34..6d5c53f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.ProcedureInfo;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -41,7 +40,8 @@ import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
-import org.apache.hadoop.hbase.procedure2.LockInfo;
+import org.apache.hadoop.hbase.procedure2.LockedResource;
+import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
@@ -362,18 +362,18 @@ public interface MasterServices extends Server {
       throws IOException;
 
   /**
-   * List procedures
+   * Get procedures
    * @return procedure list
    * @throws IOException
    */
-  public List<ProcedureInfo> listProcedures() throws IOException;
+  public List<Procedure<?>> getProcedures() throws IOException;
 
   /**
-   * List locks
+   * Get locks
    * @return lock list
    * @throws IOException
    */
-  public List<LockInfo> listLocks() throws IOException;
+  public List<LockedResource> getLocks() throws IOException;
 
   /**
    * Get list of table descriptors by namespace

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
index 476c65c..2e4ff9a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobCompactionChore.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.master.locking.LockManager;
-import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.procedure2.LockType;
 
 /**
  * The Class MobCompactChore for running compaction regularly to merge small mob files.
@@ -64,7 +64,7 @@ public class MobCompactionChore extends ScheduledChore {
         boolean reported = false;
         try {
           final LockManager.MasterLock lock = master.getLockManager().createMasterLock(
-              MobUtils.getTableLockName(htd.getTableName()), LockProcedure.LockType.EXCLUSIVE,
+              MobUtils.getTableLockName(htd.getTableName()), LockType.EXCLUSIVE,
               this.getClass().getName() + ": mob compaction");
           for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
             if (!hcd.isMobEnabled()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
index d78ba74..abc9796 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
@@ -20,9 +20,6 @@
 package org.apache.hadoop.hbase.master.assignment;
 
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -38,6 +35,7 @@ import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionOpenOperation;
 import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -121,7 +119,8 @@ public class AssignProcedure extends RegionTransitionProcedure {
   }
 
   @Override
-  public void serializeStateData(final OutputStream stream) throws IOException {
+  protected void serializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
     final AssignRegionStateData.Builder state = AssignRegionStateData.newBuilder()
         .setTransitionState(getTransitionState())
         .setRegionInfo(HRegionInfo.convert(getRegionInfo()));
@@ -131,12 +130,13 @@ public class AssignProcedure extends RegionTransitionProcedure {
     if (this.targetServer != null) {
       state.setTargetServer(ProtobufUtil.toServerName(this.targetServer));
     }
-    state.build().writeDelimitedTo(stream);
+    serializer.serialize(state.build());
   }
 
   @Override
-  public void deserializeStateData(final InputStream stream) throws IOException {
-    final AssignRegionStateData state = AssignRegionStateData.parseDelimitedFrom(stream);
+  protected void deserializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    final AssignRegionStateData state = serializer.deserialize(AssignRegionStateData.class);
     setTransitionState(state.getTransitionState());
     setRegionInfo(HRegionInfo.convert(state.getRegionInfo()));
     forceNewPlan = state.getForceNewPlan();

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java
index c7d97ee..27f6707 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCMergedRegionsProcedure.java
@@ -18,9 +18,6 @@
 package org.apache.hadoop.hbase.master.assignment;
 
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -29,6 +26,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -132,21 +130,23 @@ extends AbstractStateMachineTableProcedure<GCMergedRegionsState> {
   }
 
   @Override
-  protected void serializeStateData(OutputStream stream) throws IOException {
-    super.serializeStateData(stream);
+  protected void serializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.serializeStateData(serializer);
     final MasterProcedureProtos.GCMergedRegionsStateData.Builder msg =
         MasterProcedureProtos.GCMergedRegionsStateData.newBuilder().
         setParentA(HRegionInfo.convert(this.father)).
         setParentB(HRegionInfo.convert(this.mother)).
         setMergedChild(HRegionInfo.convert(this.mergedChild));
-    msg.build().writeDelimitedTo(stream);
+    serializer.serialize(msg.build());
   }
 
   @Override
-  protected void deserializeStateData(InputStream stream) throws IOException {
-    super.deserializeStateData(stream);
+  protected void deserializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.deserializeStateData(serializer);
     final MasterProcedureProtos.GCMergedRegionsStateData msg =
-        MasterProcedureProtos.GCMergedRegionsStateData.parseDelimitedFrom(stream);
+        serializer.deserialize(MasterProcedureProtos.GCMergedRegionsStateData.class);
     this.father = HRegionInfo.convert(msg.getParentA());
     this.mother = HRegionInfo.convert(msg.getParentB());
     this.mergedChild = HRegionInfo.convert(msg.getMergedChild());

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
index 29d0676..2d7f239 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/GCRegionProcedure.java
@@ -18,9 +18,6 @@
 package org.apache.hadoop.hbase.master.assignment;
 
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
@@ -32,6 +29,7 @@ import org.apache.hadoop.hbase.favored.FavoredNodesManager;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
@@ -131,20 +129,22 @@ public class GCRegionProcedure extends AbstractStateMachineRegionProcedure<GCReg
   }
 
   @Override
-  protected void serializeStateData(OutputStream stream) throws IOException {
-    super.serializeStateData(stream);
+  protected void serializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.serializeStateData(serializer);
     // Double serialization of regionname. Superclass is also serializing. Fix.
     final MasterProcedureProtos.GCRegionStateData.Builder msg =
         MasterProcedureProtos.GCRegionStateData.newBuilder()
         .setRegionInfo(HRegionInfo.convert(getRegion()));
-    msg.build().writeDelimitedTo(stream);
+    serializer.serialize(msg.build());
   }
 
   @Override
-  protected void deserializeStateData(InputStream stream) throws IOException {
-    super.deserializeStateData(stream);
+  protected void deserializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.deserializeStateData(serializer);
     final MasterProcedureProtos.GCRegionStateData msg =
-        MasterProcedureProtos.GCRegionStateData.parseDelimitedFrom(stream);
+        serializer.deserialize(MasterProcedureProtos.GCRegionStateData.class);
     setRegion(HRegionInfo.convert(msg.getRegionInfo()));
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
index c398c9a..1741bd6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MergeTableRegionsProcedure.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.hbase.master.assignment;
 
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -53,13 +51,15 @@ import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan;
 import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
+import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
-import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
 import org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MergeTableRegionsState;
@@ -67,8 +67,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSUtils;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-
 /**
  * The procedure to Merge a region in a table.
  * This procedure takes an exclusive table lock since it is working over multiple regions.
@@ -346,8 +344,9 @@ public class MergeTableRegionsProcedure
   }
 
   @Override
-  public void serializeStateData(final OutputStream stream) throws IOException {
-    super.serializeStateData(stream);
+  protected void serializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.serializeStateData(serializer);
 
     final MasterProcedureProtos.MergeTableRegionsStateData.Builder mergeTableRegionsMsg =
         MasterProcedureProtos.MergeTableRegionsStateData.newBuilder()
@@ -357,15 +356,16 @@ public class MergeTableRegionsProcedure
     for (int i = 0; i < regionsToMerge.length; ++i) {
       mergeTableRegionsMsg.addRegionInfo(HRegionInfo.convert(regionsToMerge[i]));
     }
-    mergeTableRegionsMsg.build().writeDelimitedTo(stream);
+    serializer.serialize(mergeTableRegionsMsg.build());
   }
 
   @Override
-  public void deserializeStateData(final InputStream stream) throws IOException {
-    super.deserializeStateData(stream);
+  protected void deserializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.deserializeStateData(serializer);
 
     final MasterProcedureProtos.MergeTableRegionsStateData mergeTableRegionsMsg =
-        MasterProcedureProtos.MergeTableRegionsStateData.parseDelimitedFrom(stream);
+        serializer.deserialize(MasterProcedureProtos.MergeTableRegionsStateData.class);
     setUser(MasterProcedureUtil.toUserInfo(mergeTableRegionsMsg.getUserInfo()));
 
     assert(mergeTableRegionsMsg.getRegionInfoCount() == 2);
@@ -479,7 +479,7 @@ public class MergeTableRegionsProcedure
           new IOException("Merge of " + regionsStr + " failed because merge switch is off"));
       return false;
     }
-    
+
 
     // Ask the remote regionserver if regions are mergeable. If we get an IOE, report it
     // along w/ the failure so can see why we are not mergeable at this time.

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
index 1907e98..9e0d5f1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/MoveRegionProcedure.java
@@ -20,8 +20,6 @@
 package org.apache.hadoop.hbase.master.assignment;
 
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -32,6 +30,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData;
@@ -124,8 +123,9 @@ public class MoveRegionProcedure extends AbstractStateMachineRegionProcedure<Mov
   }
 
   @Override
-  protected void serializeStateData(final OutputStream stream) throws IOException {
-    super.serializeStateData(stream);
+  protected void serializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.serializeStateData(serializer);
 
     final MoveRegionStateData.Builder state = MoveRegionStateData.newBuilder()
         // No need to serialize the HRegionInfo. The super class has the region.
@@ -133,14 +133,16 @@ public class MoveRegionProcedure extends AbstractStateMachineRegionProcedure<Mov
     if (plan.getDestination() != null) {
       state.setDestinationServer(ProtobufUtil.toServerName(plan.getDestination()));
     }
-    state.build().writeDelimitedTo(stream);
+
+    serializer.serialize(state.build());
   }
 
   @Override
-  protected void deserializeStateData(final InputStream stream) throws IOException {
-    super.deserializeStateData(stream);
+  protected void deserializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.deserializeStateData(serializer);
 
-    final MoveRegionStateData state = MoveRegionStateData.parseDelimitedFrom(stream);
+    final MoveRegionStateData state = serializer.deserialize(MoveRegionStateData.class);
     final HRegionInfo regionInfo = getRegion(); // Get it from super class deserialization.
     final ServerName sourceServer = ProtobufUtil.toServerName(state.getSourceServer());
     final ServerName destinationServer = state.hasDestinationServer() ?
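
AssignProcedure and MoveRegionProcedure handle optional protobuf fields the
same way on both sides of the rewrite: set the field only when the Java value
is non-null, and probe the generated has*() accessor when reading. A compact
illustration of the pattern (the message type and ProtobufUtil calls are the
ones used above; the wrapper class and method names are made up):

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.MoveRegionStateData;

    final class OptionalFieldExample {
      // Write side: leave destination_server unset when there is no target.
      static MoveRegionStateData toProto(ServerName source, ServerName destination) {
        MoveRegionStateData.Builder state = MoveRegionStateData.newBuilder()
            .setSourceServer(ProtobufUtil.toServerName(source));
        if (destination != null) {
          state.setDestinationServer(ProtobufUtil.toServerName(destination));
        }
        return state.build();
      }

      // Read side: check has*() first, so an absent field maps back to null.
      static ServerName destinationOf(MoveRegionStateData state) {
        return state.hasDestinationServer()
            ? ProtobufUtil.toServerName(state.getDestinationServer())
            : null;
      }
    }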

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
index 8a26380..a81bbe1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java
@@ -22,9 +22,7 @@ import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTe
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.InterruptedIOException;
-import java.io.OutputStream;
 import java.util.*;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
@@ -39,7 +37,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.ServerName;
@@ -59,6 +56,7 @@ import org.apache.hadoop.hbase.master.procedure.AbstractStateMachineRegionProced
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil;
 import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.quotas.QuotaExceededException;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.SplitTableRegionState;
@@ -323,8 +321,9 @@ public class SplitTableRegionProcedure
   }
 
   @Override
-  public void serializeStateData(final OutputStream stream) throws IOException {
-    super.serializeStateData(stream);
+  protected void serializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.serializeStateData(serializer);
 
     final MasterProcedureProtos.SplitTableRegionStateData.Builder splitTableRegionMsg =
         MasterProcedureProtos.SplitTableRegionStateData.newBuilder()
@@ -332,15 +331,16 @@ public class SplitTableRegionProcedure
         .setParentRegionInfo(HRegionInfo.convert(getRegion()))
         .addChildRegionInfo(HRegionInfo.convert(daughter_1_HRI))
         .addChildRegionInfo(HRegionInfo.convert(daughter_2_HRI));
-    splitTableRegionMsg.build().writeDelimitedTo(stream);
+    serializer.serialize(splitTableRegionMsg.build());
   }
 
   @Override
-  public void deserializeStateData(final InputStream stream) throws IOException {
-    super.deserializeStateData(stream);
+  protected void deserializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.deserializeStateData(serializer);
 
     final MasterProcedureProtos.SplitTableRegionStateData splitTableRegionsMsg =
-        MasterProcedureProtos.SplitTableRegionStateData.parseDelimitedFrom(stream);
+        serializer.deserialize(MasterProcedureProtos.SplitTableRegionStateData.class);
     setUser(MasterProcedureUtil.toUserInfo(splitTableRegionsMsg.getUserInfo()));
     setRegion(HRegionInfo.convert(splitTableRegionsMsg.getParentRegionInfo()));
     assert(splitTableRegionsMsg.getChildRegionInfoCount() == 2);
@@ -678,6 +678,7 @@ public class SplitTableRegionProcedure
       this.family = family;
     }
 
+    @Override
     public Pair<Path,Path> call() throws IOException {
       return splitStoreFile(regionFs, family, sf);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
index c9f0fac..7d875b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/UnassignProcedure.java
@@ -20,11 +20,6 @@
 package org.apache.hadoop.hbase.master.assignment;
 
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.net.ConnectException;
-import java.util.concurrent.atomic.AtomicBoolean;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HConstants;
@@ -40,6 +35,7 @@ import org.apache.hadoop.hbase.master.procedure.ServerCrashException;
 import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.RegionCloseOperation;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.procedure2.ProcedureMetrics;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
@@ -48,7 +44,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import org.apache.hadoop.hbase.regionserver.RegionServerAbortedException;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 
-
 /**
  * Procedure that describes the unassignment of a single region.
  * There can only be one RegionTransitionProcedure -- i.e. an assign or an unassign -- per region
@@ -128,7 +123,8 @@ public class UnassignProcedure extends RegionTransitionProcedure {
   }
 
   @Override
-  public void serializeStateData(final OutputStream stream) throws IOException {
+  protected void serializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
     UnassignRegionStateData.Builder state = UnassignRegionStateData.newBuilder()
         .setTransitionState(getTransitionState())
         .setHostingServer(ProtobufUtil.toServerName(this.hostingServer))
@@ -139,12 +135,14 @@ public class UnassignProcedure extends RegionTransitionProcedure {
     if (force) {
       state.setForce(true);
     }
-    state.build().writeDelimitedTo(stream);
+    serializer.serialize(state.build());
   }
 
   @Override
-  public void deserializeStateData(final InputStream stream) throws IOException {
-    final UnassignRegionStateData state = UnassignRegionStateData.parseDelimitedFrom(stream);
+  protected void deserializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    final UnassignRegionStateData state =
+        serializer.deserialize(UnassignRegionStateData.class);
     setTransitionState(state.getTransitionState());
     setRegionInfo(HRegionInfo.convert(state.getRegionInfo()));
     this.hostingServer = ProtobufUtil.toServerName(state.getHostingServer());

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
index 6c8bbba..87ad557d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockManager.java
@@ -18,19 +18,20 @@
  */
 package org.apache.hadoop.hbase.master.locking;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.procedure2.LockType;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.util.NonceKey;
 
-import java.io.IOException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
 /**
  * Functions to acquire lock on table/namespace/regions.
  */
@@ -50,12 +51,12 @@ public final class LockManager {
   }
 
   public MasterLock createMasterLock(final String namespace,
-      final LockProcedure.LockType type, final String description) {
+      final LockType type, final String description) {
     return new MasterLock(namespace, type, description);
   }
 
   public MasterLock createMasterLock(final TableName tableName,
-      final LockProcedure.LockType type, final String description) {
+      final LockType type, final String description) {
     return new MasterLock(tableName, type, description);
   }
 
@@ -81,13 +82,13 @@ public final class LockManager {
     private final String namespace;
     private final TableName tableName;
     private final HRegionInfo[] regionInfos;
-    private final LockProcedure.LockType type;
+    private final LockType type;
     private final String description;
 
     private LockProcedure proc = null;
 
     public MasterLock(final String namespace,
-        final LockProcedure.LockType type, final String description) {
+        final LockType type, final String description) {
       this.namespace = namespace;
       this.tableName = null;
       this.regionInfos = null;
@@ -96,7 +97,7 @@ public final class LockManager {
     }
 
     public MasterLock(final TableName tableName,
-        final LockProcedure.LockType type, final String description) {
+        final LockType type, final String description) {
       this.namespace = null;
       this.tableName = tableName;
       this.regionInfos = null;
@@ -108,7 +109,7 @@ public final class LockManager {
       this.namespace = null;
       this.tableName = null;
       this.regionInfos = regionInfos;
-      this.type = LockProcedure.LockType.EXCLUSIVE;
+      this.type = LockType.EXCLUSIVE;
       this.description = description;
     }
 
@@ -203,7 +204,7 @@ public final class LockManager {
    * locks, regular heartbeats are required to keep the lock held.
    */
   public class RemoteLocks {
-    public long requestNamespaceLock(final String namespace, final LockProcedure.LockType type,
+    public long requestNamespaceLock(final String namespace, final LockType type,
         final String description, final NonceKey nonceKey)
         throws IllegalArgumentException, IOException {
       master.getMasterCoprocessorHost().preRequestLock(namespace, null, null, type, description);
@@ -214,7 +215,7 @@ public final class LockManager {
       return proc.getProcId();
     }
 
-    public long requestTableLock(final TableName tableName, final LockProcedure.LockType type,
+    public long requestTableLock(final TableName tableName, final LockType type,
         final String description, final NonceKey nonceKey)
         throws IllegalArgumentException, IOException {
       master.getMasterCoprocessorHost().preRequestLock(null, tableName, null, type, description);
@@ -232,12 +233,12 @@ public final class LockManager {
         final NonceKey nonceKey)
     throws IllegalArgumentException, IOException {
       master.getMasterCoprocessorHost().preRequestLock(null, null, regionInfos,
-            LockProcedure.LockType.EXCLUSIVE, description);
+            LockType.EXCLUSIVE, description);
       final LockProcedure proc = new LockProcedure(master.getConfiguration(), regionInfos,
-          LockProcedure.LockType.EXCLUSIVE, description, null);
+          LockType.EXCLUSIVE, description, null);
       submitProcedure(proc, nonceKey);
       master.getMasterCoprocessorHost().postRequestLock(null, null, regionInfos,
-            LockProcedure.LockType.EXCLUSIVE, description);
+            LockType.EXCLUSIVE, description);
       return proc.getProcId();
     }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
index edbba83..e7b4168 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/locking/LockProcedure.java
@@ -27,8 +27,10 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface;
+import org.apache.hadoop.hbase.procedure2.LockType;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;
@@ -36,8 +38,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockP
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos;
 
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
@@ -66,9 +66,6 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
   public static final String LOCAL_MASTER_LOCKS_TIMEOUT_MS_CONF =
       "hbase.master.procedure.local.master.locks.timeout.ms";
 
-  // Also used in serialized states, changes will affect backward compatibility.
-  public enum LockType { SHARED, EXCLUSIVE }
-
   private String namespace;
   private TableName tableName;
   private HRegionInfo[] regionInfos;
@@ -265,7 +262,8 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
   }
 
   @Override
-  protected void serializeStateData(final OutputStream stream) throws IOException {
+  protected void serializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
     final LockProcedureData.Builder builder = LockProcedureData.newBuilder()
           .setLockType(LockServiceProtos.LockType.valueOf(type.name()))
           .setDescription(description);
@@ -281,12 +279,13 @@ public final class LockProcedure extends Procedure<MasterProcedureEnv>
     if (lockAcquireLatch != null) {
       builder.setIsMasterLock(true);
     }
-    builder.build().writeDelimitedTo(stream);
+    serializer.serialize(builder.build());
   }
 
   @Override
-  protected void deserializeStateData(final InputStream stream) throws IOException {
-    final LockProcedureData state = LockProcedureData.parseDelimitedFrom(stream);
+  protected void deserializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    final LockProcedureData state = serializer.deserialize(LockProcedureData.class);
     type = LockType.valueOf(state.getLockType().name());
     description = state.getDescription();
     if (state.getRegionInfoCount() > 0) {
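
Note that the patch bridges the Java LockType enum and the protobuf
LockServiceProtos.LockType purely by constant name, valueOf(name()) in both
directions, so the two enums must keep identical constant sets. A minimal
round-trip check of that convention (both enums are from the patch; the main
harness is illustrative):

    import org.apache.hadoop.hbase.procedure2.LockType;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos;

    public class LockTypeNameBridge {
      public static void main(String[] args) {
        for (LockType type : LockType.values()) {
          // Java enum -> protobuf enum by shared constant name.
          LockServiceProtos.LockType pb = LockServiceProtos.LockType.valueOf(type.name());
          // And back again, mirroring deserializeStateData above.
          if (LockType.valueOf(pb.name()) != type) {
            throw new AssertionError("enum constant sets drifted apart");
          }
        }
      }
    }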

http://git-wip-us.apache.org/repos/asf/hbase/blob/359fed7b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java
index 41502d4..c254cc4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/AbstractStateMachineRegionProcedure.java
@@ -19,14 +19,12 @@
 package org.apache.hadoop.hbase.master.procedure;
 
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 
 /**
@@ -84,6 +82,7 @@ public abstract class AbstractStateMachineRegionProcedure<TState>
    * @param env MasterProcedureEnv
    * @throws IOException
    */
+  @Override
   protected void checkTableModifiable(final MasterProcedureEnv env) throws IOException {
     // Checks whether the table exists
     if (!MetaTableAccessor.tableExists(env.getMasterServices().getConnection(), getTableName())) {
@@ -96,6 +95,7 @@ public abstract class AbstractStateMachineRegionProcedure<TState>
     return true;
   }
 
+  @Override
   protected LockState acquireLock(final MasterProcedureEnv env) {
     if (env.waitInitialized(this)) return LockState.LOCK_EVENT_WAIT;
     if (env.getProcedureScheduler().waitRegions(this, getTableName(), getRegion())) {
@@ -105,6 +105,7 @@ public abstract class AbstractStateMachineRegionProcedure<TState>
     return LockState.LOCK_ACQUIRED;
   }
 
+  @Override
   protected void releaseLock(final MasterProcedureEnv env) {
     this.lock = false;
     env.getProcedureScheduler().wakeRegions(this, getTableName(), getRegion());
@@ -120,14 +121,16 @@ public abstract class AbstractStateMachineRegionProcedure<TState>
   }
 
   @Override
-  protected void serializeStateData(final OutputStream stream) throws IOException {
-    super.serializeStateData(stream);
-    HRegionInfo.convert(getRegion()).writeDelimitedTo(stream);
+  protected void serializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.serializeStateData(serializer);
+    serializer.serialize(HRegionInfo.convert(getRegion()));
   }
 
   @Override
-  protected void deserializeStateData(final InputStream stream) throws IOException {
-    super.deserializeStateData(stream);
-    this.hri = HRegionInfo.convert(HBaseProtos.RegionInfo.parseDelimitedFrom(stream));
+  protected void deserializeStateData(ProcedureStateSerializer serializer)
+      throws IOException {
+    super.deserializeStateData(serializer);
+    this.hri = HRegionInfo.convert(serializer.deserialize(HBaseProtos.RegionInfo.class));
   }
 }
\ No newline at end of file
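
A closing observation on the whole series of conversions: every subclass calls
super.serializeStateData(serializer) before writing its own message and
super.deserializeStateData(serializer) before reading it, which only works if
the serializer is positional, handing messages back in write order. A sketch
of that invariant (the class and method names here are illustrative, not from
the patch):

    import java.io.IOException;
    import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
    import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

    abstract class OrderedStateExample {
      void write(ProcedureStateSerializer serializer, HBaseProtos.RegionInfo region,
          HBaseProtos.ServerName server) throws IOException {
        serializer.serialize(region);  // message #1
        serializer.serialize(server);  // message #2
      }

      void read(ProcedureStateSerializer serializer) throws IOException {
        // Reads must mirror the writes, message for message, in order.
        HBaseProtos.RegionInfo region = serializer.deserialize(HBaseProtos.RegionInfo.class);
        HBaseProtos.ServerName server = serializer.deserialize(HBaseProtos.ServerName.class);
      }
    }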