Posted to commits@hbase.apache.org by zh...@apache.org on 2017/12/20 01:30:08 UTC

[01/24] hbase git commit: HBASE-19521 HBase mob compaction needs to check hfile version [Forced Update!]

Repository: hbase
Updated Branches:
  refs/heads/HBASE-19397 d84491484 -> 9dd4ada2a (forced update)


HBASE-19521 HBase mob compaction needs to check hfile version

Signed-off-by: tedyu <yu...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c8bf03f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c8bf03f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c8bf03f5

Branch: refs/heads/HBASE-19397
Commit: c8bf03f5fb7f039e6458f0a8b1564ab318dda8de
Parents: 62b591b
Author: QilinCao <ca...@zte.com.cn>
Authored: Fri Dec 15 16:11:25 2017 +0800
Committer: tedyu <yu...@gmail.com>
Committed: Mon Dec 18 08:32:18 2017 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/master/MasterRpcServices.java      | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/c8bf03f5/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 3e2f0ef..6044d02 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
 import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.exceptions.UnknownProtocolException;
+import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
 import org.apache.hadoop.hbase.ipc.PriorityFunction;
 import org.apache.hadoop.hbase.ipc.QosPriority;
@@ -1626,6 +1627,7 @@ public class MasterRpcServices extends RSRpcServices
       TableName tableName = RegionInfo.getTable(regionName);
       // if the region is a mob region, do the mob file compaction.
       if (MobUtils.isMobRegionName(tableName, regionName)) {
+        checkHFileFormatVersionForMob();
         return compactMob(request, tableName);
       } else {
         return super.compactRegion(controller, request);
@@ -1635,6 +1637,20 @@ public class MasterRpcServices extends RSRpcServices
     }
   }
 
+  /**
+   * Checks the configured HFile format version before running a MOB compaction.
+   * @throws IOException if the configured version is below the minimum required for MOB
+   */
+  private void checkHFileFormatVersionForMob() throws IOException {
+    if (HFile.getFormatVersion(master.getConfiguration()) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
+      LOG.error("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
+          + " is required for MOB compaction. Compaction will not run.");
+      throw new IOException("A minimum HFile version of " + HFile.MIN_FORMAT_VERSION_WITH_TAGS
+          + " is required for MOB feature. Consider setting " + HFile.FORMAT_VERSION_KEY
+          + " accordingly.");
+    }
+  }
+
   @Override
   @QosPriority(priority=HConstants.ADMIN_QOS)
   public GetRegionInfoResponse getRegionInfo(final RpcController controller,

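For anyone hitting this new check: MOB data depends on cell tags, which only HFile
format version 3 (HFile.MIN_FORMAT_VERSION_WITH_TAGS) and later can persist. A minimal
sketch of satisfying the guard, using only the constants and the HFile.getFormatVersion()
call that appear in the patch above (the class name is illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.io.hfile.HFile;

  public class MobHFileVersionExample {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // Equivalent to setting hfile.format.version in hbase-site.xml.
      conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS);
      // The same guard MasterRpcServices#checkHFileFormatVersionForMob() applies.
      if (HFile.getFormatVersion(conf) < HFile.MIN_FORMAT_VERSION_WITH_TAGS) {
        throw new IllegalStateException("MOB compaction requires HFile version >= "
            + HFile.MIN_FORMAT_VERSION_WITH_TAGS);
      }
    }
  }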

[13/24] hbase git commit: HBASE-19539 Removed unnecessary semicolons in hbase-common

Posted by zh...@apache.org.
HBASE-19539 Removed unnecessary semicolons in hbase-common

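All of the removals in this commit (and in its sibling HBASE-19540 further down) follow
the same few patterns: a trailing semicolon after the last enum constant, a stray
semicolon after a type body, and a doubled statement terminator. A small before/after
sketch with hypothetical names:

  public class SemicolonExamples {
    // Before: enum State { OPEN, CLOSED; }  -- trailing ';' after the last constant
    enum State { OPEN, CLOSED }

    // Before: interface Marker {};          -- stray ';' after a type body
    interface Marker {}

    void log(String s) {
      // Before: System.out.println(s);;     -- doubled statement terminator
      System.out.println(s);
    }
  }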

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0f8ea39a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0f8ea39a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0f8ea39a

Branch: refs/heads/HBASE-19397
Commit: 0f8ea39a5b88b830ca6280e3262e7aa5fcb6280a
Parents: f46a6d1
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sun Dec 17 16:28:06 2017 +0100
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Tue Dec 19 20:22:05 2017 +0100

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java |  2 +-
 .../main/java/org/apache/hadoop/hbase/ProcedureState.java |  2 +-
 .../main/java/org/apache/hadoop/hbase/codec/Codec.java    |  2 +-
 .../org/apache/hadoop/hbase/io/TagCompressionContext.java |  2 +-
 .../hbase/io/crypto/aes/CommonsCryptoAESDecryptor.java    |  2 --
 .../test/java/org/apache/hadoop/hbase/ClassFinder.java    | 10 +++++-----
 .../java/org/apache/hadoop/hbase/ClassTestFinder.java     |  6 +++---
 .../test/java/org/apache/hadoop/hbase/TestCellUtil.java   |  4 ++--
 .../java/org/apache/hadoop/hbase/TestClassFinder.java     |  2 +-
 .../hadoop/hbase/io/crypto/KeyProviderForTesting.java     |  3 +--
 10 files changed, 16 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/0f8ea39a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 0adce07..bc6f975 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -90,7 +90,7 @@ public final class HConstants {
     SUCCESS,
     BAD_FAMILY,
     SANITY_CHECK_FAILURE,
-    FAILURE;
+    FAILURE
   }
 
   /** long constant for zero */

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f8ea39a/hbase-common/src/main/java/org/apache/hadoop/hbase/ProcedureState.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ProcedureState.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ProcedureState.java
index fba5181..18bdf73 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ProcedureState.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ProcedureState.java
@@ -24,5 +24,5 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public enum ProcedureState {
-  INITIALIZING, RUNNABLE, WAITING, WAITING_TIMEOUT, ROLLEDBACK, SUCCESS, FAILED;
+  INITIALIZING, RUNNABLE, WAITING, WAITING_TIMEOUT, ROLLEDBACK, SUCCESS, FAILED
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f8ea39a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
index 01359ea..2d6ac82 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java
@@ -48,7 +48,7 @@ public interface Codec {
    * Decoder/CellScanner runs off the end of the cell block. Do this rather than require the user
    * call close explicitly.
    */
-  interface Decoder extends CellScanner {};
+  interface Decoder extends CellScanner {}
 
   Decoder getDecoder(InputStream is);
   Decoder getDecoder(ByteBuff buf);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f8ea39a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
index d646250..595434c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/TagCompressionContext.java
@@ -94,7 +94,7 @@ public class TagCompressionContext {
       while (pos < endOffset) {
         int tagLen = ByteBufferUtils.readAsInt(in, pos, Tag.TAG_LENGTH_SIZE);
         pos += Tag.TAG_LENGTH_SIZE;
-        Dictionary.write(out, in, pos, tagLen, tagDict);;
+        Dictionary.write(out, in, pos, tagLen, tagDict);
         pos += tagLen;
       }
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f8ea39a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAESDecryptor.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAESDecryptor.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAESDecryptor.java
index bb2290f..9b58a69 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAESDecryptor.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/crypto/aes/CommonsCryptoAESDecryptor.java
@@ -79,7 +79,5 @@ public class CommonsCryptoAESDecryptor implements Decryptor {
 
   @Override
   public void reset() {
-    ;
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f8ea39a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
index db203b8..cc0e969 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassFinder.java
@@ -52,15 +52,15 @@ public class ClassFinder {
 
   public interface ResourcePathFilter {
     boolean isCandidatePath(String resourcePath, boolean isJar);
-  };
+  }
 
   public interface FileNameFilter {
     boolean isCandidateFile(String fileName, String absFilePath);
-  };
+  }
 
   public interface ClassFilter {
     boolean isCandidateClass(Class<?> c);
-  };
+  }
 
   public static class Not implements ResourcePathFilter, FileNameFilter, ClassFilter {
     private ResourcePathFilter resourcePathFilter;
@@ -301,5 +301,5 @@ public class ClassFinder {
               && (null == nameFilter
                 || nameFilter.isCandidateFile(file.getName(), file.getAbsolutePath())));
     }
-  };
-};
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f8ea39a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java
index 9d5618b..85824e9 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/ClassTestFinder.java
@@ -64,7 +64,7 @@ public class ClassTestFinder extends ClassFinder {
     public boolean isCandidatePath(String resourcePath, boolean isJar) {
       return !hadoopCompactRe.matcher(resourcePath).find();
     }
-  };
+  }
 
   /*
   * A class is considered as a test class if:
@@ -116,5 +116,5 @@ public class ClassTestFinder extends ClassFinder {
       }
       return false;
     }
-  };
-};
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f8ea39a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java
index 4ab6bce..aad0929 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java
@@ -54,7 +54,7 @@ public class TestCellUtil {
     public CellScanner cellScanner() {
       return new TestCellScanner(this.cellsCount);
     }
-  };
+  }
 
   /**
    * CellScanner used in test.
@@ -203,7 +203,7 @@ public class TestCellUtil {
     public DataType getType() {
       return PrivateCellUtil.toDataType(getTypeByte());
     }
-  };
+  }
 
   /**
    * Was overflowing if 100k or so lists of cellscanners to return.

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f8ea39a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java
index 3a6d935..d37d731 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestClassFinder.java
@@ -419,4 +419,4 @@ public class TestClassFinder {
     method.invoke(urlClassLoader, new Object[] { jarFile.toURI().toURL() });
     return jarFile.getAbsolutePath();
   }
-};
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/0f8ea39a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeyProviderForTesting.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeyProviderForTesting.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeyProviderForTesting.java
index 9b45d09..7f0a060 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeyProviderForTesting.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/io/crypto/KeyProviderForTesting.java
@@ -36,9 +36,8 @@ public class KeyProviderForTesting implements KeyProvider {
   public Key[] getKeys(String[] aliases) {
     Key[] result = new Key[aliases.length];
     for (int i = 0; i < aliases.length; i++) {
-      result[i] = new SecretKeySpec(Encryption.hash128(aliases[i]), "AES");;
+      result[i] = new SecretKeySpec(Encryption.hash128(aliases[i]), "AES");
     }
     return result;
   }
-
 }


[18/24] hbase git commit: HBASE-19494 Create simple WALKey filter that can be plugged in on the Replication Sink

Posted by zh...@apache.org.
HBASE-19494 Create simple WALKey filter that can be plugged in on the Replication Sink

Implement new WALEntrySinkFilter (as opposed to WALEntryFilter) and
specify the implementation (with a no-param constructor) in config
using property hbase.replication.sink.walentrysinkfilter

Signed-off-by: Wolfgang Hoschek <whoscheck@cloudera.com>

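A minimal sketch of a sink-side filter wired in through that property, using only the
interface this patch introduces (the class name and cutoff value are illustrative):

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter;

  /** Drops any replicated edit written before a fixed cutoff. Illustrative only. */
  public class DropOldEditsSinkFilter implements WALEntrySinkFilter {
    private static final long CUTOFF_MILLIS = 1513555200000L; // example epoch millis

    public DropOldEditsSinkFilter() {} // no-param constructor, as required

    @Override
    public void init(Connection connection) {
      // Keep the Connection if the filter needs cluster context; unused here.
    }

    @Override
    public boolean filter(TableName table, long writeTime) {
      return writeTime < CUTOFF_MILLIS; // true means the entry is filtered out
    }
  }

On the sink cluster this would be enabled by setting
hbase.replication.sink.walentrysinkfilter (WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY in
the new interface below) to the fully qualified name of the class.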

Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/32f6fd41
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/32f6fd41
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/32f6fd41

Branch: refs/heads/HBASE-19397
Commit: 32f6fd41c274a955338b7c43ab80309b9adbba0d
Parents: df351e4
Author: Michael Stack <st...@apache.org>
Authored: Mon Dec 18 12:57:53 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Tue Dec 19 13:48:59 2017 -0800

----------------------------------------------------------------------
 .../hbase/replication/WALEntryFilter.java       |   9 +-
 .../regionserver/ReplicationSink.java           |  47 +-
 .../regionserver/WALEntrySinkFilter.java        |  57 ++
 .../regionserver/TestWALEntrySinkFilter.java    | 549 +++++++++++++++++++
 4 files changed, 650 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/32f6fd41/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java
index 0024b12..417f868 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/WALEntryFilter.java
@@ -25,10 +25,16 @@ import org.apache.hadoop.hbase.wal.WAL.Entry;
 /**
  * A Filter for WAL entries before being sent over to replication. Multiple
  * filters might be chained together using {@link ChainWALEntryFilter}.
+ * Applied on the replication source side.
+ * <p>There is also a filter that can be installed on the sink end of a replication stream.
+ * See {@link org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter}. Certain
+ * use-cases may need such a facility, but it is better to filter here on the source side than
+ * later, after the edit arrives at the sink.</p>
+ * @see org.apache.hadoop.hbase.replication.regionserver.WALEntrySinkFilter for filtering
+ * replication on the sink-side.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
 public interface WALEntryFilter {
-
   /**
    * Applies the filter, possibly returning a different Entry instance.
    * If null is returned, the entry will be skipped.
@@ -37,5 +43,4 @@ public interface WALEntryFilter {
    * no cells will cause the entry to be skipped for replication.
    */
   public Entry filter(Entry entry);
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/32f6fd41/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
index 2f9f9c5..2194796 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSink.java
@@ -89,6 +89,7 @@ public class ReplicationSink {
   // Number of hfiles that we successfully replicated
   private long hfilesReplicated = 0;
   private SourceFSConfigurationProvider provider;
+  private WALEntrySinkFilter walEntrySinkFilter;
 
   /**
    * Create a sink for replication
@@ -102,7 +103,7 @@ public class ReplicationSink {
     this.conf = HBaseConfiguration.create(conf);
     decorateConf();
     this.metrics = new MetricsSink();
-
+    this.walEntrySinkFilter = setupWALEntrySinkFilter();
     String className =
         conf.get("hbase.replication.source.fs.conf.provider",
           DefaultSourceFSConfigurationProvider.class.getCanonicalName());
@@ -116,6 +117,22 @@ public class ReplicationSink {
     }
   }
 
+  private WALEntrySinkFilter setupWALEntrySinkFilter() throws IOException {
+    Class<?> walEntryFilterClass =
+        this.conf.getClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY, null);
+    WALEntrySinkFilter filter = null;
+    try {
+      filter = walEntryFilterClass == null? null:
+          (WALEntrySinkFilter)walEntryFilterClass.newInstance();
+    } catch (Exception e) {
+      LOG.warn("Failed to instantiate " + walEntryFilterClass);
+    }
+    if (filter != null) {
+      filter.init(getConnection());
+    }
+    return filter;
+  }
+
   /**
    * decorate the Configuration object to make replication more receptive to delays:
    * lessen the timeout and numTries.
@@ -134,8 +151,6 @@ public class ReplicationSink {
   /**
    * Replicate this array of entries directly into the local cluster using the native client. Only
    * operates against raw protobuf type saving on a conversion from pb to pojo.
-   * @param entries
-   * @param cells
    * @param replicationClusterId Id which will uniquely identify source cluster FS client
    *          configurations in the replication configuration directory
    * @param sourceBaseNamespaceDirPath Path that point to the source cluster base namespace
@@ -147,7 +162,6 @@ public class ReplicationSink {
       String replicationClusterId, String sourceBaseNamespaceDirPath,
       String sourceHFileArchiveDirPath) throws IOException {
     if (entries.isEmpty()) return;
-    if (cells == null) throw new NullPointerException("TODO: Add handling of null CellScanner");
     // Very simple optimization where we batch sequences of rows going
     // to the same table.
     try {
@@ -162,8 +176,21 @@ public class ReplicationSink {
       for (WALEntry entry : entries) {
         TableName table =
             TableName.valueOf(entry.getKey().getTableName().toByteArray());
+        if (this.walEntrySinkFilter != null) {
+          if (this.walEntrySinkFilter.filter(table, entry.getKey().getWriteTime())) {
+            // Skip Cells in CellScanner associated with this entry.
+            int count = entry.getAssociatedCellCount();
+            for (int i = 0; i < count; i++) {
+              // Throw index out of bounds if our cell count is off
+              if (!cells.advance()) {
+                throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i);
+              }
+            }
+            continue;
+          }
+        }
         Cell previousCell = null;
-        Mutation m = null;
+        Mutation mutation = null;
         int count = entry.getAssociatedCellCount();
         for (int i = 0; i < count; i++) {
           // Throw index out of bounds if our cell count is off
@@ -181,7 +208,7 @@ public class ReplicationSink {
             // Handle wal replication
             if (isNewRowOrType(previousCell, cell)) {
               // Create new mutation
-              m =
+              mutation =
                   CellUtil.isDelete(cell) ? new Delete(cell.getRowArray(), cell.getRowOffset(),
                       cell.getRowLength()) : new Put(cell.getRowArray(), cell.getRowOffset(),
                       cell.getRowLength());
@@ -189,13 +216,13 @@ public class ReplicationSink {
               for (HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()) {
                 clusterIds.add(toUUID(clusterId));
               }
-              m.setClusterIds(clusterIds);
-              addToHashMultiMap(rowMap, table, clusterIds, m);
+              mutation.setClusterIds(clusterIds);
+              addToHashMultiMap(rowMap, table, clusterIds, mutation);
             }
             if (CellUtil.isDelete(cell)) {
-              ((Delete) m).add(cell);
+              ((Delete) mutation).add(cell);
             } else {
-              ((Put) m).add(cell);
+              ((Put) mutation).add(cell);
             }
             previousCell = cell;
           }

http://git-wip-us.apache.org/repos/asf/hbase/blob/32f6fd41/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java
new file mode 100644
index 0000000..f0b13e1
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/WALEntrySinkFilter.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Implementations are installed on a Replication Sink called from inside
+ * ReplicationSink#replicateEntries to filter replicated WALEntries based on WALEntry attributes.
+ * Currently only table name and replication write time are exposed (WALEntry is a private,
+ * internal class so we cannot pass it here). To install, set
+ * <code>hbase.replication.sink.walentrysinkfilter</code> to the name of the implementing
+ * class. Implementing class must have a no-param Constructor.
+ * <p>This filter is of limited use. It is better to filter on the replication source rather than
+ * here, after the edits have been shipped to the replication sink. That said, applications such
+ * as the hbase-indexer want to filter out any edits that were made before replication was enabled.
+ * @see org.apache.hadoop.hbase.replication.WALEntryFilter for filtering on the replication
+ * source-side.
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION)
+public interface WALEntrySinkFilter {
+  /**
+   * Name of configuration to set with name of implementing WALEntrySinkFilter class.
+   */
+  public static final String WAL_ENTRY_FILTER_KEY = "hbase.replication.sink.walentrysinkfilter";
+
+  /**
+   * Called after Construction.
+   * Use passed Connection to keep any context the filter might need.
+   */
+  void init(Connection connection);
+
+  /**
+   * @param table Table edit is destined for.
+   * @param writeTime Time at which the edit was created on the source.
+   * @return True if we are to filter out the edit.
+   */
+  boolean filter(TableName table, long writeTime);
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/32f6fd41/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
new file mode 100644
index 0000000..0761178
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestWALEntrySinkFilter.java
@@ -0,0 +1,549 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.CellBuilderFactory;
+import org.apache.hadoop.hbase.CellBuilderType;
+import org.apache.hadoop.hbase.CellScanner;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.Stoppable;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.BufferedMutator;
+import org.apache.hadoop.hbase.client.BufferedMutatorParams;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionLocator;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Row;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.coprocessor.Batch;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos;
+import org.apache.hadoop.hbase.testclassification.ReplicationTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+import org.junit.rules.TestRule;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Simple test of sink-side wal entry filter facility.
+ */
+@Category({ReplicationTests.class, SmallTests.class})
+public class TestWALEntrySinkFilter {
+  private static final Log LOG = LogFactory.getLog(TestWALEntrySinkFilter.class);
+  @Rule public TestName name = new TestName();
+  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
+      withTimeout(this.getClass()).
+      withLookingForStuckThread(true).
+      build();
+  static final int BOUNDARY = 5;
+  static final AtomicInteger UNFILTERED = new AtomicInteger();
+  static final AtomicInteger FILTERED = new AtomicInteger();
+
+  /**
+   * Implementation of Stoppable to pass into ReplicationSink.
+   */
+  private static Stoppable STOPPABLE = new Stoppable() {
+    private final AtomicBoolean stop = new AtomicBoolean(false);
+
+    @Override
+    public boolean isStopped() {
+      return this.stop.get();
+    }
+
+    @Override
+    public void stop(String why) {
+      LOG.info("STOPPING BECAUSE: " + why);
+      this.stop.set(true);
+    }
+  };
+
+  /**
+   * Test filter.
+   * Filter will filter out any write time that is <= 5 (BOUNDARY). We count how many items we
+   * filter out and we count how many cells make it through for distribution way down below in the
+   * Table#batch implementation. Puts in place a custom DevNullConnection so we can insert our
+   * counting Table.
+   * @throws IOException
+   */
+  @Test
+  public void testWALEntryFilter() throws IOException {
+    Configuration conf = HBaseConfiguration.create();
+    // Make it so our filter is instantiated on construction of ReplicationSink.
+    conf.setClass(WALEntrySinkFilter.WAL_ENTRY_FILTER_KEY,
+        IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl.class, WALEntrySinkFilter.class);
+    conf.setClass("hbase.client.connection.impl", DevNullConnection.class,
+        Connection.class);
+    ReplicationSink sink = new ReplicationSink(conf, STOPPABLE);
+    // Create some dumb walentries.
+    List<org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry> entries =
+        new ArrayList<>();
+    AdminProtos.WALEntry.Builder entryBuilder = AdminProtos.WALEntry.newBuilder();
+    // Need a tablename.
+    ByteString tableName =
+        ByteString.copyFromUtf8(TableName.valueOf(this.name.getMethodName()).toString());
+    // Add WALEdit Cells to Cells List. The way edits arrive at the sink is with protos
+    // describing the edit with all Cells from all edits aggregated in a single CellScanner.
+    final List<Cell> cells = new ArrayList<>();
+    int count = BOUNDARY * 2;
+    for(int i = 0; i < count; i++) {
+      byte [] bytes = Bytes.toBytes(i);
+      // Create a wal entry. Everything is set to the current index as bytes or int/long.
+      entryBuilder.clear();
+      entryBuilder.setKey(entryBuilder.getKeyBuilder().
+          setLogSequenceNumber(i).
+          setEncodedRegionName(ByteString.copyFrom(bytes)).
+          setWriteTime(i).
+          setTableName(tableName).build());
+      // Lets have one Cell associated with each WALEdit.
+      entryBuilder.setAssociatedCellCount(1);
+      entries.add(entryBuilder.build());
+      // We need to add a Cell per WALEdit to the cells array.
+      CellBuilder cellBuilder = CellBuilderFactory.create(CellBuilderType.DEEP_COPY);
+      // Make cells whose row, family, cell, value, and ts are == 'i'.
+      Cell cell = cellBuilder.
+          setRow(bytes).
+          setFamily(bytes).
+          setQualifier(bytes).
+          setType(Cell.DataType.Put).
+          setTimestamp(i).
+          setValue(bytes).build();
+      cells.add(cell);
+    }
+    // Now wrap our cells array in a CellScanner that we can pass in to replicateEntries. It has
+    // all Cells from all the WALEntries made above.
+    CellScanner cellScanner = new CellScanner() {
+      // Set to -1 because advance gets called before current.
+      int index = -1;
+
+      @Override
+      public Cell current() {
+        return cells.get(index);
+      }
+
+      @Override
+      public boolean advance() throws IOException {
+        index++;
+        return index < cells.size();
+      }
+    };
+    // Call our sink.
+    sink.replicateEntries(entries, cellScanner, null, null, null);
+    // Check what made it through and what was filtered.
+    assertTrue(FILTERED.get() > 0);
+    assertTrue(UNFILTERED.get() > 0);
+    assertEquals(count, FILTERED.get() + UNFILTERED.get());
+  }
+
+  /**
+   * Simple filter that will filter out any entry whose writeTime is <= 5.
+   */
+  public static class IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl implements WALEntrySinkFilter {
+    public IfTimeIsGreaterThanBOUNDARYWALEntrySinkFilterImpl() {}
+
+    @Override
+    public void init(Connection connection) {
+      // Do nothing.
+    }
+
+    @Override
+    public boolean filter(TableName table, long writeTime) {
+      boolean b = writeTime <= BOUNDARY;
+      if (b) {
+        FILTERED.incrementAndGet();
+      }
+      return b;
+    }
+  }
+
+  /**
+   * A DevNull Connection whose only purpose is checking what edits made it through. See down in
+   * {@link Table#batch(List, Object[])}.
+   */
+  public static class DevNullConnection implements Connection {
+    private final Configuration configuration;
+
+    DevNullConnection(Configuration configuration, ExecutorService es, User user) {
+      this.configuration = configuration;
+    }
+
+    @Override
+    public void abort(String why, Throwable e) {
+
+    }
+
+    @Override
+    public boolean isAborted() {
+      return false;
+    }
+
+    @Override
+    public Configuration getConfiguration() {
+      return this.configuration;
+    }
+
+    @Override
+    public BufferedMutator getBufferedMutator(TableName tableName) throws IOException {
+      return null;
+    }
+
+    @Override
+    public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException {
+      return null;
+    }
+
+    @Override
+    public RegionLocator getRegionLocator(TableName tableName) throws IOException {
+      return null;
+    }
+
+    @Override
+    public Admin getAdmin() throws IOException {
+      return null;
+    }
+
+    @Override
+    public void close() throws IOException {
+
+    }
+
+    @Override
+    public boolean isClosed() {
+      return false;
+    }
+
+    @Override
+    public TableBuilder getTableBuilder(final TableName tableName, ExecutorService pool) {
+      return new TableBuilder() {
+        @Override
+        public TableBuilder setOperationTimeout(int timeout) {
+          return this;
+        }
+
+        @Override
+        public TableBuilder setRpcTimeout(int timeout) {
+          return this;
+        }
+
+        @Override
+        public TableBuilder setReadRpcTimeout(int timeout) {
+          return this;
+        }
+
+        @Override
+        public TableBuilder setWriteRpcTimeout(int timeout) {
+          return this;
+        }
+
+        @Override
+        public Table build() {
+          return new Table() {
+            @Override
+            public TableName getName() {
+              return tableName;
+            }
+
+            @Override
+            public Configuration getConfiguration() {
+              return configuration;
+            }
+
+            @Override
+            public HTableDescriptor getTableDescriptor() throws IOException {
+              return null;
+            }
+
+            @Override
+            public TableDescriptor getDescriptor() throws IOException {
+              return null;
+            }
+
+            @Override
+            public boolean exists(Get get) throws IOException {
+              return false;
+            }
+
+            @Override
+            public boolean[] exists(List<Get> gets) throws IOException {
+              return new boolean[0];
+            }
+
+            @Override
+            public void batch(List<? extends Row> actions, Object[] results) throws IOException, InterruptedException {
+              for (Row action: actions) {
+                // Row is the index of the loop above where we make WALEntry and Cells.
+                int row = Bytes.toInt(action.getRow());
+                assertTrue("" + row, row > BOUNDARY);
+                UNFILTERED.incrementAndGet();
+              }
+            }
+
+            @Override
+            public <R> void batchCallback(List<? extends Row> actions, Object[] results, Batch.Callback<R> callback) throws IOException, InterruptedException {
+
+            }
+
+            @Override
+            public Result get(Get get) throws IOException {
+              return null;
+            }
+
+            @Override
+            public Result[] get(List<Get> gets) throws IOException {
+              return new Result[0];
+            }
+
+            @Override
+            public ResultScanner getScanner(Scan scan) throws IOException {
+              return null;
+            }
+
+            @Override
+            public ResultScanner getScanner(byte[] family) throws IOException {
+              return null;
+            }
+
+            @Override
+            public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException {
+              return null;
+            }
+
+            @Override
+            public void put(Put put) throws IOException {
+
+            }
+
+            @Override
+            public void put(List<Put> puts) throws IOException {
+
+            }
+
+            @Override
+            public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put) throws IOException {
+              return false;
+            }
+
+            @Override
+            public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Put put) throws IOException {
+              return false;
+            }
+
+            @Override
+            public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Put put) throws IOException {
+              return false;
+            }
+
+            @Override
+            public void delete(Delete delete) throws IOException {
+
+            }
+
+            @Override
+            public void delete(List<Delete> deletes) throws IOException {
+
+            }
+
+            @Override
+            public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value, Delete delete) throws IOException {
+              return false;
+            }
+
+            @Override
+            public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, Delete delete) throws IOException {
+              return false;
+            }
+
+            @Override
+            public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, Delete delete) throws IOException {
+              return false;
+            }
+
+            @Override
+            public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
+              return null;
+            }
+
+            @Override
+            public void mutateRow(RowMutations rm) throws IOException {
+
+            }
+
+            @Override
+            public Result append(Append append) throws IOException {
+              return null;
+            }
+
+            @Override
+            public Result increment(Increment increment) throws IOException {
+              return null;
+            }
+
+            @Override
+            public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount) throws IOException {
+              return 0;
+            }
+
+            @Override
+            public long incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, long amount, Durability durability) throws IOException {
+              return 0;
+            }
+
+            @Override
+            public void close() throws IOException {
+
+            }
+
+            @Override
+            public CoprocessorRpcChannel coprocessorService(byte[] row) {
+              return null;
+            }
+
+            @Override
+            public <T extends com.google.protobuf.Service, R> Map<byte[], R> coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, Batch.Call<T, R> callable) throws com.google.protobuf.ServiceException, Throwable {
+              return null;
+            }
+
+            @Override
+            public <T extends com.google.protobuf.Service, R> void coprocessorService(Class<T> service, byte[] startKey, byte[] endKey, Batch.Call<T, R> callable, Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable {
+
+            }
+
+            @Override
+            public <R extends com.google.protobuf.Message> Map<byte[], R> batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype) throws com.google.protobuf.ServiceException, Throwable {
+              return null;
+            }
+
+            @Override
+            public <R extends com.google.protobuf.Message> void batchCoprocessorService(com.google.protobuf.Descriptors.MethodDescriptor methodDescriptor, com.google.protobuf.Message request, byte[] startKey, byte[] endKey, R responsePrototype, Batch.Callback<R> callback) throws com.google.protobuf.ServiceException, Throwable {
+
+            }
+
+            @Override
+            public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareFilter.CompareOp compareOp, byte[] value, RowMutations mutation) throws IOException {
+              return false;
+            }
+
+            @Override
+            public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op, byte[] value, RowMutations mutation) throws IOException {
+              return false;
+            }
+
+            @Override
+            public long getRpcTimeout(TimeUnit unit) {
+              return 0;
+            }
+
+            @Override
+            public int getRpcTimeout() {
+              return 0;
+            }
+
+            @Override
+            public void setRpcTimeout(int rpcTimeout) {
+
+            }
+
+            @Override
+            public long getReadRpcTimeout(TimeUnit unit) {
+              return 0;
+            }
+
+            @Override
+            public int getReadRpcTimeout() {
+              return 0;
+            }
+
+            @Override
+            public void setReadRpcTimeout(int readRpcTimeout) {
+
+            }
+
+            @Override
+            public long getWriteRpcTimeout(TimeUnit unit) {
+              return 0;
+            }
+
+            @Override
+            public int getWriteRpcTimeout() {
+              return 0;
+            }
+
+            @Override
+            public void setWriteRpcTimeout(int writeRpcTimeout) {
+
+            }
+
+            @Override
+            public long getOperationTimeout(TimeUnit unit) {
+              return 0;
+            }
+
+            @Override
+            public int getOperationTimeout() {
+              return 0;
+            }
+
+            @Override
+            public void setOperationTimeout(int operationTimeout) {
+
+            }
+          };
+        }
+      };
+    }
+  }
+}
+
+


[12/24] hbase git commit: HBASE-19540 Reduced number of unnecessary semicolons

Posted by zh...@apache.org.
HBASE-19540 Reduced number of unnecessary semicolons


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f46a6d16
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f46a6d16
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f46a6d16

Branch: refs/heads/HBASE-19397
Commit: f46a6d1637bc2c87001f08472e7ae6e39c8caeba
Parents: dbe409e
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sun Dec 17 17:27:58 2017 +0100
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Tue Dec 19 20:06:59 2017 +0100

----------------------------------------------------------------------
 .../hadoop/hbase/io/MetricsIOSourceImpl.java    |  5 +-
 .../hbase/IntegrationTestBackupRestore.java     |  3 +-
 .../hbase/IntegrationTestIngestWithMOB.java     |  2 +-
 .../StripeCompactionsPerformanceEvaluation.java |  2 +-
 .../RollingBatchRestartRsExceptMetaAction.java  |  3 +-
 ...tionTestWithCellVisibilityLoadAndVerify.java |  2 +-
 .../hadoop/hbase/mapreduce/SyncTable.java       |  5 +-
 .../hadoop/hbase/PerformanceEvaluation.java     |  2 +-
 .../hadoop/hbase/mapreduce/TestCopyTable.java   |  2 +-
 .../hbase/mapreduce/TestImportExport.java       | 73 ++++++++++----------
 .../procedure2/RemoteProcedureDispatcher.java   |  2 +-
 .../procedure2/store/wal/WALProcedureStore.java |  2 +-
 .../hbase/procedure2/TestProcedureToString.java |  2 +-
 .../procedure2/TestStateMachineProcedure.java   |  3 +-
 .../hadoop/hbase/rest/model/ScannerModel.java   |  2 +-
 .../hbase/rsgroup/RSGroupBasedLoadBalancer.java |  2 +-
 .../hadoop/hbase/thrift/TestThriftServer.java   |  2 +-
 17 files changed, 55 insertions(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java
index 3edbc55..b41a46f 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java
@@ -56,12 +56,12 @@ public class MetricsIOSourceImpl extends BaseSourceImpl implements MetricsIOSour
   @Override
   public void updateFsReadTime(long t) {
     fsReadTimeHisto.add(t);
-  };
+  }
 
   @Override
   public void updateFsPReadTime(long t) {
     fsPReadTimeHisto.add(t);
-  };
+  }
 
   @Override
   public void updateFsWriteTime(long t) {
@@ -80,5 +80,4 @@ public class MetricsIOSourceImpl extends BaseSourceImpl implements MetricsIOSour
 
     metricsRegistry.snapshot(mrb, all);
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
index f041f72..de307db 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestBackupRestore.java
@@ -164,8 +164,7 @@ public class IntegrationTestBackupRestore extends IntegrationTestBase {
 
     try (Connection conn = util.getConnection();
          Admin admin = conn.getAdmin();
-         BackupAdmin client = new BackupAdminImpl(conn);) {
-
+         BackupAdmin client = new BackupAdminImpl(conn)) {
       // #0- insert some data to table TABLE_NAME1, TABLE_NAME2
       loadData(TABLE_NAME1, rowsInBatch);
       loadData(TABLE_NAME2, rowsInBatch);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
index 010e4b9..0e0e73e 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithMOB.java
@@ -110,7 +110,7 @@ public class IntegrationTestIngestWithMOB extends IntegrationTestIngest {
   @Test
   public void testIngest() throws Exception {
     runIngestTest(JUNIT_RUN_TIME, 100, 10, 1024, 10, 20);
-  };
+  }
 
   @Override
   protected void initTable() throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java
index 64f7da1..9e95b01 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/StripeCompactionsPerformanceEvaluation.java
@@ -346,5 +346,5 @@ public class StripeCompactionsPerformanceEvaluation extends AbstractHBaseTool {
     public boolean verify(byte[] rowKey, byte[] cf, Set<byte[]> columnSet) {
       return true;
     }
-  };
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsExceptMetaAction.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsExceptMetaAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsExceptMetaAction.java
index f03b8ec..d0d0fe5 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsExceptMetaAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RollingBatchRestartRsExceptMetaAction.java
@@ -38,6 +38,5 @@ public class RollingBatchRestartRsExceptMetaAction extends RollingBatchRestartRs
     List<ServerName> servers = super.selectServers();
     servers.remove(metaServer);
     return servers;
-  };
-
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
index 3cafe9d..3f97fbb 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/test/IntegrationTestWithCellVisibilityLoadAndVerify.java
@@ -109,7 +109,7 @@ public class IntegrationTestWithCellVisibilityLoadAndVerify extends IntegrationT
   private static User USER1, USER2;
 
   private enum Counters {
-    ROWS_VIS_EXP_1, ROWS_VIS_EXP_2, ROWS_VIS_EXP_3, ROWS_VIS_EXP_4;
+    ROWS_VIS_EXP_1, ROWS_VIS_EXP_2, ROWS_VIS_EXP_3, ROWS_VIS_EXP_4
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
index edef842..eff1596 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/SyncTable.java
@@ -170,9 +170,10 @@ public class SyncTable extends Configured implements Tool {
 
     Throwable mapperException;
 
-    public static enum Counter {BATCHES, HASHES_MATCHED, HASHES_NOT_MATCHED, SOURCEMISSINGROWS,
+    public static enum Counter { BATCHES, HASHES_MATCHED, HASHES_NOT_MATCHED, SOURCEMISSINGROWS,
       SOURCEMISSINGCELLS, TARGETMISSINGROWS, TARGETMISSINGCELLS, ROWSWITHDIFFS, DIFFERENTCELLVALUES,
-      MATCHINGROWS, MATCHINGCELLS, EMPTY_BATCHES, RANGESMATCHED, RANGESNOTMATCHED};
+      MATCHINGROWS, MATCHINGCELLS, EMPTY_BATCHES, RANGESMATCHED, RANGESNOTMATCHED
+    }
 
     @Override
     protected void setup(Context context) throws IOException {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 4f8b82f..ef2d6d0 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -1194,7 +1194,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
         for (int i = startRow; i < lastRow; i++) {
           if (i % everyN != 0) continue;
           long startTime = System.nanoTime();
-          try (TraceScope scope = TraceUtil.createTrace("test row");){
+          try (TraceScope scope = TraceUtil.createTrace("test row")){
             testRow(i);
           }
           if ( (i - startRow) > opts.measureAfter) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
index 0bec03b..abb17d6 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java
@@ -80,7 +80,7 @@ public class TestCopyTable {
     final byte[] COLUMN1 = Bytes.toBytes("c1");
 
     try (Table t1 = TEST_UTIL.createTable(tableName1, FAMILY);
-         Table t2 = TEST_UTIL.createTable(tableName2, FAMILY);) {
+         Table t2 = TEST_UTIL.createTable(tableName2, FAMILY)) {
       // put rows into the first table
       for (int i = 0; i < 10; i++) {
         Put p = new Put(Bytes.toBytes("row" + i));

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index fcd01b6..2d8a2bd 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -185,7 +185,7 @@ public class TestImportExport {
    */
   @Test
   public void testSimpleCase() throws Throwable {
-    try (Table t = UTIL.createTable(TableName.valueOf(name.getMethodName()), FAMILYA, 3);) {
+    try (Table t = UTIL.createTable(TableName.valueOf(name.getMethodName()), FAMILYA, 3)) {
       Put p = new Put(ROW1);
       p.addColumn(FAMILYA, QUAL, now, QUAL);
       p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
@@ -203,37 +203,37 @@ public class TestImportExport {
       t.put(p);
     }
 
-      String[] args = new String[] {
-          // Only export row1 & row2.
-          "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1",
-          "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3",
-          name.getMethodName(),
-          FQ_OUTPUT_DIR,
-          "1000", // max number of key versions per key to export
+    String[] args = new String[] {
+      // Only export row1 & row2.
+      "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1",
+      "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3",
+      name.getMethodName(),
+      FQ_OUTPUT_DIR,
+      "1000", // max number of key versions per key to export
+    };
+    assertTrue(runExport(args));
+
+    final String IMPORT_TABLE = name.getMethodName() + "import";
+    try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3)) {
+      args = new String[] {
+        "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING,
+        IMPORT_TABLE,
+        FQ_OUTPUT_DIR
       };
-      assertTrue(runExport(args));
+      assertTrue(runImport(args));
 
-      final String IMPORT_TABLE = name.getMethodName() + "import";
-      try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) {
-        args = new String[] {
-            "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING,
-            IMPORT_TABLE,
-            FQ_OUTPUT_DIR
-        };
-        assertTrue(runImport(args));
-
-        Get g = new Get(ROW1);
-        g.setMaxVersions();
-        Result r = t.get(g);
-        assertEquals(3, r.size());
-        g = new Get(ROW2);
-        g.setMaxVersions();
-        r = t.get(g);
-        assertEquals(3, r.size());
-        g = new Get(ROW3);
-        r = t.get(g);
-        assertEquals(0, r.size());
-      }
+      Get g = new Get(ROW1);
+      g.setMaxVersions();
+      Result r = t.get(g);
+      assertEquals(3, r.size());
+      g = new Get(ROW2);
+      g.setMaxVersions();
+      r = t.get(g);
+      assertEquals(3, r.size());
+      g = new Get(ROW3);
+      r = t.get(g);
+      assertEquals(0, r.size());
+    }
   }
 
   /**
@@ -267,7 +267,7 @@ public class TestImportExport {
     FileSystem fs = FileSystem.get(UTIL.getConfiguration());
     fs.copyFromLocalFile(importPath, new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name));
     String IMPORT_TABLE = name;
-    try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3);) {
+    try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3)) {
       String[] args = new String[] {
               "-Dhbase.import.version=0.94" ,
               IMPORT_TABLE, FQ_OUTPUT_DIR
@@ -297,8 +297,7 @@ public class TestImportExport {
               .build())
             .build();
     UTIL.getAdmin().createTable(desc);
-    try (Table t = UTIL.getConnection().getTable(desc.getTableName());) {
-
+    try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
       Put p = new Put(ROW1);
       p.addColumn(FAMILYA, QUAL, now, QUAL);
       p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
@@ -329,8 +328,7 @@ public class TestImportExport {
               .build())
             .build();
     UTIL.getAdmin().createTable(desc);
-    try (Table t = UTIL.getConnection().getTable(desc.getTableName());) {
-
+    try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
       Put p = new Put(ROW1);
       p.addColumn(FAMILYA, QUAL, now, QUAL);
       p.addColumn(FAMILYA, QUAL, now + 1, QUAL);
@@ -363,7 +361,7 @@ public class TestImportExport {
               .build())
             .build();
     UTIL.getAdmin().createTable(desc);
-    try (Table t = UTIL.getConnection().getTable(desc.getTableName());) {
+    try (Table t = UTIL.getConnection().getTable(desc.getTableName())) {
       args = new String[] {
           IMPORT_TABLE,
           FQ_OUTPUT_DIR
@@ -717,8 +715,7 @@ public class TestImportExport {
   public void testDurability() throws Throwable {
     // Create an export table.
     String exportTableName = name.getMethodName() + "export";
-    try (Table exportTable = UTIL.createTable(TableName.valueOf(exportTableName), FAMILYA, 3);) {
-
+    try (Table exportTable = UTIL.createTable(TableName.valueOf(exportTableName), FAMILYA, 3)) {
       // Insert some data
       Put put = new Put(ROW1);
       put.addColumn(FAMILYA, QUAL, now, QUAL);

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 2b66e7c..4cee524 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -371,5 +371,5 @@ public abstract class RemoteProcedureDispatcher<TEnv, TRemote extends Comparable
     public DelayedTask(final FutureTask<Void> task, final long delay, final TimeUnit unit) {
       super(task, EnvironmentEdgeManager.currentTime() + unit.toMillis(delay));
     }
-  };
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
index 84cda65..a3c7cbf 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java
@@ -649,7 +649,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
     slotsCache.offer(slot);
   }
 
-  private enum PushType { INSERT, UPDATE, DELETE };
+  private enum PushType { INSERT, UPDATE, DELETE }
 
   private long pushData(final PushType type, final ByteSlot slot,
       final long procId, final long[] subProcIds) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java
index 9bbef91..921a8e1 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java
@@ -32,7 +32,7 @@ public class TestProcedureToString {
   /**
    * A do-nothing environment for BasicProcedure.
    */
-  static class BasicProcedureEnv {};
+  static class BasicProcedureEnv {}
 
   /**
    * A do-nothing basic procedure just for testing toString.

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java
index cbe50f2..a61370c 100644
--- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java
+++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java
@@ -149,7 +149,8 @@ public class TestStateMachineProcedure {
     assertEquals(TEST_FAILURE_EXCEPTION, cause);
   }
 
-  public enum TestSMProcedureState { STEP_1, STEP_2 };
+  public enum TestSMProcedureState { STEP_1, STEP_2 }
+
   public static class TestSMProcedure
       extends StateMachineProcedure<TestProcEnv, TestSMProcedureState> {
     protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
index a678fde..7748272 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
@@ -107,7 +107,7 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable {
   private static final long serialVersionUID = 1L;
 
   private byte[] startRow = HConstants.EMPTY_START_ROW;
-  private byte[] endRow = HConstants.EMPTY_END_ROW;;
+  private byte[] endRow = HConstants.EMPTY_END_ROW;
   private List<byte[]> columns = new ArrayList<>();
   private int batch = Integer.MAX_VALUE;
   private long startTime = 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
----------------------------------------------------------------------
diff --git a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
index d838edb..0c24ce5 100644
--- a/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
+++ b/hbase-rsgroup/src/main/java/org/apache/hadoop/hbase/rsgroup/RSGroupBasedLoadBalancer.java
@@ -209,7 +209,7 @@ public class RSGroupBasedLoadBalancer implements RSGroupableBalancer {
       }
 
       for (RegionInfo region : misplacedRegions) {
-        String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());;
+        String groupName = rsGroupInfoManager.getRSGroupOfTable(region.getTable());
         RSGroupInfo info = rsGroupInfoManager.getRSGroup(groupName);
         List<ServerName> candidateList = filterOfflineServers(info, servers);
         ServerName server = this.internalBalancer.randomAssignment(region,

http://git-wip-us.apache.org/repos/asf/hbase/blob/f46a6d16/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
index d0052e5..d19e336 100644
--- a/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
+++ b/hbase-thrift/src/test/java/org/apache/hadoop/hbase/thrift/TestThriftServer.java
@@ -210,7 +210,7 @@ public class TestThriftServer {
     int currentCountDeleteTable = getCurrentCount("deleteTable_num_ops", 2, metrics);
     int currentCountDisableTable = getCurrentCount("disableTable_num_ops", 2, metrics);
     createTestTables(handler);
-    dropTestTables(handler);;
+    dropTestTables(handler);
     metricsHelper.assertCounter("createTable_num_ops", currentCountCreateTable + 2,
       metrics.getSource());
     metricsHelper.assertCounter("deleteTable_num_ops", currentCountDeleteTable + 2,

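Most of the hunks above remove stray semicolons that javac accepts but that are dead syntax: a semicolon after an enum or class body declares an empty member, a doubled ';;' is an empty statement, and a semicolon before the closing parenthesis of a try-with-resources is redundant. A minimal before/after sketch of the three shapes (hypothetical names, not lines from the patch):

    // Before: all of this compiles, but the marked semicolons are dead syntax.
    private enum Counter { BATCHES, HASHES_MATCHED };        // ';' after the body is an empty declaration
    dropTestTables(handler);;                                // ';;' adds an empty statement
    try (Table t = util.createTable(tableName, family);) {   // ';' before ')' is redundant
      // use t
    }

    // After: identical behavior, no dead syntax.
    private enum Counter { BATCHES, HASHES_MATCHED }
    dropTestTables(handler);
    try (Table t = util.createTable(tableName, family)) {
      // use t
    }
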

[09/24] hbase git commit: HBASE-19532 AssignProcedure#COMPARATOR may produce incorrect sort order

Posted by zh...@apache.org.
HBASE-19532 AssignProcedure#COMPARATOR may produce incorrect sort order


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7a7e55b6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7a7e55b6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7a7e55b6

Branch: refs/heads/HBASE-19397
Commit: 7a7e55b601578f18b4eab0faaae060330c707b44
Parents: 74beb5a
Author: tedyu <yu...@gmail.com>
Authored: Mon Dec 18 18:32:24 2017 -0800
Committer: tedyu <yu...@gmail.com>
Committed: Mon Dec 18 18:32:24 2017 -0800

----------------------------------------------------------------------
 .../master/assignment/AssignProcedure.java      |  4 +-
 .../master/snapshot/TestAssignProcedure.java    | 39 +++++++++++++++-----
 2 files changed, 31 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/7a7e55b6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
index 5555062..770d8a4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/AssignProcedure.java
@@ -379,7 +379,7 @@ public class AssignProcedure extends RegionTransitionProcedure {
           return RegionInfo.COMPARATOR.compare(left.getRegionInfo(), right.getRegionInfo());
         }
         return -1;
-      } else if (left.getRegionInfo().isMetaRegion()) {
+      } else if (right.getRegionInfo().isMetaRegion()) {
         return +1;
       }
       if (left.getRegionInfo().getTable().isSystemTable()) {
@@ -387,7 +387,7 @@ public class AssignProcedure extends RegionTransitionProcedure {
           return RegionInfo.COMPARATOR.compare(left.getRegionInfo(), right.getRegionInfo());
         }
         return -1;
-      } else if (left.getRegionInfo().getTable().isSystemTable()) {
+      } else if (right.getRegionInfo().getTable().isSystemTable()) {
         return +1;
       }
       return RegionInfo.COMPARATOR.compare(left.getRegionInfo(), right.getRegionInfo());

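The two one-word changes above are the whole fix: both 'else if' branches re-tested left, so each was unreachable and a meta region (or system table) appearing only on the right-hand side never returned +1. A comparator that fails to do so is not antisymmetric (compare(a, b) < 0 and compare(b, a) < 0 can both hold), and List.sort may then produce an order that depends on how the input happened to be arranged. Sketched with the fix applied (simplified; isMetaRegion and compareRegionInfos stand in for the real calls):

    // Meta regions sort before everything else; ties fall back to RegionInfo order.
    if (left.isMetaRegion()) {
      return right.isMetaRegion() ? compareRegionInfos(left, right) : -1;
    } else if (right.isMetaRegion()) {
      return +1;  // previously unreachable: the old code tested 'left' again here
    }
    // ...the same pattern repeats for system tables before the final RegionInfo comparison.
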
http://git-wip-us.apache.org/repos/asf/hbase/blob/7a7e55b6/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestAssignProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestAssignProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestAssignProcedure.java
index ccf88de..1f93ff1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestAssignProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/snapshot/TestAssignProcedure.java
@@ -19,8 +19,11 @@
 package org.apache.hadoop.hbase.master.snapshot;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -40,6 +43,7 @@ import static junit.framework.TestCase.assertTrue;
 
 @Category({RegionServerTests.class, SmallTests.class})
 public class TestAssignProcedure {
+  private static final Log LOG = LogFactory.getLog(TestAssignProcedure.class);
   @Rule public TestName name = new TestName();
   @Rule public final TestRule timeout = CategoryBasedTimeout.builder().
       withTimeout(this.getClass()).
@@ -64,11 +68,17 @@ public class TestAssignProcedure {
   @Test
   public void testComparatorWithMetas() {
     List<AssignProcedure> procedures = new ArrayList<AssignProcedure>();
+    RegionInfo user3 = RegionInfoBuilder.newBuilder(TableName.valueOf("user3")).build();
+    procedures.add(new AssignProcedure(user3));
+    RegionInfo system = RegionInfoBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME).build();
+    procedures.add(new AssignProcedure(system));
     RegionInfo user1 = RegionInfoBuilder.newBuilder(TableName.valueOf("user_space1")).build();
+    RegionInfo user2 = RegionInfoBuilder.newBuilder(TableName.valueOf("user_space2")).build();
     procedures.add(new AssignProcedure(user1));
     RegionInfo meta2 = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
         setStartKey(Bytes.toBytes("002")).build();
     procedures.add(new AssignProcedure(meta2));
+    procedures.add(new AssignProcedure(user2));
     RegionInfo meta1 = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
         setStartKey(Bytes.toBytes("001")).build();
     procedures.add(new AssignProcedure(meta1));
@@ -76,15 +86,24 @@ public class TestAssignProcedure {
     RegionInfo meta0 = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).
         setStartKey(Bytes.toBytes("000")).build();
     procedures.add(new AssignProcedure(meta0));
-    RegionInfo user2 = RegionInfoBuilder.newBuilder(TableName.valueOf("user_space2")).build();
-    procedures.add(new AssignProcedure(user2));
-    RegionInfo system = RegionInfoBuilder.newBuilder(TableName.NAMESPACE_TABLE_NAME).build();
-    procedures.add(new AssignProcedure(system));
-    procedures.sort(AssignProcedure.COMPARATOR);
-    assertTrue(procedures.get(0).getRegionInfo().equals(RegionInfoBuilder.FIRST_META_REGIONINFO));
-    assertTrue(procedures.get(1).getRegionInfo().equals(meta0));
-    assertTrue(procedures.get(2).getRegionInfo().equals(meta1));
-    assertTrue(procedures.get(3).getRegionInfo().equals(meta2));
-    assertTrue(procedures.get(4).getRegionInfo().getTable().equals(TableName.NAMESPACE_TABLE_NAME));
+    for (int i = 0; i < 10; i++) {
+      Collections.shuffle(procedures);
+      procedures.sort(AssignProcedure.COMPARATOR);
+      try {
+        assertTrue(procedures.get(0).getRegionInfo().equals(RegionInfoBuilder.FIRST_META_REGIONINFO));
+        assertTrue(procedures.get(1).getRegionInfo().equals(meta0));
+        assertTrue(procedures.get(2).getRegionInfo().equals(meta1));
+        assertTrue(procedures.get(3).getRegionInfo().equals(meta2));
+        assertTrue(procedures.get(4).getRegionInfo().getTable().equals(TableName.NAMESPACE_TABLE_NAME));
+        assertTrue(procedures.get(5).getRegionInfo().equals(user1));
+        assertTrue(procedures.get(6).getRegionInfo().equals(user2));
+        assertTrue(procedures.get(7).getRegionInfo().equals(user3));
+      } catch (Throwable t) {
+        for (AssignProcedure proc : procedures) {
+          LOG.debug(proc);
+        }
+        throw t;
+      }
+    }
   }
 }

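The rewritten test is a useful pattern in its own right: a broken comparator can still yield the expected order for one lucky input arrangement, so the test now shuffles the list and re-sorts it ten times, making a non-antisymmetric comparator far more likely to surface, and it dumps the sorted procedures on failure so the offending arrangement can be diagnosed. The core idea in isolation (a hedged sketch; buildProcedures and assertExpectedOrder are hypothetical helpers):

    // Shuffle then sort repeatedly: a comparator that only works for one
    // input order will usually fail at least one of the iterations.
    List<AssignProcedure> procedures = buildProcedures();
    for (int i = 0; i < 10; i++) {
      Collections.shuffle(procedures);
      procedures.sort(AssignProcedure.COMPARATOR);
      assertExpectedOrder(procedures);
    }
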

[06/24] hbase git commit: HBASE-19122 Suspect methods on Cell to be deprecated; ADDENDUM to fix compile error

Posted by zh...@apache.org.
HBASE-19122 Suspect methods on Cell to be deprecated; ADDENDUM to fix
compile error


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6a9b1480
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6a9b1480
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6a9b1480

Branch: refs/heads/HBASE-19397
Commit: 6a9b1480c1a3cf04cc8cc1b108e549a2af76fe99
Parents: abae907
Author: Michael Stack <st...@apache.org>
Authored: Mon Dec 18 16:02:54 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Mon Dec 18 16:02:54 2017 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java   | 2 +-
 .../main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java    | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/6a9b1480/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index 96899d0..e184f7c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -1592,7 +1592,7 @@ public class MetaTableAccessor {
         .setFamily(HConstants.CATALOG_FAMILY)
         .setQualifier(getRegionStateColumn())
         .setTimestamp(put.getTimeStamp())
-        .setType(CellBuilder.DataType.Put)
+        .setType(DataType.Put)
         .setValue(Bytes.toBytes(state.name()))
         .build());
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/6a9b1480/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
index 644d1e8..32286b6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
@@ -199,6 +199,7 @@ public class KeyOnlyFilter extends FilterBase {
       return cell.getType();
     }
 
+
     @Override
     public long getSequenceId() {
       return 0;


[04/24] hbase git commit: HBASE-19122 Suspect methods on Cell to be deprecated

Posted by zh...@apache.org.
HBASE-19122 Suspect methods on Cell to be deprecated


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4056d26
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4056d26
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4056d26

Branch: refs/heads/HBASE-19397
Commit: b4056d267a2f13dc31182f17cb8eaf275d703663
Parents: 9d0c7c6
Author: Michael Stack <st...@apache.org>
Authored: Mon Dec 18 15:20:15 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Mon Dec 18 15:20:44 2017 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hbase/MetaTableAccessor.java  |  36 ++---
 .../hadoop/hbase/filter/KeyOnlyFilter.java      |  10 ++
 .../hadoop/hbase/protobuf/ProtobufUtil.java     |   8 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java     |   7 +-
 .../org/apache/hadoop/hbase/client/TestPut.java |   7 +-
 .../hbase/ipc/TestHBaseRpcControllerImpl.java   |   6 +
 .../hadoop/hbase/ByteBufferKeyOnlyKeyValue.java |   4 +
 .../apache/hadoop/hbase/ByteBufferKeyValue.java |  33 ++++
 .../main/java/org/apache/hadoop/hbase/Cell.java |  42 ++++-
 .../org/apache/hadoop/hbase/CellBuilder.java    |  14 +-
 .../java/org/apache/hadoop/hbase/CellUtil.java  |  16 +-
 .../org/apache/hadoop/hbase/ExtendedCell.java   |  46 ++++++
 .../hadoop/hbase/ExtendedCellBuilder.java       |  18 ++-
 .../hbase/ExtendedCellBuilderFactory.java       |  12 +-
 .../hadoop/hbase/ExtendedCellBuilderImpl.java   |  36 ++---
 .../hadoop/hbase/IndividualBytesFieldCell.java  |  33 ++++
 .../hbase/IndividualBytesFieldCellBuilder.java  |   8 -
 .../java/org/apache/hadoop/hbase/KeyValue.java  |  42 +++--
 .../apache/hadoop/hbase/KeyValueBuilder.java    |   8 -
 .../apache/hadoop/hbase/PrivateCellUtil.java    | 162 ++++++++++++++++++-
 .../java/org/apache/hadoop/hbase/RawCell.java   |   9 +-
 .../main/java/org/apache/hadoop/hbase/Tag.java  |  33 ----
 .../java/org/apache/hadoop/hbase/TagUtil.java   |  35 ++++
 .../io/encoding/BufferedDataBlockEncoder.java   |  62 +++++++
 .../apache/hadoop/hbase/TestCellBuilder.java    |   4 +-
 .../org/apache/hadoop/hbase/TestCellUtil.java   |  10 ++
 .../org/apache/hadoop/hbase/TestKeyValue.java   |   5 +
 .../example/MultiThreadedClientExample.java     |  30 ++--
 .../example/ValueRewritingObserver.java         |   3 +-
 .../example/WriteHeavyIncrementObserver.java    |   5 +-
 .../apache/hadoop/hbase/types/TestPBCell.java   |   5 +-
 .../apache/hadoop/hbase/util/MapReduceCell.java |  34 ++++
 .../apache/hadoop/hbase/rest/RowResource.java   |  12 +-
 .../RegionCoprocessorEnvironment.java           |   6 +-
 .../favored/FavoredNodeAssignmentHelper.java    |   5 +-
 .../hbase/master/TableNamespaceManager.java     |  12 +-
 .../master/assignment/RegionStateStore.java     |   8 +-
 .../org/apache/hadoop/hbase/mob/MobUtils.java   |   4 +-
 .../compactions/PartitionedMobCompactor.java    |   3 +-
 .../hadoop/hbase/regionserver/HMobStore.java    |   3 +-
 .../hadoop/hbase/regionserver/HRegion.java      |   2 +-
 .../regionserver/RegionCoprocessorHost.java     |  10 +-
 .../security/access/AccessControlLists.java     |   4 +-
 .../DefaultVisibilityLabelServiceImpl.java      |  18 +--
 .../VisibilityReplicationEndpoint.java          |   7 +-
 .../hbase/client/TestMultiRespectsLimits.java   |  16 +-
 .../hbase/regionserver/MockHStoreFile.java      |   8 +-
 .../TestCompactionLifeCycleTracker.java         |   6 +-
 .../regionserver/TestFlushLifeCycleTracker.java |   6 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  21 ++-
 .../hadoop/hbase/regionserver/TestHStore.java   |  11 +-
 .../security/token/TestTokenAuthentication.java |   4 +-
 .../ExpAsStringVisibilityLabelServiceImpl.java  |   3 +-
 .../hadoop/hbase/thrift/ThriftServerRunner.java |   9 +-
 .../hadoop/hbase/thrift2/ThriftUtilities.java   |   6 +-
 55 files changed, 683 insertions(+), 284 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index df2102a..96899d0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -37,6 +37,7 @@ import java.util.regex.Pattern;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
 import org.apache.hadoop.hbase.client.Consistency;
@@ -72,7 +73,6 @@ import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-
 import edu.umd.cs.findbugs.annotations.NonNull;
 import edu.umd.cs.findbugs.annotations.Nullable;
 
@@ -1361,7 +1361,7 @@ public class MetaTableAccessor {
             .setFamily(HConstants.REPLICATION_BARRIER_FAMILY)
             .setQualifier(seqBytes)
             .setTimestamp(put.getTimeStamp())
-            .setType(CellBuilder.DataType.Put)
+            .setType(DataType.Put)
             .setValue(seqBytes)
             .build())
        .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
@@ -1369,7 +1369,7 @@ public class MetaTableAccessor {
             .setFamily(HConstants.REPLICATION_META_FAMILY)
             .setQualifier(tableNameCq)
             .setTimestamp(put.getTimeStamp())
-            .setType(CellBuilder.DataType.Put)
+            .setType(DataType.Put)
             .setValue(tableName)
             .build());
     return put;
@@ -1383,7 +1383,7 @@ public class MetaTableAccessor {
             .setFamily(HConstants.REPLICATION_META_FAMILY)
             .setQualifier(daughterNameCq)
             .setTimestamp(put.getTimeStamp())
-            .setType(CellBuilder.DataType.Put)
+            .setType(DataType.Put)
             .setValue(value)
             .build());
     return put;
@@ -1396,7 +1396,7 @@ public class MetaTableAccessor {
             .setFamily(HConstants.REPLICATION_META_FAMILY)
             .setQualifier(parentNameCq)
             .setTimestamp(put.getTimeStamp())
-            .setType(CellBuilder.DataType.Put)
+            .setType(DataType.Put)
             .setValue(value)
             .build());
     return put;
@@ -1413,7 +1413,7 @@ public class MetaTableAccessor {
                 .setFamily(HConstants.CATALOG_FAMILY)
                 .setQualifier(HConstants.SPLITA_QUALIFIER)
                 .setTimestamp(put.getTimeStamp())
-                .setType(CellBuilder.DataType.Put)
+                .setType(DataType.Put)
                 .setValue(RegionInfo.toByteArray(splitA))
                 .build());
     }
@@ -1423,7 +1423,7 @@ public class MetaTableAccessor {
                 .setFamily(HConstants.CATALOG_FAMILY)
                 .setQualifier(HConstants.SPLITB_QUALIFIER)
                 .setTimestamp(put.getTimeStamp())
-                .setType(CellBuilder.DataType.Put)
+                .setType(DataType.Put)
                 .setValue(RegionInfo.toByteArray(splitB))
                 .build());
     }
@@ -1732,7 +1732,7 @@ public class MetaTableAccessor {
               .setFamily(HConstants.CATALOG_FAMILY)
               .setQualifier(HConstants.MERGEA_QUALIFIER)
               .setTimestamp(putOfMerged.getTimeStamp())
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(RegionInfo.toByteArray(regionA))
               .build())
           .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
@@ -1740,7 +1740,7 @@ public class MetaTableAccessor {
               .setFamily(HConstants.CATALOG_FAMILY)
               .setQualifier(HConstants.MERGEB_QUALIFIER)
               .setTimestamp(putOfMerged.getTimeStamp())
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(RegionInfo.toByteArray(regionB))
               .build());
 
@@ -1985,7 +1985,7 @@ public class MetaTableAccessor {
           .setFamily(HConstants.REPLICATION_POSITION_FAMILY)
           .setQualifier(Bytes.toBytes(peerId))
           .setTimestamp(put.getTimeStamp())
-          .setType(CellBuilder.DataType.Put)
+          .setType(DataType.Put)
           .setValue(Bytes.toBytes(Math.abs(entry.getValue())))
           .build());
       puts.add(put);
@@ -2153,7 +2153,7 @@ public class MetaTableAccessor {
         .setFamily(getCatalogFamily())
         .setQualifier(HConstants.REGIONINFO_QUALIFIER)
         .setTimestamp(p.getTimeStamp())
-        .setType(CellBuilder.DataType.Put)
+        .setType(DataType.Put)
         .setValue(RegionInfo.toByteArray(hri))
         .build());
     return p;
@@ -2170,7 +2170,7 @@ public class MetaTableAccessor {
               .setFamily(getCatalogFamily())
               .setQualifier(getServerColumn(replicaId))
               .setTimestamp(time)
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(Bytes.toBytes(sn.getAddress().toString()))
               .build())
             .add(builder.clear()
@@ -2178,7 +2178,7 @@ public class MetaTableAccessor {
               .setFamily(getCatalogFamily())
               .setQualifier(getStartCodeColumn(replicaId))
               .setTimestamp(time)
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(Bytes.toBytes(sn.getStartcode()))
               .build())
             .add(builder.clear()
@@ -2186,7 +2186,7 @@ public class MetaTableAccessor {
               .setFamily(getCatalogFamily())
               .setQualifier(getSeqNumColumn(replicaId))
               .setTimestamp(time)
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(Bytes.toBytes(openSeqNum))
               .build());
   }
@@ -2199,21 +2199,21 @@ public class MetaTableAccessor {
                 .setFamily(getCatalogFamily())
                 .setQualifier(getServerColumn(replicaId))
                 .setTimestamp(now)
-                .setType(CellBuilder.DataType.Put)
+                .setType(DataType.Put)
                 .build())
             .add(builder.clear()
                 .setRow(p.getRow())
                 .setFamily(getCatalogFamily())
                 .setQualifier(getStartCodeColumn(replicaId))
                 .setTimestamp(now)
-                .setType(CellBuilder.DataType.Put)
+                .setType(DataType.Put)
                 .build())
             .add(builder.clear()
                 .setRow(p.getRow())
                 .setFamily(getCatalogFamily())
                 .setQualifier(getSeqNumColumn(replicaId))
                 .setTimestamp(now)
-                .setType(CellBuilder.DataType.Put)
+                .setType(DataType.Put)
                 .build());
   }
 
@@ -2241,7 +2241,7 @@ public class MetaTableAccessor {
               .setFamily(HConstants.CATALOG_FAMILY)
               .setQualifier(getSeqNumColumn(replicaId))
               .setTimestamp(time)
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(Bytes.toBytes(openSeqNum))
               .build());
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
index 606728e..644d1e8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
@@ -195,6 +195,11 @@ public class KeyOnlyFilter extends FilterBase {
     }
 
     @Override
+    public DataType getType() {
+      return cell.getType();
+    }
+
+    @Override
     public long getSequenceId() {
       return 0;
     }
@@ -308,6 +313,11 @@ public class KeyOnlyFilter extends FilterBase {
     }
 
     @Override
+    public DataType getType() {
+      return cell.getType();
+    }
+
+    @Override
     public byte[] getValueArray() {
       if (lenAsVal) {
         return Bytes.toBytes(cell.getValueLength());

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index f334536..267dc7a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -27,6 +27,7 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
 import com.google.protobuf.TextFormat;
+
 import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Method;
@@ -37,9 +38,10 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.NavigableSet;
 import java.util.function.Function;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
@@ -534,7 +536,7 @@ public final class ProtobufUtil {
                   .setFamily(family)
                   .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null)
                   .setTimestamp(ts)
-                  .setType(CellBuilder.DataType.Put)
+                  .setType(DataType.Put)
                   .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null)
                   .setTags(allTagsBytes)
                   .build());
@@ -554,7 +556,7 @@ public final class ProtobufUtil {
                   .setFamily(family)
                   .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null)
                   .setTimestamp(ts)
-                  .setType(CellBuilder.DataType.Put)
+                  .setType(DataType.Put)
                   .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null)
                   .build());
             }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index d9c699b..c9ea5a5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -38,13 +38,14 @@ import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ByteBufferCell;
 import org.apache.hadoop.hbase.CacheEvictionStats;
 import org.apache.hadoop.hbase.CacheEvictionStatsBuilder;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.CellUtil;
@@ -676,7 +677,7 @@ public final class ProtobufUtil {
                   .setFamily(family)
                   .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null)
                   .setTimestamp(ts)
-                  .setType(CellBuilder.DataType.Put)
+                  .setType(DataType.Put)
                   .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null)
                   .setTags(allTagsBytes)
                   .build());
@@ -696,7 +697,7 @@ public final class ProtobufUtil {
                   .setFamily(family)
                   .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null)
                   .setTimestamp(ts)
-                  .setType(CellBuilder.DataType.Put)
+                  .setType(DataType.Put)
                   .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null)
                   .build());
             }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPut.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPut.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPut.java
index edc8a5a..0ae2dfa 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPut.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPut.java
@@ -24,8 +24,9 @@ import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
@@ -87,7 +88,7 @@ public class TestPut {
             .setFamily(family)
             .setQualifier(qualifier0)
             .setTimestamp(put.getTimeStamp())
-            .setType(CellBuilder.DataType.Put)
+            .setType(DataType.Put)
             .setValue(value0)
             .build())
         .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
@@ -95,7 +96,7 @@ public class TestPut {
             .setFamily(family)
             .setQualifier(qualifier1)
             .setTimestamp(ts1)
-            .setType(CellBuilder.DataType.Put)
+            .setType(DataType.Put)
             .setValue(value1)
             .build());
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java
index 0ec78ad..bfd1eb9 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java
@@ -177,6 +177,12 @@ public class TestHBaseRpcControllerImpl {
                 // unused
                 return null;
               }
+
+              @Override
+              public DataType getType() {
+                // unused
+                return null;
+              }
             };
           }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
index 3522e2d..713314e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyOnlyKeyValue.java
@@ -147,6 +147,10 @@ public class ByteBufferKeyOnlyKeyValue extends ByteBufferCell {
     return ByteBufferUtils.toByte(this.buf, this.offset + this.length - 1);
   }
 
+  public DataType getType() {
+    return PrivateCellUtil.toDataType(getTypeByte());
+  }
+
   @Override
   public long getSequenceId() {
     return 0;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java
index beadaf6..870d872 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ByteBufferKeyValue.java
@@ -17,9 +17,15 @@
  */
 package org.apache.hadoop.hbase;
 
+import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
+
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Optional;
 
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -344,4 +350,31 @@ public class ByteBufferKeyValue extends ByteBufferCell implements ExtendedCell {
     hash = 31 * hash + cell.getTypeByte();
     return hash;
   }
+
+  @Override
+  public Optional<Tag> getTag(byte type) {
+    int length = getTagsLength();
+    int offset = getTagsPosition();
+    int pos = offset;
+    int tagLen;
+    while (pos < offset + length) {
+      ByteBuffer tagsBuffer = getTagsByteBuffer();
+      tagLen = ByteBufferUtils.readAsInt(tagsBuffer, pos, TAG_LENGTH_SIZE);
+      if (ByteBufferUtils.toByte(tagsBuffer, pos + TAG_LENGTH_SIZE) == type) {
+        return Optional.ofNullable(new ByteBufferTag(tagsBuffer, pos, tagLen + TAG_LENGTH_SIZE));
+      }
+      pos += TAG_LENGTH_SIZE + tagLen;
+    }
+    return Optional.ofNullable(null);
+  }
+
+  @Override
+  public List<Tag> getTags() {
+    List<Tag> tags = new ArrayList<>();
+    Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(this);
+    while (tagsItr.hasNext()) {
+      tags.add(tagsItr.next());
+    }
+    return tags;
+  }
 }

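The getTag implementation above walks the cell's tag region directly: tags are laid out back to back, each entry carrying a 2-byte length prefix (Tag.TAG_LENGTH_SIZE) followed by a 1-byte tag type and then the payload, with the stored length covering the type byte plus payload. (The trailing Optional.ofNullable(null) is simply Optional.empty().) The equivalent walk over a plain byte[] looks roughly like this (a sketch with simplified locals, not code from the patch):

    int pos = tagsOffset;
    while (pos < tagsOffset + tagsLength) {
      int tagLen = Bytes.readAsInt(tagsArray, pos, Tag.TAG_LENGTH_SIZE); // 2-byte length prefix
      byte tagType = tagsArray[pos + Tag.TAG_LENGTH_SIZE];               // 1-byte type follows it
      // the payload is the remaining tagLen - 1 bytes
      pos += Tag.TAG_LENGTH_SIZE + tagLen;                               // step to the next entry
    }
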
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
index 2b99823..40f0a1c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Cell.java
@@ -133,8 +133,7 @@ public interface Cell {
 
   /**
    * @return The byte representation of the KeyValue.TYPE of this cell: one of Put, Delete, etc
-   * @deprecated since 2.0.0, use appropriate {@link CellUtil#isDelete} or
-   *    {@link CellUtil#isPut(Cell)} methods instead. This will be removed in 3.0.0.
+   * @deprecated As of HBase-2.0. Will be removed in HBase-3.0. Use {@link #getType()}.
    */
   @Deprecated
   byte getTypeByte();
@@ -148,7 +147,9 @@ public interface Cell {
    * {@link HConstants#KEEP_SEQID_PERIOD} days, but generally becomes irrelevant after the cell's
    * row is no longer involved in any operations that require strict consistency.
    * @return seqId (always &gt; 0 if exists), or 0 if it no longer exists
+   * @deprecated As of HBase-2.0. Will be removed in HBase-3.0.
    */
+  @Deprecated
   long getSequenceId();
 
   //7) Value
@@ -173,12 +174,16 @@ public interface Cell {
   /**
    * Contiguous raw bytes representing tags that may start at any index in the containing array.
    * @return the tags byte array
+   * @deprecated As of HBase-2.0. Will be removed in HBase-3.0. Tags are now internal.
    */
+  @Deprecated
   byte[] getTagsArray();
 
   /**
    * @return the first offset where the tags start in the Cell
+   * @deprecated As of HBase-2.0. Will be removed in HBase-3.0. Tags are now internal.
    */
+  @Deprecated
   int getTagsOffset();
 
   /**
@@ -190,6 +195,39 @@ public interface Cell {
    * less than Integer.MAX_VALUE.
    *
    * @return the total length of the tags in the Cell.
+   * @deprecated As of HBase-2.0. Will be removed in HBase-3.0. Tags are now internal.
    */
+  @Deprecated
   int getTagsLength();
+
+  /**
+   * Returns the type of this cell in a human-readable format using {@link DataType}.
+   * @return The data type of this cell: one of Put, Delete, etc.
+   */
+  DataType getType();
+
+  /**
+   * The valid types for a user to build a cell. Currently, this is a subset of {@link KeyValue.Type}.
+   */
+  public enum DataType {
+    Put((byte) 4),
+
+    Delete((byte) 8),
+
+    DeleteFamilyVersion((byte) 10),
+
+    DeleteColumn((byte) 12),
+
+    DeleteFamily((byte) 14);
+
+    private final byte code;
+
+    DataType(final byte c) {
+      this.code = c;
+    }
+
+    public byte getCode() {
+      return this.code;
+    }
+  }
 }

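Moving DataType from CellBuilder onto Cell is what drives the mechanical CellBuilder.DataType.Put -> DataType.Put edits in the hunks above (those files now import Cell.DataType directly). For orientation, building a cell through the public factory now reads roughly as follows (placeholder values, not a line from the patch):

    Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
        .setRow(Bytes.toBytes("row1"))
        .setFamily(Bytes.toBytes("f"))
        .setQualifier(Bytes.toBytes("q"))
        .setTimestamp(System.currentTimeMillis())
        .setType(Cell.DataType.Put)   // previously CellBuilder.DataType.Put
        .setValue(Bytes.toBytes("v"))
        .build();
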
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
index aeff15a..e89ac37 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellBuilder.java
@@ -26,18 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Public
 public interface CellBuilder {
 
-  /**
-   * The valid types for user to build the cell.
-   * Currently, This is subset of {@link KeyValue.Type}.
-   */
-  enum DataType {
-    Put,
-    Delete,
-    DeleteFamilyVersion,
-    DeleteColumn,
-    DeleteFamily
-  }
-
   CellBuilder setRow(final byte[] row);
   CellBuilder setRow(final byte[] row, final int rOffset, final int rLength);
 
@@ -49,7 +37,7 @@ public interface CellBuilder {
 
   CellBuilder setTimestamp(final long timestamp);
 
-  CellBuilder setType(final DataType type);
+  CellBuilder setType(final Cell.DataType type);
 
   CellBuilder setValue(final byte[] value);
   CellBuilder setValue(final byte[] value, final int vOffset, final int vLength);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index 3a8307c..f320083 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -564,40 +564,30 @@ public final class CellUtil {
    * Note : Now only CPs can create cell with tags using the CP environment
    * @return A new cell which is having the extra tags also added to it.
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
-   *             Use CP environment to build Cell using {@link ExtendedCellBuilder}
    *
    */
   @Deprecated
   public static Cell createCell(Cell cell, List<Tag> tags) {
-    return createCell(cell, Tag.fromList(tags));
+    return PrivateCellUtil.createCell(cell, tags);
   }
 
   /**
    * Now only CPs can create cell with tags using the CP environment
    * @return A new cell which is having the extra tags also added to it.
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
-   *            Use CP environment to build Cell using {@link ExtendedCellBuilder}
    */
   @Deprecated
   public static Cell createCell(Cell cell, byte[] tags) {
-    if (cell instanceof ByteBufferCell) {
-      return new PrivateCellUtil.TagRewriteByteBufferCell((ByteBufferCell) cell, tags);
-    }
-    return new PrivateCellUtil.TagRewriteCell(cell, tags);
+    return PrivateCellUtil.createCell(cell, tags);
   }
 
   /**
    * Now only CPs can create cell with tags using the CP environment
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
-   *             Use CP environment to build Cell using {@link ExtendedCellBuilder}
    */
   @Deprecated
   public static Cell createCell(Cell cell, byte[] value, byte[] tags) {
-    if (cell instanceof ByteBufferCell) {
-      return new PrivateCellUtil.ValueAndTagRewriteByteBufferCell((ByteBufferCell) cell, value,
-          tags);
-    }
-    return new PrivateCellUtil.ValueAndTagRewriteCell(cell, value, tags);
+    return PrivateCellUtil.createCell(cell, value, tags);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
index 81ca018..31df296 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCell.java
@@ -131,4 +131,50 @@ public interface ExtendedCell extends RawCell, HeapSize, Cloneable {
    * @param ts buffer containing the timestamp value
    */
   void setTimestamp(byte[] ts) throws IOException;
+
+  /**
+   * A region-specific unique monotonically increasing sequence ID given to each Cell. It always
+   * exists for cells in the memstore but is not retained forever. It will be kept for
+   * {@link HConstants#KEEP_SEQID_PERIOD} days, but generally becomes irrelevant after the cell's
+   * row is no longer involved in any operations that require strict consistency.
+   * @return seqId (always &gt; 0 if exists), or 0 if it no longer exists
+   */
+  long getSequenceId();
+
+  /**
+   * Contiguous raw bytes representing tags that may start at any index in the containing array.
+   * @return the tags byte array
+   */
+  byte[] getTagsArray();
+
+  /**
+   * @return the first offset where the tags start in the Cell
+   */
+  int getTagsOffset();
+
+  /**
+   * HBase internally uses 2 bytes to store tags length in Cell. As the tags length is always a
+   * non-negative number, to make good use of the sign bit, the max tags length is defined as 2 *
+   * Short.MAX_VALUE + 1 = 65535. As a result, the return type is int, because a short is not
+   * capable of handling that. Please note that even if the return type is int, the max tags length
+   * is far less than Integer.MAX_VALUE.
+   * @return the total length of the tags in the Cell.
+   */
+  int getTagsLength();
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * Note : This does not expose the internal types of Cells like {@link KeyValue.Type#Maximum} and
+   * {@link KeyValue.Type#Minimum}
+   */
+  @Override
+  default DataType getType() {
+    return PrivateCellUtil.toDataType(getTypeByte());
+  }
+
+  /**
+   * @return The byte representation of the KeyValue.TYPE of this cell: one of Put, Delete, etc
+   */
+  byte getTypeByte();
 }

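Because ExtendedCell implements getType() as a default method translating the raw type byte, the many internal cell implementations pick it up for free; only non-ExtendedCell implementations (such as the KeyOnlyFilter wrapper cells earlier in this patch) override it by hand. Given the byte codes declared on Cell.DataType, PrivateCellUtil.toDataType presumably reduces to a switch of this shape (a reconstruction, not the actual source):

    static Cell.DataType toDataType(byte typeByte) {
      switch (typeByte) {
        case 4:  return Cell.DataType.Put;                 // KeyValue.Type.Put
        case 8:  return Cell.DataType.Delete;
        case 10: return Cell.DataType.DeleteFamilyVersion;
        case 12: return Cell.DataType.DeleteColumn;
        case 14: return Cell.DataType.DeleteFamily;
        default: // Minimum/Maximum and friends are internal and not exposed here
          throw new UnsupportedOperationException("Invalid type of cell " + typeByte);
      }
    }
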
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilder.java
index 57fa44e..b964d67 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilder.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.util.List;
+
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -26,8 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience;
  * Use {@link ExtendedCellBuilderFactory} to get ExtendedCellBuilder instance.
  * TODO: ditto for ByteBufferCell?
  */
-@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
-public interface ExtendedCellBuilder extends CellBuilder {
+@InterfaceAudience.Private
+public interface ExtendedCellBuilder extends RawCellBuilder {
   @Override
   ExtendedCellBuilder setRow(final byte[] row);
   @Override
@@ -47,7 +49,7 @@ public interface ExtendedCellBuilder extends CellBuilder {
   ExtendedCellBuilder setTimestamp(final long timestamp);
 
   @Override
-  ExtendedCellBuilder setType(final DataType type);
+  ExtendedCellBuilder setType(final Cell.DataType type);
 
   ExtendedCellBuilder setType(final byte type);
 
@@ -62,11 +64,17 @@ public interface ExtendedCellBuilder extends CellBuilder {
   @Override
   ExtendedCellBuilder clear();
 
-  // TODO : While creating RawCellBuilder allow 'Tag' to be passed instead of byte[]
+  // We have this method for performance reasons so that one can create a cell directly from the
+  // tag byte[] of the cell without having to convert it to a list of Tag(s) and then add them
+  // back.
   ExtendedCellBuilder setTags(final byte[] tags);
-  // TODO : While creating RawCellBuilder allow 'Tag' to be passed instead of byte[]
+  // We have this method for performance reasons so that one can create a cell directly from the
+  // tag byte[] of the cell without having to convert it to a list of Tag(s) and then add them
+  // back.
   ExtendedCellBuilder setTags(final byte[] tags, int tagsOffset, int tagsLength);
 
+  @Override
+  ExtendedCellBuilder setTags(List<Tag> tags);
   /**
    * Internal usage. Be careful before you use this while building a cell
    * @param seqId set the seqId

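Taken together, the ExtendedCellBuilder changes above mean internal callers can hand tags to the
builder either pre-serialized (byte[]) or as a List&lt;Tag&gt;. A minimal sketch of the resulting
call pattern, assuming the APIs as they appear in this patch (row/family/qualifier values and the
tag type byte are purely illustrative):

    import java.util.Arrays;

    import org.apache.hadoop.hbase.ArrayBackedTag;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.ExtendedCell;
    import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
    import org.apache.hadoop.hbase.Tag;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BuilderTagsDemo {
      public static void main(String[] args) {
        // (byte) 64 is an arbitrary custom tag type, chosen only for illustration.
        Tag tag = new ArrayBackedTag((byte) 64, Bytes.toBytes("tag-value"));
        ExtendedCell cell = ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY)
            .setRow(Bytes.toBytes("row"))
            .setFamily(Bytes.toBytes("f"))
            .setQualifier(Bytes.toBytes("q"))
            .setTimestamp(System.currentTimeMillis())
            .setType(Cell.DataType.Put)
            .setValue(Bytes.toBytes("value"))
            .setTags(Arrays.asList(tag)) // serialized internally; see TagUtil.fromList below
            .build();
        System.out.println(cell.getTags().size()); // 1
      }
    }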
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilderFactory.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilderFactory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilderFactory.java
index 38778fb..f3acdf4 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilderFactory.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilderFactory.java
@@ -24,25 +24,17 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 public final class ExtendedCellBuilderFactory {
 
-  public static ExtendedCellBuilder create(CellBuilderType type) {
-    return create(type, true);
-  }
-
   /**
    * Allows creating a cell with the given CellBuilderType.
    * @param type the type of CellBuilder(DEEP_COPY or SHALLOW_COPY).
-   * @param allowSeqIdUpdate if seqId can be updated. CPs are not allowed to update
-   *        the seqId
   * @return an ExtendedCellBuilder of the given type
    */
-  public static ExtendedCellBuilder create(CellBuilderType type, boolean allowSeqIdUpdate) {
+  public static ExtendedCellBuilder create(CellBuilderType type) {
     switch (type) {
       case SHALLOW_COPY:
-        // CPs are not allowed to update seqID and they always use DEEP_COPY. So we have not
-        // passing 'allowSeqIdUpdate' to IndividualBytesFieldCellBuilder
         return new IndividualBytesFieldCellBuilder();
       case DEEP_COPY:
-        return new KeyValueBuilder(allowSeqIdUpdate);
+        return new KeyValueBuilder();
       default:
         throw new UnsupportedOperationException("The type:" + type + " is unsupported");
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilderImpl.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilderImpl.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilderImpl.java
index 536dbdc..770b61d 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilderImpl.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ExtendedCellBuilderImpl.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.util.List;
+
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -40,12 +42,6 @@ public abstract class ExtendedCellBuilderImpl implements ExtendedCellBuilder {
   protected byte[] tags = null;
   protected int tagsOffset = 0;
   protected int tagsLength = 0;
-  // Will go away once we do with RawCellBuilder
-  protected boolean allowSeqIdUpdate = false;
-
-  public ExtendedCellBuilderImpl(boolean allowSeqIdUpdate) {
-    this.allowSeqIdUpdate = allowSeqIdUpdate;
-  }
 
   @Override
   public ExtendedCellBuilder setRow(final byte[] row) {
@@ -93,8 +89,8 @@ public abstract class ExtendedCellBuilderImpl implements ExtendedCellBuilder {
   }
 
   @Override
-  public ExtendedCellBuilder setType(final DataType type) {
-    this.type = toKeyValueType(type);
+  public ExtendedCellBuilder setType(final Cell.DataType type) {
+    this.type = PrivateCellUtil.toTypeByte(type);
     return this;
   }
 
@@ -131,12 +127,15 @@ public abstract class ExtendedCellBuilderImpl implements ExtendedCellBuilder {
   }
 
   @Override
+  public ExtendedCellBuilder setTags(List<Tag> tags) {
+    byte[] tagBytes = TagUtil.fromList(tags);
+    return setTags(tagBytes);
+  }
+
+  @Override
   public ExtendedCellBuilder setSequenceId(final long seqId) {
-    if (allowSeqIdUpdate) {
-      this.seqId = seqId;
-      return this;
-    }
-    throw new UnsupportedOperationException("SeqId cannot be set on this cell");
+    this.seqId = seqId;
+    return this;
   }
 
   private void checkBeforeBuild() {
@@ -175,15 +174,4 @@ public abstract class ExtendedCellBuilderImpl implements ExtendedCellBuilder {
     tagsLength = 0;
     return this;
   }
-
-  private static KeyValue.Type toKeyValueType(DataType type) {
-    switch (type) {
-      case Put: return KeyValue.Type.Put;
-      case Delete: return KeyValue.Type.Delete;
-      case DeleteColumn: return KeyValue.Type.DeleteColumn;
-      case DeleteFamilyVersion: return KeyValue.Type.DeleteFamilyVersion;
-      case DeleteFamily: return KeyValue.Type.DeleteFamily;
-      default: throw new UnsupportedOperationException("Unsupported data type:" + type);
-    }
-  }
 }

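The removal of allowSeqIdUpdate here tracks the audience change above: now that coprocessors are
handed a RawCellBuilder (see the RegionCoprocessorEnvironment hunk further down) and
RawCellBuilder does not expose setSequenceId, the runtime guard in the internal
ExtendedCellBuilderImpl becomes redundant, so internal callers may always set the seqId.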
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java
index 7093b4b..a25bd19 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCell.java
@@ -18,6 +18,13 @@
 
 package org.apache.hadoop.hbase;
 
+import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Optional;
+
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
@@ -297,4 +304,30 @@ public class IndividualBytesFieldCell implements ExtendedCell {
   public String toString() {
     return CellUtil.toString(this, true);
   }
+
+  @Override
+  public Optional<Tag> getTag(byte type) {
+    int length = getTagsLength();
+    int offset = getTagsOffset();
+    int pos = offset;
+    while (pos < offset + length) {
+      int tagLen = Bytes.readAsInt(getTagsArray(), pos, TAG_LENGTH_SIZE);
+      if (getTagsArray()[pos + TAG_LENGTH_SIZE] == type) {
+        return Optional
+            .ofNullable(new ArrayBackedTag(getTagsArray(), pos, tagLen + TAG_LENGTH_SIZE));
+      }
+      pos += TAG_LENGTH_SIZE + tagLen;
+    }
+    return Optional.ofNullable(null);
+  }
+
+  @Override
+  public List<Tag> getTags() {
+    List<Tag> tags = new ArrayList<>();
+    Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(this);
+    while (tagsItr.hasNext()) {
+      tags.add(tagsItr.next());
+    }
+    return tags;
+  }
 }

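The getTag(byte) implementations added in this and the following classes all walk the same
serialized layout: each tag is a 2-byte length field (covering the type byte plus the value), a
1-byte type, then the value bytes. A small dependency-free sketch of that scan, with an
illustrative type byte and value:

    public class TagScanDemo {
      static final int TAG_LENGTH_SIZE = 2; // matches Tag.TAG_LENGTH_SIZE

      public static void main(String[] args) {
        // Serialize one tag by hand: [2-byte length][1-byte type][value bytes],
        // where the length counts the type byte plus the value.
        byte[] value = { 10, 20, 30 };
        byte type = 8; // illustrative tag type
        int tagLen = 1 + value.length;
        byte[] tags = new byte[TAG_LENGTH_SIZE + tagLen];
        tags[0] = (byte) (tagLen >> 8);
        tags[1] = (byte) tagLen;
        tags[2] = type;
        System.arraycopy(value, 0, tags, 3, value.length);

        // Scan the array the same way the new getTag(byte) implementations do.
        int pos = 0;
        while (pos < tags.length) {
          int len = ((tags[pos] & 0xFF) << 8) | (tags[pos + 1] & 0xFF);
          if (tags[pos + TAG_LENGTH_SIZE] == type) {
            System.out.println("found tag of length " + len + " at offset " + pos);
          }
          pos += TAG_LENGTH_SIZE + len;
        }
      }
    }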
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCellBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCellBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCellBuilder.java
index 62febf8..8a0168e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCellBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/IndividualBytesFieldCellBuilder.java
@@ -22,14 +22,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 class IndividualBytesFieldCellBuilder extends ExtendedCellBuilderImpl {
 
-  public IndividualBytesFieldCellBuilder() {
-    this(true);
-  }
-
-  public IndividualBytesFieldCellBuilder(boolean allowSeqIdUpdate) {
-    super(allowSeqIdUpdate);
-  }
-
   @Override
   public ExtendedCell innerBuild() {
     return new IndividualBytesFieldCell(row, rOffset, rLength,

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index 7093650..88e7d88 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -19,6 +19,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
 import static org.apache.hadoop.hbase.util.Bytes.len;
 
 import java.io.DataInput;
@@ -29,8 +30,10 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -1522,19 +1525,6 @@ public class KeyValue implements ExtendedCell {
   }
 
   /**
-   * Returns any tags embedded in the KeyValue.  Used in testcases.
-   * @return The tags
-   */
-  @Override
-  public List<Tag> getTags() {
-    int tagsLength = getTagsLength();
-    if (tagsLength == 0) {
-      return EMPTY_ARRAY_LIST;
-    }
-    return TagUtil.asList(getTagsArray(), getTagsOffset(), tagsLength);
-  }
-
-  /**
    * @return the backing array of the entire KeyValue (all KeyValue fields are in a single array)
    */
   @Override
@@ -2564,4 +2554,30 @@ public class KeyValue implements ExtendedCell {
     kv.setSequenceId(this.getSequenceId());
     return kv;
   }
+
+  @Override
+  public Optional<Tag> getTag(byte type) {
+    int length = getTagsLength();
+    int offset = getTagsOffset();
+    int pos = offset;
+    while (pos < offset + length) {
+      int tagLen = Bytes.readAsInt(getTagsArray(), pos, TAG_LENGTH_SIZE);
+      if (getTagsArray()[pos + TAG_LENGTH_SIZE] == type) {
+        return Optional
+            .ofNullable(new ArrayBackedTag(getTagsArray(), pos, tagLen + TAG_LENGTH_SIZE));
+      }
+      pos += TAG_LENGTH_SIZE + tagLen;
+    }
+    return Optional.ofNullable(null);
+  }
+
+  @Override
+  public List<Tag> getTags() {
+    List<Tag> tags = new ArrayList<>();
+    Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(this);
+    while (tagsItr.hasNext()) {
+      tags.add(tagsItr.next());
+    }
+    return tags;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueBuilder.java
index 4f01992..9480b71 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueBuilder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueBuilder.java
@@ -22,14 +22,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 @InterfaceAudience.Private
 class KeyValueBuilder extends ExtendedCellBuilderImpl {
 
-  KeyValueBuilder() {
-    this(true);
-  }
-
-  KeyValueBuilder(boolean allowSeqIdUpdate) {
-    super(allowSeqIdUpdate);
-  }
-
   @Override
   protected ExtendedCell innerBuild() {
     KeyValue kv = new KeyValue(row, rOffset, rLength,

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
index df080f3..e52ed84 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/PrivateCellUtil.java
@@ -21,6 +21,7 @@ import static org.apache.hadoop.hbase.HConstants.EMPTY_BYTE_ARRAY;
 import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -31,6 +32,7 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Optional;
+
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.filter.ByteArrayComparable;
 import org.apache.hadoop.hbase.io.HeapSize;
@@ -43,6 +45,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.yetus.audience.InterfaceAudience;
 
+
 /**
  * Utility methods helpful for slinging {@link Cell} instances. It has a more powerful and
  * richer set of APIs than those in {@link CellUtil}, for internal usage.
@@ -107,7 +110,7 @@ public final class PrivateCellUtil {
    * @return A new cell which is having the extra tags also added to it.
    */
   public static Cell createCell(Cell cell, List<Tag> tags) {
-    return createCell(cell, Tag.fromList(tags));
+    return createCell(cell, TagUtil.fromList(tags));
   }
 
   /**
@@ -311,6 +314,32 @@ public final class PrivateCellUtil {
       Cell clonedBaseCell = ((ExtendedCell) this.cell).deepClone();
       return new TagRewriteCell(clonedBaseCell, this.tags);
     }
+
+    @Override
+    public Optional<Tag> getTag(byte type) {
+      int length = getTagsLength();
+      int offset = getTagsOffset();
+      int pos = offset;
+      while (pos < offset + length) {
+        int tagLen = Bytes.readAsInt(getTagsArray(), pos, TAG_LENGTH_SIZE);
+        if (getTagsArray()[pos + TAG_LENGTH_SIZE] == type) {
+          return Optional
+              .ofNullable(new ArrayBackedTag(getTagsArray(), pos, tagLen + TAG_LENGTH_SIZE));
+        }
+        pos += TAG_LENGTH_SIZE + tagLen;
+      }
+      return Optional.ofNullable(null);
+    }
+
+    @Override
+    public List<Tag> getTags() {
+      List<Tag> tags = new ArrayList<>();
+      Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(this);
+      while (tagsItr.hasNext()) {
+        tags.add(tagsItr.next());
+      }
+      return tags;
+    }
   }
 
   static class TagRewriteByteBufferCell extends ByteBufferCell implements ExtendedCell {
@@ -544,6 +573,33 @@ public final class PrivateCellUtil {
     public int getTagsPosition() {
       return 0;
     }
+
+    @Override
+    public Optional<Tag> getTag(byte type) {
+      int length = getTagsLength();
+      int offset = getTagsPosition();
+      int pos = offset;
+      int tagLen;
+      while (pos < offset + length) {
+        ByteBuffer tagsBuffer = getTagsByteBuffer();
+        tagLen = ByteBufferUtils.readAsInt(tagsBuffer, pos, TAG_LENGTH_SIZE);
+        if (ByteBufferUtils.toByte(tagsBuffer, pos + TAG_LENGTH_SIZE) == type) {
+          return Optional.ofNullable(new ByteBufferTag(tagsBuffer, pos, tagLen + TAG_LENGTH_SIZE));
+        }
+        pos += TAG_LENGTH_SIZE + tagLen;
+      }
+      return Optional.ofNullable(null);
+    }
+
+    @Override
+    public List<Tag> getTags() {
+      List<Tag> tags = new ArrayList<>();
+      Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(this);
+      while (tagsItr.hasNext()) {
+        tags.add(tagsItr.next());
+      }
+      return tags;
+    }
   }
 
   static class ValueAndTagRewriteCell extends TagRewriteCell {
@@ -928,7 +984,7 @@ public final class PrivateCellUtil {
     return CellUtil.tagsIterator(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength());
   }
 
-  private static Iterator<Tag> tagsIterator(final ByteBuffer tags, final int offset,
+  public static Iterator<Tag> tagsIterator(final ByteBuffer tags, final int offset,
       final int length) {
     return new Iterator<Tag>() {
       private int pos = offset;
@@ -1231,6 +1287,29 @@ public final class PrivateCellUtil {
       cell.getQualifierLength());
   }
 
+  public static Cell.DataType toDataType(byte type) {
+    Type codeToType = KeyValue.Type.codeToType(type);
+    switch (codeToType) {
+      case Put: return Cell.DataType.Put;
+      case Delete: return Cell.DataType.Delete;
+      case DeleteColumn: return Cell.DataType.DeleteColumn;
+      case DeleteFamily: return Cell.DataType.DeleteFamily;
+      case DeleteFamilyVersion: return Cell.DataType.DeleteFamilyVersion;
+      default: throw new UnsupportedOperationException("Invalid type of cell " + type);
+    }
+  }
+
+  public static KeyValue.Type toTypeByte(Cell.DataType type) {
+    switch (type) {
+      case Put: return KeyValue.Type.Put;
+      case Delete: return KeyValue.Type.Delete;
+      case DeleteColumn: return KeyValue.Type.DeleteColumn;
+      case DeleteFamilyVersion: return KeyValue.Type.DeleteFamilyVersion;
+      case DeleteFamily: return KeyValue.Type.DeleteFamily;
+      default: throw new UnsupportedOperationException("Unsupported data type:" + type);
+    }
+  }
+
   /**
    * Compare cell's value against given comparator
    * @param cell
@@ -1345,6 +1424,32 @@ public final class PrivateCellUtil {
     public int getTagsLength() {
       return 0;
     }
+
+    @Override
+    public Optional<Tag> getTag(byte type) {
+      int length = getTagsLength();
+      int offset = getTagsOffset();
+      int pos = offset;
+      while (pos < offset + length) {
+        int tagLen = Bytes.readAsInt(getTagsArray(), pos, TAG_LENGTH_SIZE);
+        if (getTagsArray()[pos + TAG_LENGTH_SIZE] == type) {
+          return Optional
+              .ofNullable(new ArrayBackedTag(getTagsArray(), pos, tagLen + TAG_LENGTH_SIZE));
+        }
+        pos += TAG_LENGTH_SIZE + tagLen;
+      }
+      return Optional.ofNullable(null);
+    }
+
+    @Override
+    public List<Tag> getTags() {
+      List<Tag> tags = new ArrayList<>();
+      Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(this);
+      while (tagsItr.hasNext()) {
+        tags.add(tagsItr.next());
+      }
+      return tags;
+    }
   }
 
   /**
@@ -1498,6 +1603,33 @@ public final class PrivateCellUtil {
     public int getValuePosition() {
       return 0;
     }
+
+    @Override
+    public Optional<Tag> getTag(byte type) {
+      int length = getTagsLength();
+      int offset = getTagsPosition();
+      int pos = offset;
+      int tagLen;
+      while (pos < offset + length) {
+        ByteBuffer tagsBuffer = getTagsByteBuffer();
+        tagLen = ByteBufferUtils.readAsInt(tagsBuffer, pos, TAG_LENGTH_SIZE);
+        if (ByteBufferUtils.toByte(tagsBuffer, pos + TAG_LENGTH_SIZE) == type) {
+          return Optional.ofNullable(new ByteBufferTag(tagsBuffer, pos, tagLen + TAG_LENGTH_SIZE));
+        }
+        pos += TAG_LENGTH_SIZE + tagLen;
+      }
+      return Optional.ofNullable(null);
+    }
+
+    @Override
+    public List<Tag> getTags() {
+      List<Tag> tags = new ArrayList<>();
+      Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(this);
+      while (tagsItr.hasNext()) {
+        tags.add(tagsItr.next());
+      }
+      return tags;
+    }
   }
 
   private static class FirstOnRowCell extends EmptyCell {
@@ -1547,6 +1679,11 @@ public final class PrivateCellUtil {
     public byte getTypeByte() {
       return Type.Maximum.getCode();
     }
+
+    @Override
+    public DataType getType() {
+      throw new UnsupportedOperationException();
+    }
   }
 
   private static class FirstOnRowByteBufferCell extends EmptyByteBufferCell {
@@ -1597,6 +1734,11 @@ public final class PrivateCellUtil {
     public byte getTypeByte() {
       return Type.Maximum.getCode();
     }
+
+    @Override
+    public DataType getType() {
+      throw new UnsupportedOperationException();
+    }
   }
 
   private static class LastOnRowByteBufferCell extends EmptyByteBufferCell {
@@ -1647,6 +1789,11 @@ public final class PrivateCellUtil {
     public byte getTypeByte() {
       return Type.Minimum.getCode();
     }
+
+    @Override
+    public DataType getType() {
+      throw new UnsupportedOperationException();
+    }
   }
 
   private static class FirstOnRowColByteBufferCell extends FirstOnRowByteBufferCell {
@@ -1875,6 +2022,11 @@ public final class PrivateCellUtil {
     public byte getTypeByte() {
       return Type.Minimum.getCode();
     }
+
+    @Override
+    public DataType getType() {
+      throw new UnsupportedOperationException();
+    }
   }
 
   private static class LastOnRowColCell extends LastOnRowCell {
@@ -2060,6 +2212,11 @@ public final class PrivateCellUtil {
     public byte getTypeByte() {
       return Type.DeleteFamily.getCode();
     }
+
+    @Override
+    public DataType getType() {
+      return DataType.DeleteFamily;
+    }
   }
 
   /**
@@ -2890,5 +3047,4 @@ public final class PrivateCellUtil {
   public static Cell createFirstDeleteFamilyCellOnRow(final byte[] row, final byte[] fam) {
     return new FirstOnRowDeleteFamilyCell(row, fam);
   }
-
 }

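The toDataType/toTypeByte helpers added to PrivateCellUtil are inverses of each other for the
five user-visible mutation types; the internal Minimum and Maximum codes deliberately fall
through to UnsupportedOperationException. A minimal round-trip check, assuming Cell.DataType
carries exactly the values handled by the switches above (run with -ea to enable the assert):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.PrivateCellUtil;

    public class TypeRoundTripDemo {
      public static void main(String[] args) {
        for (Cell.DataType t : Cell.DataType.values()) {
          // Despite its name, toTypeByte returns a KeyValue.Type; its code is the byte.
          KeyValue.Type kvType = PrivateCellUtil.toTypeByte(t);
          assert PrivateCellUtil.toDataType(kvType.getCode()) == t : t;
        }
        System.out.println("round trip ok");
      }
    }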
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
index 9e25a9a..4cda7d5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCell.java
@@ -41,19 +41,14 @@ public interface RawCell extends Cell {
    * Creates a list of tags in the current cell
    * @return a list of tags
    */
-  default List<Tag> getTags() {
-    return PrivateCellUtil.getTags(this);
-  }
+  List<Tag> getTags();
 
   /**
    * Returns the specific tag of the given type
    * @param type the type of the tag
   * @return the specific tag if available, otherwise an empty Optional
    */
-  // TODO : Move to individual cell impl
-  default Optional<Tag> getTag(byte type) {
-    return PrivateCellUtil.getTag(this, type);
-  }
+  Optional<Tag> getTag(byte type);
 
   /**
    * Check the length of tags. If it is invalid, throw IllegalArgumentException

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
index 8709814..6f9bfdc 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hbase;
 
 import java.nio.ByteBuffer;
-import java.util.List;
 
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -153,38 +152,6 @@ public interface Tag {
   }
 
   /**
-   * Write a list of tags into a byte array
-   * @param tags The list of tags
-   * @return the serialized tag data as bytes
-   */
-  // TODO : Remove this when we move to RawCellBuilder
-  public static byte[] fromList(List<Tag> tags) {
-    if (tags == null || tags.isEmpty()) {
-      return HConstants.EMPTY_BYTE_ARRAY;
-    }
-    int length = 0;
-    for (Tag tag : tags) {
-      length += tag.getValueLength() + Tag.INFRASTRUCTURE_SIZE;
-    }
-    byte[] b = new byte[length];
-    int pos = 0;
-    int tlen;
-    for (Tag tag : tags) {
-      tlen = tag.getValueLength();
-      pos = Bytes.putAsShort(b, pos, tlen + Tag.TYPE_LENGTH_SIZE);
-      pos = Bytes.putByte(b, pos, tag.getType());
-      if (tag.hasArray()) {
-        pos = Bytes.putBytes(b, pos, tag.getValueArray(), tag.getValueOffset(), tlen);
-      } else {
-        ByteBufferUtils.copyFromBufferToArray(b, tag.getValueByteBuffer(), tag.getValueOffset(),
-          pos, tlen);
-        pos += tlen;
-      }
-    }
-    return b;
-  }
-
-  /**
    * Converts the value bytes of the given tag into a long value
    * @param tag The Tag
    * @return value as long

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
index 6ad66ba..34c78a5 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagUtil.java
@@ -137,6 +137,41 @@ public final class TagUtil {
   }
 
   /**
+   * Write a list of tags into a byte array.
+   * Note: this is a purely internal API. It helps in cases where we have a set of tags and want
+   * to create a cell out of them; for example, MOB creates a reference tag to indicate the
+   * presence of mob data. Also note that it is not exposed to coprocessors.
+   * @param tags The list of tags
+   * @return the serialized tag data as bytes
+   */
+  public static byte[] fromList(List<Tag> tags) {
+    if (tags == null || tags.isEmpty()) {
+      return HConstants.EMPTY_BYTE_ARRAY;
+    }
+    int length = 0;
+    for (Tag tag : tags) {
+      length += tag.getValueLength() + Tag.INFRASTRUCTURE_SIZE;
+    }
+    byte[] b = new byte[length];
+    int pos = 0;
+    int tlen;
+    for (Tag tag : tags) {
+      tlen = tag.getValueLength();
+      pos = Bytes.putAsShort(b, pos, tlen + Tag.TYPE_LENGTH_SIZE);
+      pos = Bytes.putByte(b, pos, tag.getType());
+      if (tag.hasArray()) {
+        pos = Bytes.putBytes(b, pos, tag.getValueArray(), tag.getValueOffset(), tlen);
+      } else {
+        ByteBufferUtils.copyFromBufferToArray(b, tag.getValueByteBuffer(), tag.getValueOffset(),
+          pos, tlen);
+        pos += tlen;
+      }
+    }
+    return b;
+  }
+
+  /**
    * Iterator returned when no Tags. Used by CellUtil too.
    */
   static final Iterator<Tag> EMPTY_TAGS_ITR = new Iterator<Tag>() {

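The size arithmetic in fromList is easy to verify by hand: each tag contributes its value length
plus Tag.INFRASTRUCTURE_SIZE, i.e. the 2-byte length field plus the 1-byte type. A small sketch
with two illustrative tags:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hbase.ArrayBackedTag;
    import org.apache.hadoop.hbase.Tag;
    import org.apache.hadoop.hbase.TagUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FromListDemo {
      public static void main(String[] args) {
        List<Tag> tags = Arrays.asList(
            new ArrayBackedTag((byte) 1, Bytes.toBytes("abc")),    // 3-byte value
            new ArrayBackedTag((byte) 2, Bytes.toBytes("defgh"))); // 5-byte value
        byte[] serialized = TagUtil.fromList(tags);
        // Expected: (3 + 3) + (5 + 3) = 14, with INFRASTRUCTURE_SIZE == 3.
        System.out.println(serialized.length); // 14
      }
    }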
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
index 9bcda01..f4d3c40 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/encoding/BufferedDataBlockEncoder.java
@@ -21,8 +21,14 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Optional;
 
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.ByteBufferCell;
+import org.apache.hadoop.hbase.ByteBufferTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellUtil;
@@ -32,6 +38,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.io.TagCompressionContext;
 import org.apache.hadoop.hbase.io.util.LRUDictionary;
 import org.apache.hadoop.hbase.io.util.StreamUtils;
@@ -475,6 +482,32 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
       // This is not used in actual flow. Throwing UnsupportedOperationException
       throw new UnsupportedOperationException();
     }
+
+    @Override
+    public Optional<Tag> getTag(byte type) {
+      int length = getTagsLength();
+      int offset = getTagsOffset();
+      int pos = offset;
+      while (pos < offset + length) {
+        int tagLen = Bytes.readAsInt(getTagsArray(), pos, Tag.TAG_LENGTH_SIZE);
+        if (getTagsArray()[pos + Tag.TAG_LENGTH_SIZE] == type) {
+          return Optional
+              .ofNullable(new ArrayBackedTag(getTagsArray(), pos, tagLen + Tag.TAG_LENGTH_SIZE));
+        }
+        pos += Tag.TAG_LENGTH_SIZE + tagLen;
+      }
+      return Optional.ofNullable(null);
+    }
+
+    @Override
+    public List<Tag> getTags() {
+      List<Tag> tags = new ArrayList<>();
+      Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(this);
+      while (tagsItr.hasNext()) {
+        tags.add(tagsItr.next());
+      }
+      return tags;
+    }
   }
 
   protected static class OffheapDecodedCell extends ByteBufferCell implements ExtendedCell {
@@ -720,6 +753,35 @@ abstract class BufferedDataBlockEncoder extends AbstractDataBlockEncoder {
       // This is not used in actual flow. Throwing UnsupportedOperationException
       throw new UnsupportedOperationException();
     }
+
+    @Override
+    public Optional<Tag> getTag(byte type) {
+      int length = getTagsLength();
+      int offset = getTagsPosition();
+      int pos = offset;
+      int tagLen;
+      while (pos < offset + length) {
+        ByteBuffer tagsBuffer = getTagsByteBuffer();
+        tagLen = ByteBufferUtils.readAsInt(tagsBuffer, pos, Tag.TAG_LENGTH_SIZE);
+        if (ByteBufferUtils.toByte(tagsBuffer, pos + Tag.TAG_LENGTH_SIZE) == type) {
+          return Optional
+              .ofNullable(new ByteBufferTag(tagsBuffer, pos, tagLen + Tag.TAG_LENGTH_SIZE));
+        }
+        pos += Tag.TAG_LENGTH_SIZE + tagLen;
+      }
+      return Optional.ofNullable(null);
+    }
+
+    @Override
+    public List<Tag> getTags() {
+      List<Tag> tags = new ArrayList<>();
+      Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(this);
+      while (tagsItr.hasNext()) {
+        tags.add(tagsItr.next());
+      }
+      return tags;
+    }
+
   }
 
   protected abstract static class BufferedEncodedSeeker<STATE extends SeekerState>

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellBuilder.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellBuilder.java
index ad18547..5c6c65a 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellBuilder.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellBuilder.java
@@ -41,7 +41,7 @@ public class TestCellBuilder {
             .setRow(row)
             .setFamily(family)
             .setQualifier(qualifier)
-            .setType(CellBuilder.DataType.Put)
+            .setType(Cell.DataType.Put)
             .setValue(value)
             .build();
     row[0] = NEW_DATA;
@@ -64,7 +64,7 @@ public class TestCellBuilder {
             .setRow(row)
             .setFamily(family)
             .setQualifier(qualifier)
-            .setType(CellBuilder.DataType.Put)
+            .setType(Cell.DataType.Put)
             .setValue(value)
             .build();
     row[0] = NEW_DATA;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java
index 0395c09..4ab6bce 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestCellUtil.java
@@ -198,6 +198,11 @@ public class TestCellUtil {
       // TODO Auto-generated method stub
       return 0;
     }
+
+    @Override
+    public DataType getType() {
+      return PrivateCellUtil.toDataType(getTypeByte());
+    }
   };
 
   /**
@@ -613,5 +618,10 @@ public class TestCellUtil {
     public int getTagsLength() {
       return this.kv.getTagsLength();
     }
+
+    @Override
+    public DataType getType() {
+      return PrivateCellUtil.toDataType(getTypeByte());
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
index 86891ae..c6b7265 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/TestKeyValue.java
@@ -737,5 +737,10 @@ public class TestKeyValue extends TestCase {
     public byte[] getTagsArray() {
       return this.kv.getTagsArray();
     }
+
+    @Override
+    public DataType getType() {
+      return PrivateCellUtil.toDataType(getTypeByte());
+    }
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.java
index 5d95fde..e460316 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/MultiThreadedClientExample.java
@@ -19,10 +19,22 @@
 package org.apache.hadoop.hbase.client.example;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.TableName;
@@ -39,18 +51,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.Future;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.TimeUnit;
-
 
 /**
  * Example on how to use HBase's {@link Connection} and {@link Table} in a
@@ -226,7 +226,7 @@ public class MultiThreadedClientExample extends Configured implements Tool {
                 .setFamily(FAMILY)
                 .setQualifier(QUAL)
                 .setTimestamp(p.getTimeStamp())
-                .setType(CellBuilder.DataType.Put)
+                .setType(DataType.Put)
                 .setValue(value)
                 .build());
           puts.add(p);
@@ -263,7 +263,7 @@ public class MultiThreadedClientExample extends Configured implements Tool {
                 .setFamily(FAMILY)
                 .setQualifier(QUAL)
                 .setTimestamp(p.getTimeStamp())
-                .setType(CellBuilder.DataType.Put)
+                .setType(DataType.Put)
                 .setValue(value)
                 .build());
         t.put(p);

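The mechanical change running through the example and server classes above and below is the move
of the DataType enum from CellBuilder to Cell. A condensed sketch of the updated client-side
pattern (column names and values are illustrative):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellBuilderFactory;
    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutCellDemo {
      public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        Put p = new Put(row);
        p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
            .setRow(row)
            .setFamily(Bytes.toBytes("f"))
            .setQualifier(Bytes.toBytes("q"))
            .setTimestamp(p.getTimeStamp())
            .setType(Cell.DataType.Put) // was CellBuilder.DataType.Put before this patch
            .setValue(Bytes.toBytes("v"))
            .build());
        System.out.println(p);
      }
    }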
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.java
index 863ea89..cf7796b 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ValueRewritingObserver.java
@@ -22,7 +22,6 @@ import java.util.Optional;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellBuilder;
-import org.apache.hadoop.hbase.CellBuilder.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
@@ -89,7 +88,7 @@ public class ValueRewritingObserver implements RegionObserver, RegionCoprocessor
               cellBuilder.setFamily(CellUtil.cloneFamily(c));
               cellBuilder.setQualifier(CellUtil.cloneQualifier(c));
               cellBuilder.setTimestamp(c.getTimestamp());
-              cellBuilder.setType(DataType.Put);
+              cellBuilder.setType(Cell.DataType.Put);
               // Make sure each cell gets a unique value
               byte[] clonedValue = new byte[replacedValue.length];
               System.arraycopy(replacedValue, 0, clonedValue, 0, replacedValue.length);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java
index 55d9ac3..63637b5 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/WriteHeavyIncrementObserver.java
@@ -29,7 +29,6 @@ import java.util.stream.IntStream;
 
 import org.apache.commons.lang3.mutable.MutableLong;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
@@ -80,7 +79,7 @@ public class WriteHeavyIncrementObserver implements RegionCoprocessor, RegionObs
 
   private Cell createCell(byte[] row, byte[] family, byte[] qualifier, long ts, long value) {
     return CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row)
-        .setType(CellBuilder.DataType.Put).setFamily(family).setQualifier(qualifier)
+        .setType(Cell.DataType.Put).setFamily(family).setQualifier(qualifier)
         .setTimestamp(ts).setValue(Bytes.toBytes(value)).build();
   }
 
@@ -250,7 +249,7 @@ public class WriteHeavyIncrementObserver implements RegionCoprocessor, RegionObs
             .setQualifier(cell.getQualifierArray(), cell.getQualifierOffset(),
               cell.getQualifierLength())
             .setValue(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())
-            .setType(CellBuilder.DataType.Put).setTimestamp(ts).build());
+            .setType(Cell.DataType.Put).setTimestamp(ts).build());
       }
     }
     c.getEnvironment().getRegion().put(put);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java
----------------------------------------------------------------------
diff --git a/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java b/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java
index 7f94f93..77c9e22 100644
--- a/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java
+++ b/hbase-examples/src/test/java/org/apache/hadoop/hbase/types/TestPBCell.java
@@ -46,7 +46,7 @@ public class TestPBCell {
   @Test
   public void testRoundTrip() {
     final Cell cell = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"),
-      Bytes.toBytes("qual"), Bytes.toBytes("val"));
+        Bytes.toBytes("qual"), Bytes.toBytes("val"));
     CellProtos.Cell c = ProtobufUtil.toCell(cell), decoded;
     PositionedByteRange pbr = new SimplePositionedByteRange(c.getSerializedSize());
     pbr.setPosition(0);
@@ -54,6 +54,7 @@ public class TestPBCell {
     pbr.setPosition(0);
     decoded = CODEC.decode(pbr);
     assertEquals(encodedLength, pbr.getPosition());
-    assertTrue(CellUtil.equals(cell, ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), decoded)));
+    assertTrue(CellUtil.equals(cell, ProtobufUtil
+        .toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), decoded)));
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceCell.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceCell.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceCell.java
index 38ff59b..ae47e7a 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceCell.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapReduceCell.java
@@ -17,15 +17,23 @@
  */
 package org.apache.hadoop.hbase.util;
 
+import static org.apache.hadoop.hbase.Tag.TAG_LENGTH_SIZE;
+
 import java.io.IOException;
 import java.io.OutputStream;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Optional;
 
+import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.ByteBufferCell;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.PrivateCellUtil;
+import org.apache.hadoop.hbase.Tag;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -268,4 +276,30 @@ public class MapReduceCell extends ByteBufferCell implements ExtendedCell {
       throw new RuntimeException(e);
     }
   }
+
+  @Override
+  public Optional<Tag> getTag(byte type) {
+    int length = getTagsLength();
+    int offset = getTagsOffset();
+    int pos = offset;
+    while (pos < offset + length) {
+      int tagLen = Bytes.readAsInt(getTagsArray(), pos, TAG_LENGTH_SIZE);
+      if (getTagsArray()[pos + TAG_LENGTH_SIZE] == type) {
+        return Optional
+            .ofNullable(new ArrayBackedTag(getTagsArray(), pos, tagLen + TAG_LENGTH_SIZE));
+      }
+      pos += TAG_LENGTH_SIZE + tagLen;
+    }
+    return Optional.ofNullable(null);
+  }
+
+  @Override
+  public List<Tag> getTags() {
+    List<Tag> tags = new ArrayList<>();
+    Iterator<Tag> tagsItr = PrivateCellUtil.tagsIterator(this);
+    while (tagsItr.hasNext()) {
+      tags.add(tagsItr.next());
+    }
+    return tags;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
----------------------------------------------------------------------
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
index dead804..8c1cb5b 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java
@@ -39,11 +39,10 @@ import javax.ws.rs.core.UriInfo;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Append;
@@ -56,6 +55,7 @@ import org.apache.hadoop.hbase.rest.model.CellModel;
 import org.apache.hadoop.hbase.rest.model.CellSetModel;
 import org.apache.hadoop.hbase.rest.model.RowModel;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class RowResource extends ResourceBase {
@@ -246,7 +246,7 @@ public class RowResource extends ResourceBase {
               .setFamily(parts[0])
               .setQualifier(parts[1])
               .setTimestamp(cell.getTimestamp())
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(cell.getValue())
               .build());
         }
@@ -321,7 +321,7 @@ public class RowResource extends ResourceBase {
         .setFamily(parts[0])
         .setQualifier(parts[1])
         .setTimestamp(timestamp)
-        .setType(CellBuilder.DataType.Put)
+        .setType(DataType.Put)
         .setValue(message)
         .build());
       table = servlet.getTable(tableResource.getName());
@@ -518,7 +518,7 @@ public class RowResource extends ResourceBase {
               .setFamily(parts[0])
               .setQualifier(parts[1])
               .setTimestamp(cell.getTimestamp())
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(cell.getValue())
               .build());
           if(Bytes.equals(col,

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java
index 3380639..84e6d25 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionCoprocessorEnvironment.java
@@ -24,8 +24,8 @@ import java.util.concurrent.ConcurrentMap;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CoprocessorEnvironment;
-import org.apache.hadoop.hbase.ExtendedCellBuilder;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.RawCellBuilder;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
@@ -127,7 +127,7 @@ public interface RegionCoprocessorEnvironment extends CoprocessorEnvironment<Reg
   /**
    * Returns a CellBuilder so that coprocessors can build cells. These cells can also include tags.
   * Note that this builder does not support updating the seqId of the cells.
-   * @return the ExtendedCellBuilder
+   * @return the RawCellBuilder
    */
-  ExtendedCellBuilder getCellBuilder();
+  RawCellBuilder getCellBuilder();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index 14b2466..136453a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -35,7 +35,7 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.HBaseIOException;
@@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.master.RackManager;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
-
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -184,7 +183,7 @@ public class FavoredNodeAssignmentHelper {
           .setFamily(HConstants.CATALOG_FAMILY)
           .setQualifier(FAVOREDNODES_QUALIFIER)
           .setTimestamp(EnvironmentEdgeManager.currentTime())
-          .setType(CellBuilder.DataType.Put)
+          .setType(DataType.Put)
           .setValue(favoredNodes)
           .build());
       LOG.debug("Create the region " + regionInfo.getRegionNameAsString() +

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
index 6a138ff..174272e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/TableNamespaceManager.java
@@ -26,37 +26,35 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.ZKNamespaceManager;
-import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.constraint.ConstraintException;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.Threads;
-
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * This is a helper class used internally to manage the namespace metadata that is stored in
@@ -160,7 +158,7 @@ public class TableNamespaceManager {
           .setFamily(TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES)
           .setQualifier(TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES)
           .setTimestamp(p.getTimeStamp())
-          .setType(CellBuilder.DataType.Put)
+          .setType(DataType.Put)
           .setValue(ProtobufUtil.toProtoNamespaceDescriptor(ns).toByteArray())
           .build());
     nsTable.put(p);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
index 0b49b36..079dbd5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionStateStore.java
@@ -23,10 +23,11 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.HConstants;
@@ -51,7 +52,6 @@ import org.apache.zookeeper.KeeperException;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-
 /**
  * Store Region State to hbase:meta table.
  */
@@ -185,7 +185,7 @@ public class RegionStateStore {
           .setFamily(HConstants.CATALOG_FAMILY)
           .setQualifier(getServerNameColumn(replicaId))
           .setTimestamp(put.getTimeStamp())
-          .setType(CellBuilder.DataType.Put)
+          .setType(DataType.Put)
           .setValue(Bytes.toBytes(regionLocation.getServerName()))
           .build());
       info.append(", regionLocation=").append(regionLocation);
@@ -195,7 +195,7 @@ public class RegionStateStore {
         .setFamily(HConstants.CATALOG_FAMILY)
         .setQualifier(getStateColumn(replicaId))
         .setTimestamp(put.getTimeStamp())
-        .setType(CellBuilder.DataType.Put)
+        .setType(DataType.Put)
         .setValue(Bytes.toBytes(state.name()))
         .build());
     LOG.info(info);


[17/24] hbase git commit: HBASE-19480 Enabled Checkstyle to fail on violations in hbase-annotations

Posted by zh...@apache.org.
HBASE-19480 Enabled Checkstyle to fail on violations in hbase-annotations


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/df351e40
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/df351e40
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/df351e40

Branch: refs/heads/HBASE-19397
Commit: df351e4035df0f8db56245ea9cf54952874294ca
Parents: ec7bf57
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sun Dec 10 19:06:16 2017 +0100
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Tue Dec 19 21:35:54 2017 +0100

----------------------------------------------------------------------
 hbase-annotations/pom.xml | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/df351e40/hbase-annotations/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index 8165fbd..86eb313 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -30,4 +30,25 @@
   <artifactId>hbase-annotations</artifactId>
   <name>Apache HBase - Annotations</name>
   <description>Annotations for tests</description>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>checkstyle</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>check</goal>
+            </goals>
+            <configuration>
+              <failOnViolation>true</failOnViolation>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
 </project>


[24/24] hbase git commit: HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure

Posted by zh...@apache.org.
HBASE-19536 Client side changes for moving peer modification from zk watcher to procedure

Signed-off-by: zhangduo <zh...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9dd4ada2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9dd4ada2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9dd4ada2

Branch: refs/heads/HBASE-19397
Commit: 9dd4ada2a081e7c75a65b15f2ea4ade3c9e19bef
Parents: 87a5b42
Author: Guanghao Zhang <zg...@apache.org>
Authored: Tue Dec 19 15:50:57 2017 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Wed Dec 20 09:29:13 2017 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/client/Admin.java   |  87 ++++++++++-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  | 149 ++++++++++++++-----
 .../hadoop/hbase/client/RawAsyncHBaseAdmin.java |  82 +++++-----
 .../replication/TestReplicationAdmin.java       |   2 +-
 4 files changed, 239 insertions(+), 81 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9dd4ada2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 0567e8e..fe5eeb6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -2464,7 +2464,7 @@ public interface Admin extends Abortable, Closeable {
   /**
    * Add a new replication peer for replicating data to slave cluster.
    * @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
    * @throws IOException if a remote or network exception occurs
    */
   default void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig)
@@ -2475,7 +2475,7 @@ public interface Admin extends Abortable, Closeable {
   /**
    * Add a new replication peer for replicating data to slave cluster.
    * @param peerId a short name that identifies the peer
-   * @param peerConfig configuration for the replication slave cluster
+   * @param peerConfig configuration for the replication peer
    * @param enabled peer state, true if ENABLED and false if DISABLED
    * @throws IOException if a remote or network exception occurs
    */
@@ -2483,6 +2483,37 @@ public interface Admin extends Abortable, Closeable {
       throws IOException;
 
   /**
+   * Add a new replication peer but do not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+   * ExecutionException if there was an error while executing the operation or TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @return the result of the async operation
+   * @throws IOException if a remote or network exception occurs
+   */
+  default Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig)
+      throws IOException {
+    return addReplicationPeerAsync(peerId, peerConfig, true);
+  }
+
+  /**
+   * Add a new replication peer but do not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+   * ExecutionException if there was an error while executing the operation or TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication peer
+   * @param enabled peer state, true if ENABLED and false if DISABLED
+   * @return the result of the async operation
+   * @throws IOException if a remote or network exception occurs
+   */
+  Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
+      boolean enabled) throws IOException;
+
+  /**
    * Remove a peer and stop the replication.
    * @param peerId a short name that identifies the peer
    * @throws IOException if a remote or network exception occurs
@@ -2490,6 +2521,18 @@ public interface Admin extends Abortable, Closeable {
   void removeReplicationPeer(String peerId) throws IOException;
 
   /**
+   * Remove a replication peer but do not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+   * ExecutionException if there was an error while executing the operation or TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to complete.
+   * @param peerId a short name that identifies the peer
+   * @return the result of the async operation
+   * @throws IOException if a remote or network exception occurs
+   */
+  Future<Void> removeReplicationPeerAsync(String peerId) throws IOException;
+
+  /**
    * Restart the replication stream to the specified peer.
    * @param peerId a short name that identifies the peer
    * @throws IOException if a remote or network exception occurs
@@ -2497,6 +2540,18 @@ public interface Admin extends Abortable, Closeable {
   void enableReplicationPeer(String peerId) throws IOException;
 
   /**
+   * Enable a replication peer but do not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+   * ExecutionException if there was an error while executing the operation or TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to complete.
+   * @param peerId a short name that identifies the peer
+   * @return the result of the async operation
+   * @throws IOException if a remote or network exception occurs
+   */
+  Future<Void> enableReplicationPeerAsync(String peerId) throws IOException;
+
+  /**
    * Stop the replication stream to the specified peer.
    * @param peerId a short name that identifies the peer
    * @throws IOException if a remote or network exception occurs
@@ -2504,6 +2559,18 @@ public interface Admin extends Abortable, Closeable {
   void disableReplicationPeer(String peerId) throws IOException;
 
   /**
+   * Disable a replication peer but do not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+   * ExecutionException if there was an error while executing the operation or TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to complete.
+   * @param peerId a short name that identifies the peer
+   * @return the result of the async operation
+   * @throws IOException if a remote or network exception occurs
+   */
+  Future<Void> disableReplicationPeerAsync(String peerId) throws IOException;
+
+  /**
    * Returns the configured ReplicationPeerConfig for the specified peer.
    * @param peerId a short name that identifies the peer
    * @return ReplicationPeerConfig for the peer
@@ -2514,13 +2581,27 @@ public interface Admin extends Abortable, Closeable {
   /**
    * Update the peerConfig for the specified peer.
    * @param peerId a short name that identifies the peer
-   * @param peerConfig new config for the peer
+   * @param peerConfig new config for the replication peer
    * @throws IOException if a remote or network exception occurs
    */
   void updateReplicationPeerConfig(String peerId,
       ReplicationPeerConfig peerConfig) throws IOException;
 
   /**
+   * Update the peerConfig for the specified peer but do not block and wait for it.
+   * <p>
+   * You can use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw
+   * ExecutionException if there was an error while executing the operation or TimeoutException in
+   * case the wait timeout was not long enough to allow the operation to complete.
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig new config for the replication peer
+   * @return the result of the async operation
+   * @throws IOException if a remote or network exception occurs
+   */
+  Future<Void> updateReplicationPeerConfigAsync(String peerId, ReplicationPeerConfig peerConfig)
+      throws IOException;
+
+  /**
    * Append the replicable table column family config from the specified peer.
    * @param id a short that identifies the cluster
    * @param tableCfs A map from tableName to column family names
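
A minimal caller-side sketch of the Future-returning methods added above
(conn, peerId and peerConfig are assumed to already exist; the 60-second
timeout is arbitrary):

import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class AddPeerSketch {
  static void addPeer(Connection conn, String peerId, ReplicationPeerConfig peerConfig)
      throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // Submits the master-side procedure and returns immediately.
      Future<Void> pending = admin.addReplicationPeerAsync(peerId, peerConfig, true);
      // Block only when the result is actually needed.
      pending.get(60, TimeUnit.SECONDS);
    }
  }
}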

http://git-wip-us.apache.org/repos/asf/hbase/blob/9dd4ada2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 207d28b..0ce62d7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -42,6 +42,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Supplier;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -201,7 +202,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTa
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.AddReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.DisableReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.EnableReplicationPeerResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.RemoveReplicationPeerResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos;
 
 /**
@@ -3781,6 +3787,25 @@ public class HBaseAdmin implements Admin {
     }
   }
 
+  @InterfaceAudience.Private
+  @InterfaceStability.Evolving
+  private static class ReplicationFuture extends ProcedureFuture<Void> {
+    private final String peerId;
+    private final Supplier<String> getOperation;
+
+    public ReplicationFuture(HBaseAdmin admin, String peerId, Long procId,
+        Supplier<String> getOperation) {
+      super(admin, procId);
+      this.peerId = peerId;
+      this.getOperation = getOperation;
+    }
+
+    @Override
+    public String toString() {
+      return "Operation: " + getOperation.get() + ", peerId: " + peerId;
+    }
+  }
+
   @Override
   public List<SecurityCapability> getSecurityCapabilities() throws IOException {
     try {
@@ -3853,50 +3878,82 @@ public class HBaseAdmin implements Admin {
   @Override
   public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
       throws IOException {
-    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-      @Override
-      protected Void rpcCall() throws Exception {
-        master.addReplicationPeer(getRpcController(),
-          RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled));
-        return null;
-      }
-    });
+    get(addReplicationPeerAsync(peerId, peerConfig, enabled), this.syncWaitTimeout,
+      TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public Future<Void> addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerConfig,
+      boolean enabled) throws IOException {
+    AddReplicationPeerResponse response = executeCallable(
+      new MasterCallable<AddReplicationPeerResponse>(getConnection(), getRpcControllerFactory()) {
+        @Override
+        protected AddReplicationPeerResponse rpcCall() throws Exception {
+          return master.addReplicationPeer(getRpcController(),
+            RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled));
+        }
+      });
+    return new ReplicationFuture(this, peerId, response.getProcId(), () -> "ADD_REPLICATION_PEER");
   }
 
   @Override
   public void removeReplicationPeer(String peerId) throws IOException {
-    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-      @Override
-      protected Void rpcCall() throws Exception {
-        master.removeReplicationPeer(getRpcController(),
-          RequestConverter.buildRemoveReplicationPeerRequest(peerId));
-        return null;
-      }
-    });
+    get(removeReplicationPeerAsync(peerId), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public Future<Void> removeReplicationPeerAsync(String peerId) throws IOException {
+    RemoveReplicationPeerResponse response =
+      executeCallable(new MasterCallable<RemoveReplicationPeerResponse>(getConnection(),
+          getRpcControllerFactory()) {
+        @Override
+        protected RemoveReplicationPeerResponse rpcCall() throws Exception {
+          return master.removeReplicationPeer(getRpcController(),
+            RequestConverter.buildRemoveReplicationPeerRequest(peerId));
+        }
+      });
+    return new ReplicationFuture(this, peerId, response.getProcId(),
+      () -> "REMOVE_REPLICATION_PEER");
   }
 
   @Override
   public void enableReplicationPeer(final String peerId) throws IOException {
-    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-      @Override
-      protected Void rpcCall() throws Exception {
-        master.enableReplicationPeer(getRpcController(),
-          RequestConverter.buildEnableReplicationPeerRequest(peerId));
-        return null;
-      }
-    });
+    get(enableReplicationPeerAsync(peerId), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public Future<Void> enableReplicationPeerAsync(final String peerId) throws IOException {
+    EnableReplicationPeerResponse response =
+      executeCallable(new MasterCallable<EnableReplicationPeerResponse>(getConnection(),
+          getRpcControllerFactory()) {
+        @Override
+        protected EnableReplicationPeerResponse rpcCall() throws Exception {
+          return master.enableReplicationPeer(getRpcController(),
+            RequestConverter.buildEnableReplicationPeerRequest(peerId));
+        }
+      });
+    return new ReplicationFuture(this, peerId, response.getProcId(),
+      () -> "ENABLE_REPLICATION_PEER");
   }
 
   @Override
   public void disableReplicationPeer(final String peerId) throws IOException {
-    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-      @Override
-      protected Void rpcCall() throws Exception {
-        master.disableReplicationPeer(getRpcController(),
-          RequestConverter.buildDisableReplicationPeerRequest(peerId));
-        return null;
-      }
-    });
+    get(disableReplicationPeerAsync(peerId), this.syncWaitTimeout, TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public Future<Void> disableReplicationPeerAsync(final String peerId) throws IOException {
+    DisableReplicationPeerResponse response =
+      executeCallable(new MasterCallable<DisableReplicationPeerResponse>(getConnection(),
+          getRpcControllerFactory()) {
+        @Override
+        protected DisableReplicationPeerResponse rpcCall() throws Exception {
+          return master.disableReplicationPeer(getRpcController(),
+            RequestConverter.buildDisableReplicationPeerRequest(peerId));
+        }
+      });
+    return new ReplicationFuture(this, peerId, response.getProcId(),
+      () -> "DISABLE_REPLICATION_PEER");
   }
 
   @Override
@@ -3915,14 +3972,24 @@ public class HBaseAdmin implements Admin {
   @Override
   public void updateReplicationPeerConfig(final String peerId,
       final ReplicationPeerConfig peerConfig) throws IOException {
-    executeCallable(new MasterCallable<Void>(getConnection(), getRpcControllerFactory()) {
-      @Override
-      protected Void rpcCall() throws Exception {
-        master.updateReplicationPeerConfig(getRpcController(),
-          RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig));
-        return null;
-      }
-    });
+    get(updateReplicationPeerConfigAsync(peerId, peerConfig), this.syncWaitTimeout,
+      TimeUnit.MILLISECONDS);
+  }
+
+  @Override
+  public Future<Void> updateReplicationPeerConfigAsync(final String peerId,
+      final ReplicationPeerConfig peerConfig) throws IOException {
+    UpdateReplicationPeerConfigResponse response =
+      executeCallable(new MasterCallable<UpdateReplicationPeerConfigResponse>(getConnection(),
+          getRpcControllerFactory()) {
+        @Override
+        protected UpdateReplicationPeerConfigResponse rpcCall() throws Exception {
+          return master.updateReplicationPeerConfig(getRpcController(),
+            RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig));
+        }
+      });
+    return new ReplicationFuture(this, peerId, response.getProcId(),
+      () -> "UPDATE_REPLICATION_PEER_CONFIG");
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/9dd4ada2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 5e9356a..55f463f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -40,6 +40,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiConsumer;
 import java.util.function.Function;
+import java.util.function.Supplier;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -1525,47 +1526,34 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
   @Override
   public CompletableFuture<Void> addReplicationPeer(String peerId,
       ReplicationPeerConfig peerConfig, boolean enabled) {
-    return this
-        .<Void> newMasterCaller()
-        .action(
-          (controller, stub) -> this
-              .<AddReplicationPeerRequest, AddReplicationPeerResponse, Void> call(controller, stub,
-                RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled), (s,
-                    c, req, done) -> s.addReplicationPeer(c, req, done), (resp) -> null)).call();
+    return this.<AddReplicationPeerRequest, AddReplicationPeerResponse> procedureCall(
+      RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled),
+      (s, c, req, done) -> s.addReplicationPeer(c, req, done), (resp) -> resp.getProcId(),
+      new ReplicationProcedureBiConsumer(peerId, () -> "ADD_REPLICATION_PEER"));
   }
 
   @Override
   public CompletableFuture<Void> removeReplicationPeer(String peerId) {
-    return this
-        .<Void> newMasterCaller()
-        .action(
-          (controller, stub) -> this
-              .<RemoveReplicationPeerRequest, RemoveReplicationPeerResponse, Void> call(controller,
-                stub, RequestConverter.buildRemoveReplicationPeerRequest(peerId),
-                (s, c, req, done) -> s.removeReplicationPeer(c, req, done), (resp) -> null)).call();
+    return this.<RemoveReplicationPeerRequest, RemoveReplicationPeerResponse> procedureCall(
+      RequestConverter.buildRemoveReplicationPeerRequest(peerId),
+      (s, c, req, done) -> s.removeReplicationPeer(c, req, done), (resp) -> resp.getProcId(),
+      new ReplicationProcedureBiConsumer(peerId, () -> "REMOVE_REPLICATION_PEER"));
   }
 
   @Override
   public CompletableFuture<Void> enableReplicationPeer(String peerId) {
-    return this
-        .<Void> newMasterCaller()
-        .action(
-          (controller, stub) -> this
-              .<EnableReplicationPeerRequest, EnableReplicationPeerResponse, Void> call(controller,
-                stub, RequestConverter.buildEnableReplicationPeerRequest(peerId),
-                (s, c, req, done) -> s.enableReplicationPeer(c, req, done), (resp) -> null)).call();
+    return this.<EnableReplicationPeerRequest, EnableReplicationPeerResponse> procedureCall(
+      RequestConverter.buildEnableReplicationPeerRequest(peerId),
+      (s, c, req, done) -> s.enableReplicationPeer(c, req, done), (resp) -> resp.getProcId(),
+      new ReplicationProcedureBiConsumer(peerId, () -> "ENABLE_REPLICATION_PEER"));
   }
 
   @Override
   public CompletableFuture<Void> disableReplicationPeer(String peerId) {
-    return this
-        .<Void> newMasterCaller()
-        .action(
-          (controller, stub) -> this
-              .<DisableReplicationPeerRequest, DisableReplicationPeerResponse, Void> call(
-                controller, stub, RequestConverter.buildDisableReplicationPeerRequest(peerId), (s,
-                    c, req, done) -> s.disableReplicationPeer(c, req, done), (resp) -> null))
-        .call();
+    return this.<DisableReplicationPeerRequest, DisableReplicationPeerResponse> procedureCall(
+      RequestConverter.buildDisableReplicationPeerRequest(peerId),
+      (s, c, req, done) -> s.disableReplicationPeer(c, req, done), (resp) -> resp.getProcId(),
+      new ReplicationProcedureBiConsumer(peerId, () -> "DISABLE_REPLICATION_PEER"));
   }
 
   @Override
@@ -1584,13 +1572,11 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
   public CompletableFuture<Void> updateReplicationPeerConfig(String peerId,
       ReplicationPeerConfig peerConfig) {
     return this
-        .<Void> newMasterCaller()
-        .action(
-          (controller, stub) -> this
-              .<UpdateReplicationPeerConfigRequest, UpdateReplicationPeerConfigResponse, Void> call(
-                controller, stub, RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId,
-                  peerConfig), (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done), (
-                    resp) -> null)).call();
+        .<UpdateReplicationPeerConfigRequest, UpdateReplicationPeerConfigResponse> procedureCall(
+          RequestConverter.buildUpdateReplicationPeerConfigRequest(peerId, peerConfig),
+          (s, c, req, done) -> s.updateReplicationPeerConfig(c, req, done),
+          (resp) -> resp.getProcId(),
+          new ReplicationProcedureBiConsumer(peerId, () -> "UPDATE_REPLICATION_PEER_CONFIG"));
   }
 
   @Override
@@ -2547,6 +2533,30 @@ class RawAsyncHBaseAdmin implements AsyncAdmin {
     }
   }
 
+  private class ReplicationProcedureBiConsumer extends ProcedureBiConsumer {
+    private final String peerId;
+    private final Supplier<String> getOperation;
+
+    ReplicationProcedureBiConsumer(String peerId, Supplier<String> getOperation) {
+      this.peerId = peerId;
+      this.getOperation = getOperation;
+    }
+
+    String getDescription() {
+      return "Operation: " + getOperation.get() + ", peerId: " + peerId;
+    }
+
+    @Override
+    void onFinished() {
+      LOG.info(getDescription() + " completed");
+    }
+
+    @Override
+    void onError(Throwable error) {
+      LOG.info(getDescription() + " failed with " + error.getMessage());
+    }
+  }
+
   private CompletableFuture<Void> waitProcedureResult(CompletableFuture<Long> procFuture) {
     CompletableFuture<Void> future = new CompletableFuture<>();
     procFuture.whenComplete((procId, error) -> {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9dd4ada2/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 83a2e12..b42ba31 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -748,4 +748,4 @@ public class TestReplicationAdmin {
     assertEquals(2097152, admin.getPeerConfig(ID_ONE).getBandwidth());
     admin.removePeer(ID_ONE);
   }
-}
+}
\ No newline at end of file
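
On the async side of this commit, the same operations surface as
CompletableFutures that complete when the master-side procedure finishes,
not when the RPC returns. A caller-side sketch (asyncConn is an assumed,
already-initialized AsyncConnection):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;

public class EnablePeerSketch {
  static CompletableFuture<Void> enablePeer(AsyncConnection asyncConn, String peerId) {
    AsyncAdmin admin = asyncConn.getAdmin();
    // whenComplete only observes the outcome; it does not change the result.
    return admin.enableReplicationPeer(peerId)
        .whenComplete((v, err) -> {
          if (err != null) {
            System.err.println("enableReplicationPeer failed: " + err.getMessage());
          }
        });
  }
}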


[15/24] hbase git commit: HBASE-19537 Removed unnecessary semicolons from hbase-backup

Posted by zh...@apache.org.
HBASE-19537 Removed unnecessary semicolons from hbase-backup


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d50ae037
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d50ae037
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d50ae037

Branch: refs/heads/HBASE-19397
Commit: d50ae037160cee874017280147263ce0ab07101a
Parents: dd00081
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sun Dec 17 15:32:54 2017 +0100
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Tue Dec 19 21:03:07 2017 +0100

----------------------------------------------------------------------
 .../apache/hadoop/hbase/backup/BackupInfo.java  |  4 +--
 .../hadoop/hbase/backup/RestoreDriver.java      |  2 +-
 .../hbase/backup/impl/BackupAdminImpl.java      |  6 ++--
 .../hbase/backup/impl/BackupCommands.java       | 34 +++++++++-----------
 .../hbase/backup/impl/BackupManifest.java       |  2 +-
 .../hbase/backup/impl/BackupSystemTable.java    | 11 +++----
 .../backup/impl/FullTableBackupClient.java      |  3 +-
 .../backup/impl/IncrementalBackupManager.java   |  4 +--
 .../impl/IncrementalTableBackupClient.java      |  2 +-
 .../hbase/backup/impl/RestoreTablesClient.java  |  2 +-
 .../hbase/backup/impl/TableBackupClient.java    |  2 +-
 .../mapreduce/MapReduceBackupCopyJob.java       |  2 +-
 .../mapreduce/MapReduceBackupMergeJob.java      |  4 +--
 .../hadoop/hbase/backup/util/RestoreTool.java   |  6 ++--
 .../hadoop/hbase/backup/TestBackupBase.java     |  2 +-
 .../hbase/backup/TestBackupDeleteRestore.java   |  2 +-
 .../TestIncrementalBackupMergeWithFailures.java |  5 ++-
 17 files changed, 43 insertions(+), 50 deletions(-)
----------------------------------------------------------------------
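
The two recurring cleanups in this commit, shown in one illustrative snippet
rather than code from the patch itself (the Connection and TableName usage
is assumed):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class SemicolonSketch {
  // Enum constants need no trailing semicolon when nothing follows them.
  public enum BackupState { RUNNING, COMPLETE, FAILED, ANY }

  // A try-with-resources header needs no ';' before its closing ')'.
  static boolean exists(Connection conn, TableName tableName) throws IOException {
    try (Admin admin = conn.getAdmin()) {
      return admin.tableExists(tableName);
    }
  }
}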


http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
index 0df712e..889d223 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/BackupInfo.java
@@ -62,7 +62,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
    * Backup session states
    */
   public static enum BackupState {
-    RUNNING, COMPLETE, FAILED, ANY;
+    RUNNING, COMPLETE, FAILED, ANY
   }
 
   /**
@@ -70,7 +70,7 @@ public class BackupInfo implements Comparable<BackupInfo> {
    * BackupState.RUNNING
    */
   public static enum BackupPhase {
-    REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST;
+    REQUEST, SNAPSHOT, PREPARE_INCREMENTAL, SNAPSHOTCOPY, INCREMENTAL_COPY, STORE_MANIFEST
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
index b99246b..c8e3474 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/RestoreDriver.java
@@ -147,7 +147,7 @@ public class RestoreDriver extends AbstractHBaseTool {
     String tableMapping =
         cmd.hasOption(OPTION_TABLE_MAPPING) ? cmd.getOptionValue(OPTION_TABLE_MAPPING) : null;
     try (final Connection conn = ConnectionFactory.createConnection(conf);
-        BackupAdmin client = new BackupAdminImpl(conn);) {
+        BackupAdmin client = new BackupAdminImpl(conn)) {
       // Check backup set
       if (cmd.hasOption(OPTION_SET)) {
         String setName = cmd.getOptionValue(OPTION_SET);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
index 0b01a98..9a20b7b 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupAdminImpl.java
@@ -459,7 +459,7 @@ public class BackupAdminImpl implements BackupAdmin {
   public void addToBackupSet(String name, TableName[] tables) throws IOException {
     String[] tableNames = new String[tables.length];
     try (final BackupSystemTable table = new BackupSystemTable(conn);
-        final Admin admin = conn.getAdmin();) {
+        final Admin admin = conn.getAdmin()) {
       for (int i = 0; i < tables.length; i++) {
         tableNames[i] = tables[i].getNameAsString();
         if (!admin.tableExists(TableName.valueOf(tableNames[i]))) {
@@ -561,7 +561,7 @@ public class BackupAdminImpl implements BackupAdmin {
         outputFs.mkdirs(targetTableBackupDirPath);
       }
       ArrayList<TableName> nonExistingTableList = null;
-      try (Admin admin = conn.getAdmin();) {
+      try (Admin admin = conn.getAdmin()) {
         for (TableName tableName : tableList) {
           if (!admin.tableExists(tableName)) {
             if (nonExistingTableList == null) {
@@ -615,7 +615,7 @@ public class BackupAdminImpl implements BackupAdmin {
 
   @Override
   public void mergeBackups(String[] backupIds) throws IOException {
-    try (final BackupSystemTable sysTable = new BackupSystemTable(conn);) {
+    try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
       checkIfValidForMerge(backupIds, sysTable);
       BackupMergeJob job = BackupRestoreFactory.getBackupMergeJob(conn.getConfiguration());
       job.run(backupIds);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
index 102d45d..4566745 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupCommands.java
@@ -144,7 +144,7 @@ public final class BackupCommands {
       conn = ConnectionFactory.createConnection(getConf());
       if (requiresNoActiveSession()) {
         // Check active session
-        try (BackupSystemTable table = new BackupSystemTable(conn);) {
+        try (BackupSystemTable table = new BackupSystemTable(conn)) {
           List<BackupInfo> sessions = table.getBackupInfos(BackupState.RUNNING);
 
           if (sessions.size() > 0) {
@@ -158,7 +158,7 @@ public final class BackupCommands {
       }
       if (requiresConsistentState()) {
         // Check failed delete
-        try (BackupSystemTable table = new BackupSystemTable(conn);) {
+        try (BackupSystemTable table = new BackupSystemTable(conn)) {
           String[] ids = table.getListOfBackupIdsFromDeleteOperation();
 
           if (ids != null && ids.length > 0) {
@@ -332,8 +332,7 @@ public final class BackupCommands {
         System.setProperty("mapreduce.job.queuename", queueName);
       }
 
-      try (BackupAdminImpl admin = new BackupAdminImpl(conn);) {
-
+      try (BackupAdminImpl admin = new BackupAdminImpl(conn)) {
         BackupRequest.Builder builder = new BackupRequest.Builder();
         BackupRequest request =
             builder
@@ -471,7 +470,7 @@ public final class BackupCommands {
       super.execute();
 
       String backupId = args[1];
-      try (final BackupSystemTable sysTable = new BackupSystemTable(conn);) {
+      try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
         BackupInfo info = sysTable.readBackupInfo(backupId);
         if (info == null) {
           System.out.println("ERROR: " + backupId + " does not exist");
@@ -512,7 +511,7 @@ public final class BackupCommands {
       super.execute();
 
       String backupId = (args == null || args.length <= 1) ? null : args[1];
-      try (final BackupSystemTable sysTable = new BackupSystemTable(conn);) {
+      try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
         BackupInfo info = null;
 
         if (backupId != null) {
@@ -569,7 +568,7 @@ public final class BackupCommands {
       String[] args = cmdline.getArgs();
       String[] backupIds = new String[args.length - 1];
       System.arraycopy(args, 1, backupIds, 0, backupIds.length);
-      try (BackupAdminImpl admin = new BackupAdminImpl(conn);) {
+      try (BackupAdminImpl admin = new BackupAdminImpl(conn)) {
         int deleted = admin.deleteBackups(backupIds);
         System.out.println("Deleted " + deleted + " backups. Total requested: " + (args.length -1));
       } catch (IOException e) {
@@ -606,8 +605,7 @@ public final class BackupCommands {
 
       Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
       try (final Connection conn = ConnectionFactory.createConnection(conf);
-          final BackupSystemTable sysTable = new BackupSystemTable(conn);) {
-
+          final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
         // Failed backup
         BackupInfo backupInfo;
         List<BackupInfo> list = sysTable.getBackupInfos(BackupState.RUNNING);
@@ -658,7 +656,7 @@ public final class BackupCommands {
       BackupSystemTable.restoreFromSnapshot(conn);
       // Finish previous failed session
       sysTable.finishBackupExclusiveOperation();
-      try (BackupAdmin admin = new BackupAdminImpl(conn);) {
+      try (BackupAdmin admin = new BackupAdminImpl(conn)) {
         admin.deleteBackups(backupIds);
       }
       System.out.println("DELETE operation finished OK: " + StringUtils.join(backupIds));
@@ -682,7 +680,7 @@ public final class BackupCommands {
       sysTable.finishBackupExclusiveOperation();
       // Finish previous failed session
       sysTable.finishMergeOperation();
-      try (BackupAdmin admin = new BackupAdminImpl(conn);) {
+      try (BackupAdmin admin = new BackupAdminImpl(conn)) {
         admin.mergeBackups(backupIds);
       }
       System.out.println("MERGE operation finished OK: " + StringUtils.join(backupIds));
@@ -734,7 +732,7 @@ public final class BackupCommands {
       }
       Configuration conf = getConf() != null ? getConf() : HBaseConfiguration.create();
       try (final Connection conn = ConnectionFactory.createConnection(conf);
-          final BackupAdminImpl admin = new BackupAdminImpl(conn);) {
+          final BackupAdminImpl admin = new BackupAdminImpl(conn)) {
         admin.mergeBackups(backupIds);
       }
     }
@@ -781,7 +779,7 @@ public final class BackupCommands {
       if (backupRootPath == null) {
         // Load from backup system table
         super.execute();
-        try (final BackupSystemTable sysTable = new BackupSystemTable(conn);) {
+        try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
           history = sysTable.getBackupHistory(n, tableNameFilter, tableSetFilter);
         }
       } else {
@@ -905,7 +903,7 @@ public final class BackupCommands {
 
       // List all backup set names
       // does not expect any args
-      try (BackupAdminImpl admin = new BackupAdminImpl(conn);) {
+      try (BackupAdminImpl admin = new BackupAdminImpl(conn)) {
         List<BackupSet> list = admin.listBackupSets();
         for (BackupSet bs : list) {
           System.out.println(bs);
@@ -921,7 +919,7 @@ public final class BackupCommands {
       super.execute();
 
       String setName = args[2];
-      try (final BackupSystemTable sysTable = new BackupSystemTable(conn);) {
+      try (final BackupSystemTable sysTable = new BackupSystemTable(conn)) {
         List<TableName> tables = sysTable.describeBackupSet(setName);
         BackupSet set = tables == null ? null : new BackupSet(setName, tables);
         if (set == null) {
@@ -940,7 +938,7 @@ public final class BackupCommands {
       super.execute();
 
       String setName = args[2];
-      try (final BackupAdminImpl admin = new BackupAdminImpl(conn);) {
+      try (final BackupAdminImpl admin = new BackupAdminImpl(conn)) {
         boolean result = admin.deleteBackupSet(setName);
         if (result) {
           System.out.println("Delete set " + setName + " OK.");
@@ -960,7 +958,7 @@ public final class BackupCommands {
       String setName = args[2];
       String[] tables = args[3].split(",");
       TableName[] tableNames = toTableNames(tables);
-      try (final BackupAdminImpl admin = new BackupAdminImpl(conn);) {
+      try (final BackupAdminImpl admin = new BackupAdminImpl(conn)) {
         admin.removeFromBackupSet(setName, tableNames);
       }
     }
@@ -986,7 +984,7 @@ public final class BackupCommands {
       for (int i = 0; i < tables.length; i++) {
         tableNames[i] = TableName.valueOf(tables[i]);
       }
-      try (final BackupAdminImpl admin = new BackupAdminImpl(conn);) {
+      try (final BackupAdminImpl admin = new BackupAdminImpl(conn)) {
         admin.addToBackupSet(setName, tableNames);
       }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
index 4e6f6ff..27f3750 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupManifest.java
@@ -485,7 +485,7 @@ public class BackupManifest {
         new Path(HBackupFileSystem.getBackupPath(backupImage.getRootDir(),
           backupImage.getBackupId()), MANIFEST_FILE_NAME);
     try (FSDataOutputStream out =
-        manifestFilePath.getFileSystem(conf).create(manifestFilePath, true);) {
+        manifestFilePath.getFileSystem(conf).create(manifestFilePath, true)) {
       out.write(data);
     } catch (IOException e) {
       throw new BackupException(e.getMessage());

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index 37dd089..ebfc9f3 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -176,8 +176,7 @@ public final class BackupSystemTable implements Closeable {
   }
 
   private void checkSystemTable() throws IOException {
-    try (Admin admin = connection.getAdmin();) {
-
+    try (Admin admin = connection.getAdmin()) {
       verifyNamespaceExists(admin);
 
       if (!admin.tableExists(tableName)) {
@@ -317,7 +316,7 @@ public final class BackupSystemTable implements Closeable {
           LOG.debug("found bulk loaded file : " + tbl + " " + Bytes.toString(fam) + " " + path);
         }
       }
-      ;
+
       return mapForSrc;
     }
   }
@@ -1546,7 +1545,7 @@ public final class BackupSystemTable implements Closeable {
   }
 
   public static void snapshot(Connection conn) throws IOException {
-    try (Admin admin = conn.getAdmin();) {
+    try (Admin admin = conn.getAdmin()) {
       Configuration conf = conn.getConfiguration();
       admin.snapshot(BackupSystemTable.getSnapshotName(conf), BackupSystemTable.getTableName(conf));
     }
@@ -1555,7 +1554,7 @@ public final class BackupSystemTable implements Closeable {
   public static void restoreFromSnapshot(Connection conn) throws IOException {
     Configuration conf = conn.getConfiguration();
     LOG.debug("Restoring " + BackupSystemTable.getTableNameAsString(conf) + " from snapshot");
-    try (Admin admin = conn.getAdmin();) {
+    try (Admin admin = conn.getAdmin()) {
       String snapshotName = BackupSystemTable.getSnapshotName(conf);
       if (snapshotExists(admin, snapshotName)) {
         admin.disableTable(BackupSystemTable.getTableName(conf));
@@ -1589,7 +1588,7 @@ public final class BackupSystemTable implements Closeable {
   public static void deleteSnapshot(Connection conn) throws IOException {
     Configuration conf = conn.getConfiguration();
     LOG.debug("Deleting " + BackupSystemTable.getSnapshotName(conf) + " from the system");
-    try (Admin admin = conn.getAdmin();) {
+    try (Admin admin = conn.getAdmin()) {
       String snapshotName = BackupSystemTable.getSnapshotName(conf);
       if (snapshotExists(admin, snapshotName)) {
         admin.deleteSnapshot(snapshotName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
index cb8e092..16999cc 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
@@ -121,8 +121,7 @@ public class FullTableBackupClient extends TableBackupClient {
    */
   @Override
   public void execute() throws IOException {
-    try (Admin admin = conn.getAdmin();) {
-
+    try (Admin admin = conn.getAdmin()) {
       // Begin BACKUP
       beginBackup(backupManager, backupInfo);
       String savedStartCode = null;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
index d9258c5..74408c3 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalBackupManager.java
@@ -92,11 +92,9 @@ public class IncrementalBackupManager extends BackupManager {
     HashMap<String, String> props = new HashMap<String, String>();
     props.put("backupRoot", backupInfo.getBackupRootDir());
 
-    try (Admin admin = conn.getAdmin();) {
-
+    try (Admin admin = conn.getAdmin()) {
       admin.execProcedure(LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_SIGNATURE,
         LogRollMasterProcedureManager.ROLLLOG_PROCEDURE_NAME, props);
-
     }
     newTimestamps = readRegionServerLastLogRollResult();
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
index 00baeb7..37c45e0 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/IncrementalTableBackupClient.java
@@ -349,7 +349,7 @@ public class IncrementalTableBackupClient extends TableBackupClient {
 
 
   protected boolean tableExists(TableName table, Connection conn) throws IOException {
-    try (Admin admin = conn.getAdmin();) {
+    try (Admin admin = conn.getAdmin()) {
       return admin.tableExists(table);
     }
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
index fc0fdde..099a70d 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/RestoreTablesClient.java
@@ -92,7 +92,7 @@ public class RestoreTablesClient {
     ArrayList<TableName> disabledTableList = new ArrayList<>();
 
     // check if the tables already exist
-    try (Admin admin = conn.getAdmin();) {
+    try (Admin admin = conn.getAdmin()) {
       for (TableName tableName : tTableArray) {
         if (admin.tableExists(tableName)) {
           existTableList.add(tableName);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
index aa0ec5f..05fcec3 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/impl/TableBackupClient.java
@@ -146,7 +146,7 @@ public abstract class TableBackupClient {
       }
       LOG.debug("Trying to delete snapshot: " + snapshotName);
 
-      try (Admin admin = conn.getAdmin();) {
+      try (Admin admin = conn.getAdmin()) {
         admin.deleteSnapshot(snapshotName);
       }
       LOG.debug("Deleting the snapshot " + snapshotName + " for backup " + backupInfo.getBackupId()

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java
index 90e8442..ee99cd9 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupCopyJob.java
@@ -298,7 +298,7 @@ public class MapReduceBackupCopyJob implements BackupCopyJob {
       long totalBytesExpected = 0;
       int totalRecords = 0;
       Path fileListingPath = getFileListingPath();
-      try (SequenceFile.Writer writer = getWriter(fileListingPath);) {
+      try (SequenceFile.Writer writer = getWriter(fileListingPath)) {
         List<Path> srcFiles = getSourceFiles();
         if (srcFiles.size() == 0) {
           return fileListingPath;

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
index dafc88b..b93d911 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/mapreduce/MapReduceBackupMergeJob.java
@@ -223,7 +223,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
       String backupRoot) throws IOException {
 
     // Delete from backup system table
-    try (BackupSystemTable table = new BackupSystemTable(conn);) {
+    try (BackupSystemTable table = new BackupSystemTable(conn)) {
       for (String backupId : backupIds) {
         table.deleteBackupInfo(backupId);
       }
@@ -286,7 +286,7 @@ public class MapReduceBackupMergeJob implements BackupMergeJob {
     Set<TableName> allSet = new HashSet<TableName>();
 
     try (Connection conn = ConnectionFactory.createConnection(conf);
-        BackupSystemTable table = new BackupSystemTable(conn);) {
+        BackupSystemTable table = new BackupSystemTable(conn)) {
       for (String backupId : backupIds) {
         BackupInfo bInfo = table.readBackupInfo(backupId);
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
index b00351b..3973c3c 100644
--- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
+++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/util/RestoreTool.java
@@ -125,7 +125,7 @@ public class RestoreTool {
 
   void modifyTableSync(Connection conn, TableDescriptor desc) throws IOException {
 
-    try (Admin admin = conn.getAdmin();) {
+    try (Admin admin = conn.getAdmin()) {
       admin.modifyTable(desc);
       int attempt = 0;
       int maxAttempts = 600;
@@ -156,7 +156,7 @@ public class RestoreTool {
   public void incrementalRestoreTable(Connection conn, Path tableBackupPath, Path[] logDirs,
       TableName[] tableNames, TableName[] newTableNames, String incrBackupId) throws IOException {
 
-    try (Admin admin = conn.getAdmin();) {
+    try (Admin admin = conn.getAdmin()) {
       if (tableNames.length != newTableNames.length) {
         throw new IOException("Number of source tables and target tables does not match!");
       }
@@ -474,7 +474,7 @@ public class RestoreTool {
   private void checkAndCreateTable(Connection conn, Path tableBackupPath, TableName tableName,
       TableName targetTableName, ArrayList<Path> regionDirList, TableDescriptor htd,
       boolean truncateIfExists) throws IOException {
-    try (Admin admin = conn.getAdmin();) {
+    try (Admin admin = conn.getAdmin()) {
       boolean createNew = false;
       if (admin.tableExists(targetTableName)) {
         if (truncateIfExists) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
index c44efbd..8d23c69 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupBase.java
@@ -194,7 +194,7 @@ public class TestBackupBase {
     public void execute() throws IOException
     {
       // Get the stage ID to fail on
-      try (Admin admin = conn.getAdmin();) {
+      try (Admin admin = conn.getAdmin()) {
         // Begin BACKUP
         beginBackup(backupManager, backupInfo);
         failStageIf(Stage.stage_0);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
index 85fba4b..73210ce 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestBackupDeleteRestore.java
@@ -51,7 +51,7 @@ public class TestBackupDeleteRestore extends TestBackupBase {
     int numRows = TEST_UTIL.countRows(table1);
     HBaseAdmin hba = TEST_UTIL.getHBaseAdmin();
     // delete row
-    try (Table table = TEST_UTIL.getConnection().getTable(table1);) {
+    try (Table table = TEST_UTIL.getConnection().getTable(table1)) {
       Delete delete = new Delete("row0".getBytes());
       table.delete(delete);
       hba.flush(table1);

http://git-wip-us.apache.org/repos/asf/hbase/blob/d50ae037/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
----------------------------------------------------------------------
diff --git a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
index 011ddf2..ebac1ea 100644
--- a/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
+++ b/hbase-backup/src/test/java/org/apache/hadoop/hbase/backup/TestIncrementalBackupMergeWithFailures.java
@@ -267,8 +267,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
 
       conf.set(FAILURE_PHASE_KEY, phase.toString());
 
-      try (BackupAdmin bAdmin = new BackupAdminImpl(conn);)
-      {
+      try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
         String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
         bAdmin.mergeBackups(backups);
         Assert.fail("Expected IOException");
@@ -306,7 +305,7 @@ public class TestIncrementalBackupMergeWithFailures extends TestBackupBase {
     conf.unset(FAILURE_PHASE_KEY);
     conf.unset(BackupRestoreFactory.HBASE_BACKUP_MERGE_IMPL_CLASS);
 
-    try (BackupAdmin bAdmin = new BackupAdminImpl(conn);) {
+    try (BackupAdmin bAdmin = new BackupAdminImpl(conn)) {
       String[] backups = new String[] { backupIdIncMultiple, backupIdIncMultiple2 };
       bAdmin.mergeBackups(backups);
     }


[03/24] hbase git commit: HBASE-19122 Suspect methods on Cell to be deprecated

Posted by zh...@apache.org.
http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
index f77df4c..b68e4ff 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/MobUtils.java
@@ -102,7 +102,7 @@ public final class MobUtils {
   static {
     List<Tag> tags = new ArrayList<>();
     tags.add(MobConstants.MOB_REF_TAG);
-    REF_DELETE_MARKER_TAG_BYTES = Tag.fromList(tags);
+    REF_DELETE_MARKER_TAG_BYTES = TagUtil.fromList(tags);
   }
 
   /**
@@ -502,7 +502,7 @@ public final class MobUtils {
     // find the original mob files by this table name. For details please see cloning
     // snapshot for mob files.
     tags.add(tableNameTag);
-    return createMobRefCell(cell, fileName, Tag.fromList(tags));
+    return createMobRefCell(cell, fileName, TagUtil.fromList(tags));
   }
 
   public static Cell createMobRefCell(Cell cell, byte[] fileName, byte[] refCellTags) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
index 0cccfa3..cf661db 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/compactions/PartitionedMobCompactor.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -131,7 +132,7 @@ public class PartitionedMobCompactor extends MobCompactor {
     tags.add(MobConstants.MOB_REF_TAG);
     Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, tableName.getName());
     tags.add(tableNameTag);
-    this.refCellTags = Tag.fromList(tags);
+    this.refCellTags = TagUtil.fromList(tags);
     cryptoContext = EncryptionUtil.createEncryptionContext(copyOfConf, column);
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
index 5db7383..e6ca462 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HMobStore.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
+import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.Filter;
@@ -120,7 +121,7 @@ public class HMobStore extends HStore {
     Tag tableNameTag = new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE,
         getTableName().getName());
     tags.add(tableNameTag);
-    this.refCellTags = Tag.fromList(tags);
+    this.refCellTags = TagUtil.fromList(tags);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 8ca1184..98e9df6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -7761,7 +7761,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
               .setTimestamp(Math.max(currentCell.getTimestamp() + 1, now))
               .setType(KeyValue.Type.Put.getCode())
               .setValue(newValue, 0, newValue.length)
-              .setTags(Tag.fromList(tags))
+              .setTags(TagUtil.fromList(tags))
               .build();
     } else {
       PrivateCellUtil.updateLatestStamp(delta, now);
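
The recurring change in this commit swaps the deprecated Tag.fromList(...) for TagUtil.fromList(...), which serializes a List<Tag> into the byte[] form that cell builders and ref cells consume. A minimal sketch of the new call site, assuming the HBase classes used in the hunks above (the table name "t1" is made up):

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.hadoop.hbase.ArrayBackedTag;
    import org.apache.hadoop.hbase.Tag;
    import org.apache.hadoop.hbase.TagType;
    import org.apache.hadoop.hbase.TagUtil;

    public class TagUtilFromListExample {
      public static void main(String[] args) {
        List<Tag> tags = new ArrayList<>();
        // A MOB table-name tag, as built in PartitionedMobCompactor and HMobStore above.
        tags.add(new ArrayBackedTag(TagType.MOB_TABLE_NAME_TAG_TYPE, "t1".getBytes()));
        // TagUtil.fromList replaces the deprecated Tag.fromList.
        byte[] refCellTags = TagUtil.fromList(tags);
        System.out.println("serialized tag bytes: " + refCellTags.length);
      }
    }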

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
index b8356a7..1717093 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionCoprocessorHost.java
@@ -38,13 +38,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.Coprocessor;
-import org.apache.hadoop.hbase.ExtendedCellBuilder;
-import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.RawCellBuilder;
+import org.apache.hadoop.hbase.RawCellBuilderFactory;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SharedConnection;
 import org.apache.hadoop.hbase.client.Append;
@@ -184,10 +183,9 @@ public class RegionCoprocessorHost
     }
 
     @Override
-    public ExtendedCellBuilder getCellBuilder() {
-      // do not allow seqId update.
+    public RawCellBuilder getCellBuilder() {
       // We always do a DEEP_COPY only
-      return ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY, false);
+      return RawCellBuilderFactory.create();
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index f5d70f6..0b40b71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -37,7 +37,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
@@ -173,7 +173,7 @@ public class AccessControlLists {
         .setFamily(ACL_LIST_FAMILY)
         .setQualifier(key)
         .setTimestamp(p.getTimeStamp())
-        .setType(CellBuilder.DataType.Put)
+        .setType(DataType.Put)
         .setValue(value)
         .build());
     if (LOG.isDebugEnabled()) {
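
The other mechanical migration in this commit moves the DataType enum from CellBuilder to Cell, so call sites import org.apache.hadoop.hbase.Cell.DataType and write DataType.Put (or Cell.DataType.Put) instead of CellBuilder.DataType.Put; the same pattern repeats through the test and Thrift files below. A minimal sketch of a cell built with the relocated enum, assuming the builder classes used in the hunks:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.Cell.DataType;
    import org.apache.hadoop.hbase.CellBuilderFactory;
    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CellDataTypeExample {
      public static void main(String[] args) {
        Cell cell = CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
            .setRow(Bytes.toBytes("row0"))
            .setFamily(Bytes.toBytes("f"))
            .setQualifier(Bytes.toBytes("q"))
            .setTimestamp(System.currentTimeMillis())
            .setType(DataType.Put) // was CellBuilder.DataType.Put before HBASE-19122
            .setValue(Bytes.toBytes("v"))
            .build();
        System.out.println(cell);
      }
    }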

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
index d1fac75..0c6c914 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/DefaultVisibilityLabelServiceImpl.java
@@ -46,7 +46,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
@@ -57,13 +57,11 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.TagUtil;
-import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.HasRegionServerServices;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.util.StreamUtils;
@@ -74,6 +72,8 @@ import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService {
@@ -218,7 +218,7 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
                     .setFamily(LABELS_TABLE_FAMILY)
                     .setQualifier(LABEL_QUALIFIER)
                     .setTimestamp(p.getTimeStamp())
-                    .setType(CellBuilder.DataType.Put)
+                    .setType(DataType.Put)
                     .setValue(Bytes.toBytes(SYSTEM_LABEL))
                     .build());
       region.put(p);
@@ -246,9 +246,9 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
               .setFamily(LABELS_TABLE_FAMILY)
               .setQualifier(LABEL_QUALIFIER)
               .setTimestamp(p.getTimeStamp())
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(label)
-              .setTags(Tag.fromList(Arrays.asList(LABELS_TABLE_TAGS)))
+              .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS)))
               .build());
         if (LOG.isDebugEnabled()) {
           LOG.debug("Adding the label " + labelStr);
@@ -286,9 +286,9 @@ public class DefaultVisibilityLabelServiceImpl implements VisibilityLabelService
             .setFamily(LABELS_TABLE_FAMILY)
             .setQualifier(user)
             .setTimestamp(p.getTimeStamp())
-            .setType(CellBuilder.DataType.Put)
+            .setType(DataType.Put)
             .setValue(DUMMY_VALUE)
-            .setTags(Tag.fromList(Arrays.asList(LABELS_TABLE_TAGS)))
+            .setTags(TagUtil.fromList(Arrays.asList(LABELS_TABLE_TAGS)))
             .build());
         puts.add(p);
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java
index 038b799..14507a4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityReplicationEndpoint.java
@@ -28,17 +28,16 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
-import org.apache.hadoop.hbase.wal.WALKeyImpl;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.replication.ReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.replication.WALEntryFilter;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALKeyImpl;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class VisibilityReplicationEndpoint implements ReplicationEndpoint {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java
index d3673c4..0580114 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestMultiRespectsLimits.java
@@ -18,7 +18,13 @@
 
 package org.apache.hadoop.hbase.client;
 
-import org.apache.hadoop.hbase.CellBuilder;
+import static junit.framework.TestCase.assertEquals;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CompatibilityFactory;
@@ -43,12 +49,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ThreadLocalRandom;
-
-import static junit.framework.TestCase.assertEquals;
-
 /**
  * This test sets the multi size WAAAAAY low and then checks to make sure that gets will still make
  * progress.
@@ -157,7 +157,7 @@ public class TestMultiRespectsLimits {
               .setFamily(FAMILY)
               .setQualifier(col)
               .setTimestamp(p.getTimeStamp())
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(value)
               .build());
       t.put(p);

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java
index 19f4dbe..934c212 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MockHStoreFile.java
@@ -26,12 +26,10 @@ import java.util.TreeMap;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -192,7 +190,7 @@ public class MockHStoreFile extends HStoreFile {
       public Optional<Cell> getLastKey() {
         if (splitPoint != null) {
           return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
-              .setType(CellBuilder.DataType.Put)
+              .setType(Cell.DataType.Put)
               .setRow(Arrays.copyOf(splitPoint, splitPoint.length + 1)).build());
         } else {
           return Optional.empty();
@@ -203,7 +201,7 @@ public class MockHStoreFile extends HStoreFile {
       public Optional<Cell> midKey() throws IOException {
         if (splitPoint != null) {
           return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
-              .setType(CellBuilder.DataType.Put).setRow(splitPoint).build());
+              .setType(Cell.DataType.Put).setRow(splitPoint).build());
         } else {
           return Optional.empty();
         }
@@ -213,7 +211,7 @@ public class MockHStoreFile extends HStoreFile {
       public Optional<Cell> getFirstKey() {
         if (splitPoint != null) {
           return Optional.of(CellBuilderFactory.create(CellBuilderType.DEEP_COPY)
-              .setType(CellBuilder.DataType.Put).setRow(splitPoint, 0, splitPoint.length - 1)
+              .setType(Cell.DataType.Put).setRow(splitPoint, 0, splitPoint.length - 1)
               .build());
         } else {
           return Optional.empty();

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java
index f7b7898..40e941d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionLifeCycleTracker.java
@@ -28,7 +28,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
 
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -154,7 +154,7 @@ public class TestCompactionLifeCycleTracker {
                         .setFamily(CF1)
                         .setQualifier(QUALIFIER)
                         .setTimestamp(HConstants.LATEST_TIMESTAMP)
-                        .setType(CellBuilder.DataType.Put)
+                        .setType(DataType.Put)
                         .setValue(Bytes.toBytes(i))
                         .build()));
       }
@@ -167,7 +167,7 @@ public class TestCompactionLifeCycleTracker {
                         .setFamily(CF1)
                         .setQualifier(QUALIFIER)
                         .setTimestamp(HConstants.LATEST_TIMESTAMP)
-                        .setType(CellBuilder.DataType.Put)
+                        .setType(DataType.Put)
                         .setValue(Bytes.toBytes(i))
                         .build()));
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushLifeCycleTracker.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushLifeCycleTracker.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushLifeCycleTracker.java
index 80bd906..fc8fe41 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushLifeCycleTracker.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestFlushLifeCycleTracker.java
@@ -28,7 +28,7 @@ import java.io.InterruptedIOException;
 import java.util.Optional;
 import java.util.concurrent.CountDownLatch;
 
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
@@ -200,7 +200,7 @@ public class TestFlushLifeCycleTracker {
                         .setFamily(CF)
                         .setQualifier(QUALIFIER)
                         .setTimestamp(HConstants.LATEST_TIMESTAMP)
-                        .setType(CellBuilder.DataType.Put)
+                        .setType(DataType.Put)
                         .setValue(Bytes.toBytes(i))
                         .build()));
       }
@@ -234,7 +234,7 @@ public class TestFlushLifeCycleTracker {
                         .setFamily(CF)
                         .setQualifier(QUALIFIER)
                         .setTimestamp(HConstants.LATEST_TIMESTAMP)
-                        .setType(CellBuilder.DataType.Put)
+                        .setType(DataType.Put)
                         .setValue(Bytes.toBytes(i))
                         .build()));
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index acbdf7d..3482955 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -29,9 +29,9 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Matchers.anyLong;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
@@ -74,7 +74,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
@@ -89,7 +89,6 @@ import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.MiniHBaseCluster;
@@ -97,6 +96,7 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.hbase.MultithreadedTestUtil.TestThread;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.RegionTooBusyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
@@ -158,7 +158,6 @@ import org.apache.hadoop.hbase.wal.FaultyFSLog;
 import org.apache.hadoop.hbase.wal.WAL;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALKeyImpl;
 import org.apache.hadoop.hbase.wal.WALProvider;
 import org.apache.hadoop.hbase.wal.WALProvider.Writer;
@@ -6282,20 +6281,20 @@ public class TestHRegion {
               .setRow(a)
               .setFamily(fam1)
               .setTimestamp(HConstants.LATEST_TIMESTAMP)
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .build()),
         // this is outside the region boundary
         new Put(c).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
               .setRow(c)
               .setFamily(fam1)
               .setTimestamp(HConstants.LATEST_TIMESTAMP)
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .build()),
         new Put(b).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
               .setRow(b)
               .setFamily(fam1)
               .setTimestamp(HConstants.LATEST_TIMESTAMP)
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .build())
     };
 
@@ -6331,13 +6330,13 @@ public class TestHRegion {
                 .setRow(a)
                 .setFamily(fam1)
                 .setTimestamp(HConstants.LATEST_TIMESTAMP)
-                .setType(CellBuilder.DataType.Put)
+                .setType(DataType.Put)
                 .build()),
             new Put(b).add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
                 .setRow(b)
                 .setFamily(fam1)
                 .setTimestamp(HConstants.LATEST_TIMESTAMP)
-                .setType(CellBuilder.DataType.Put)
+                .setType(DataType.Put)
                 .build()),
         };
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
index 39ed9df..6195848 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStore.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellComparator;
@@ -69,9 +68,9 @@ import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.MemoryCompactionPolicy;
+import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
@@ -1049,13 +1048,13 @@ public class TestHStore {
     long seqId = 100;
     long timestamp = System.currentTimeMillis();
     Cell cell0 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
-        .setQualifier(qf1).setTimestamp(timestamp).setType(CellBuilder.DataType.Put)
+        .setQualifier(qf1).setTimestamp(timestamp).setType(Cell.DataType.Put)
         .setValue(qf1).build();
     PrivateCellUtil.setSequenceId(cell0, seqId);
     testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Collections.emptyList());
 
     Cell cell1 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
-        .setQualifier(qf2).setTimestamp(timestamp).setType(CellBuilder.DataType.Put)
+        .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.DataType.Put)
         .setValue(qf1).build();
     PrivateCellUtil.setSequenceId(cell1, seqId);
     testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1));
@@ -1063,7 +1062,7 @@ public class TestHStore {
     seqId = 101;
     timestamp = System.currentTimeMillis();
     Cell cell2 = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row2).setFamily(family)
-        .setQualifier(qf2).setTimestamp(timestamp).setType(CellBuilder.DataType.Put)
+        .setQualifier(qf2).setTimestamp(timestamp).setType(Cell.DataType.Put)
         .setValue(qf1).build();
     PrivateCellUtil.setSequenceId(cell2, seqId);
     testNumberOfMemStoreScannersAfterFlush(Arrays.asList(cell0), Arrays.asList(cell1, cell2));
@@ -1118,7 +1117,7 @@ public class TestHStore {
   private Cell createCell(byte[] row, byte[] qualifier, long ts, long sequenceId, byte[] value)
       throws IOException {
     Cell c = CellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(row).setFamily(family)
-        .setQualifier(qualifier).setTimestamp(ts).setType(CellBuilder.DataType.Put)
+        .setQualifier(qualifier).setTimestamp(ts).setType(Cell.DataType.Put)
         .setValue(value).build();
     PrivateCellUtil.setSequenceId(c, sequenceId);
     return c;

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
index e8d8b7e..d61f98e 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/token/TestTokenAuthentication.java
@@ -39,9 +39,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hbase.ChoreService;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
-import org.apache.hadoop.hbase.ExtendedCellBuilder;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.RawCellBuilder;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.ClusterConnection;
@@ -331,7 +331,7 @@ public class TestTokenAuthentication {
         }
 
         @Override
-        public ExtendedCellBuilder getCellBuilder() {
+        public RawCellBuilder getCellBuilder() {
           return null;
         }
       });

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
index cfe4157..29ddfce 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/ExpAsStringVisibilityLabelServiceImpl.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.AuthUtil;
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilder;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
@@ -111,7 +112,7 @@ public class ExpAsStringVisibilityLabelServiceImpl implements VisibilityLabelSer
           .setFamily(LABELS_TABLE_FAMILY)
           .setQualifier(auth)
           .setTimestamp(p.getTimeStamp())
-          .setType(CellBuilder.DataType.Put)
+          .setType(DataType.Put)
           .setValue(DUMMY_VALUE)
           .build());
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
index 98e62eb..27850ef 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
@@ -38,10 +38,12 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
+
 import javax.security.auth.callback.Callback;
 import javax.security.auth.callback.UnsupportedCallbackException;
 import javax.security.sasl.AuthorizeCallback;
 import javax.security.sasl.SaslServer;
+
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
 import org.apache.commons.cli.OptionGroup;
@@ -49,6 +51,7 @@ import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilder;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
@@ -1350,7 +1353,7 @@ public class ThriftServerRunner implements Runnable {
                   .setFamily(famAndQf[0])
                   .setQualifier(famAndQf[1])
                   .setTimestamp(put.getTimeStamp())
-                  .setType(CellBuilder.DataType.Put)
+                  .setType(DataType.Put)
                   .setValue(m.value != null ? getBytes(m.value)
                       : HConstants.EMPTY_BYTE_ARRAY)
                   .build());
@@ -1418,7 +1421,7 @@ public class ThriftServerRunner implements Runnable {
                     .setFamily(famAndQf[0])
                     .setQualifier(famAndQf[1])
                     .setTimestamp(put.getTimeStamp())
-                    .setType(CellBuilder.DataType.Put)
+                    .setType(DataType.Put)
                     .setValue(m.value != null ? getBytes(m.value)
                         : HConstants.EMPTY_BYTE_ARRAY)
                     .build());
@@ -1901,7 +1904,7 @@ public class ThriftServerRunner implements Runnable {
             .setFamily(famAndQf[0])
             .setQualifier(famAndQf[1])
             .setTimestamp(put.getTimeStamp())
-            .setType(CellBuilder.DataType.Put)
+            .setType(DataType.Put)
             .setValue(mput.value != null ? getBytes(mput.value)
                 : HConstants.EMPTY_BYTE_ARRAY)
             .build());

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4056d26/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
----------------------------------------------------------------------
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
index 59fe1f4..ced9a02 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftUtilities.java
@@ -28,7 +28,7 @@ import java.util.Map;
 
 import org.apache.commons.collections4.MapUtils;
 import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.CellBuilder;
+import org.apache.hadoop.hbase.Cell.DataType;
 import org.apache.hadoop.hbase.CellBuilderFactory;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.CellUtil;
@@ -227,7 +227,7 @@ public class ThriftUtilities {
               .setFamily(columnValue.getFamily())
               .setQualifier(columnValue.getQualifier())
               .setTimestamp(columnValue.getTimestamp())
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(columnValue.getValue())
               .build());
         } else {
@@ -236,7 +236,7 @@ public class ThriftUtilities {
               .setFamily(columnValue.getFamily())
               .setQualifier(columnValue.getQualifier())
               .setTimestamp(out.getTimeStamp())
-              .setType(CellBuilder.DataType.Put)
+              .setType(DataType.Put)
               .setValue(columnValue.getValue())
               .build());
         }


[11/24] hbase git commit: HBASE-19555 TestSplitTransactionOnCluster is flaky

Posted by zh...@apache.org.
HBASE-19555 TestSplitTransactionOnCluster is flaky

Signed-off-by: tedyu <yu...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dbe409e2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dbe409e2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dbe409e2

Branch: refs/heads/HBASE-19397
Commit: dbe409e2c7af5312ab370165b89b57d176486e27
Parents: 03e79b7
Author: Peter Somogyi <ps...@cloudera.com>
Authored: Tue Dec 19 15:04:38 2017 +0100
Committer: tedyu <yu...@gmail.com>
Committed: Tue Dec 19 09:25:57 2017 -0800

----------------------------------------------------------------------
 .../regionserver/TestSplitTransactionOnCluster.java | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/dbe409e2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
index 92833fd..619ffd0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransactionOnCluster.java
@@ -313,7 +313,7 @@ public class TestSplitTransactionOnCluster {
       // Get region pre-split.
       HRegionServer server = cluster.getRegionServer(tableRegionIndex);
       printOutRegions(server, "Initial regions: ");
-      int regionCount = ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size();
+      int regionCount = cluster.getRegions(hri.getTable()).size();
       regionStates.updateRegionState(hri, RegionState.State.CLOSING);
 
       // Now try splitting.... should fail.  And each should successfully
@@ -324,8 +324,7 @@ public class TestSplitTransactionOnCluster {
       // Wait around a while and assert count of regions remains constant.
       for (int i = 0; i < 10; i++) {
         Thread.sleep(100);
-        assertEquals(regionCount, ProtobufUtil.getOnlineRegions(
-          server.getRSRpcServices()).size());
+        assertEquals(regionCount, cluster.getRegions(hri.getTable()).size());
       }
       regionStates.updateRegionState(hri, State.OPEN);
       // Now try splitting and it should work.
@@ -367,13 +366,13 @@ public class TestSplitTransactionOnCluster {
       // Get region pre-split.
       HRegionServer server = cluster.getRegionServer(tableRegionIndex);
       printOutRegions(server, "Initial regions: ");
-      int regionCount = ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size();
+      int regionCount = cluster.getRegions(hri.getTable()).size();
       // Now split.
       split(hri, server, regionCount);
       // Get daughters
       List<HRegion> daughters = checkAndGetDaughters(tableName);
       // Now split one of the daughters.
-      regionCount = ProtobufUtil.getOnlineRegions(server.getRSRpcServices()).size();
+      regionCount = cluster.getRegions(hri.getTable()).size();
       RegionInfo daughter = daughters.get(0).getRegionInfo();
       LOG.info("Daughter we are going to split: " + daughter);
       // Compact first to ensure we have cleaned up references -- else the split
@@ -809,14 +808,13 @@ public class TestSplitTransactionOnCluster {
 
   private void split(final RegionInfo hri, final HRegionServer server, final int regionCount)
       throws IOException, InterruptedException {
-    this.admin.splitRegion(hri.getRegionName());
-    for (int i = 0; this.cluster.getRegions(hri.getTable()).size() <= regionCount && i < 60; i++) {
+    admin.splitRegion(hri.getRegionName());
+    for (int i = 0; cluster.getRegions(hri.getTable()).size() <= regionCount && i < 60; i++) {
       LOG.debug("Waiting on region " + hri.getRegionNameAsString() + " to split");
       Thread.sleep(2000);
     }
-
     assertFalse("Waited too long for split",
-      this.cluster.getRegions(hri.getTable()).size() <= regionCount);
+        cluster.getRegions(hri.getTable()).size() <= regionCount);
   }
 
   /**


[07/24] hbase git commit: HBASE-19122 Suspect methods on Cell to be deprecated; ADDENDUM2 to fix more compile errors

Posted by zh...@apache.org.
 HBASE-19122 Suspect methods on Cell to be deprecated; ADDENDUM2 to fix
    more compile errors


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e343b0c3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e343b0c3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e343b0c3

Branch: refs/heads/HBASE-19397
Commit: e343b0c3e3affbab44604231486a42dba6ee6f1e
Parents: 6a9b148
Author: Michael Stack <st...@apache.org>
Authored: Mon Dec 18 16:12:21 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Mon Dec 18 16:12:21 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hbase/RawCellBuilder.java | 62 ++++++++++++++++++++
 .../hadoop/hbase/RawCellBuilderFactory.java     | 39 ++++++++++++
 2 files changed, 101 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/e343b0c3/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCellBuilder.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCellBuilder.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCellBuilder.java
new file mode 100644
index 0000000..22bb358
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCellBuilder.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.util.List;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Allows creating a cell with a list of {@link Tag}s
+ */
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC)
+public interface RawCellBuilder extends CellBuilder {
+  @Override
+  RawCellBuilder setRow(final byte[] row);
+  @Override
+  RawCellBuilder setRow(final byte[] row, final int rOffset, final int rLength);
+
+  @Override
+  RawCellBuilder setFamily(final byte[] family);
+  @Override
+  RawCellBuilder setFamily(final byte[] family, final int fOffset, final int fLength);
+
+  @Override
+  RawCellBuilder setQualifier(final byte[] qualifier);
+  @Override
+  RawCellBuilder setQualifier(final byte[] qualifier, final int qOffset, final int qLength);
+
+  @Override
+  RawCellBuilder setTimestamp(final long timestamp);
+
+  @Override
+  RawCellBuilder setType(final Cell.DataType type);
+
+  @Override
+  RawCellBuilder setValue(final byte[] value);
+  @Override
+  RawCellBuilder setValue(final byte[] value, final int vOffset, final int vLength);
+
+  RawCellBuilder setTags(final List<Tag> tags);
+
+  @Override
+  RawCell build();
+
+  @Override
+  RawCellBuilder clear();
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/e343b0c3/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCellBuilderFactory.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCellBuilderFactory.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCellBuilderFactory.java
new file mode 100644
index 0000000..8d0c1ab
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/RawCellBuilderFactory.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * Factory for creating cells for CPs (coprocessors). It always performs a deep copy
+ * ({@link CellBuilderType#DEEP_COPY}) when creating cells.
+ */
+@InterfaceAudience.Private
+public final class RawCellBuilderFactory {
+
+  /**
+   * @return a new {@link RawCellBuilder} for creating cells
+   */
+  public static RawCellBuilder create() {
+    return new KeyValueBuilder();
+  }
+
+  private RawCellBuilderFactory() {
+  }
+}
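
Together, the two new files give coprocessors a public, tag-aware builder in place of the private ExtendedCellBuilder that the [03/24] commit removed from the CP surface. A minimal usage sketch based only on the interface and factory above (the tag type (byte) 1 and the row/family/qualifier values are made up):

    import java.util.Collections;

    import org.apache.hadoop.hbase.ArrayBackedTag;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.RawCell;
    import org.apache.hadoop.hbase.RawCellBuilder;
    import org.apache.hadoop.hbase.RawCellBuilderFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RawCellBuilderExample {
      public static void main(String[] args) {
        RawCellBuilder builder = RawCellBuilderFactory.create(); // always a deep copy
        RawCell cell = builder
            .setRow(Bytes.toBytes("row0"))
            .setFamily(Bytes.toBytes("f"))
            .setQualifier(Bytes.toBytes("q"))
            .setTimestamp(System.currentTimeMillis())
            .setType(Cell.DataType.Put)
            .setValue(Bytes.toBytes("v"))
            // setTags is what RawCellBuilder adds over plain CellBuilder.
            .setTags(Collections.singletonList(
                new ArrayBackedTag((byte) 1, Bytes.toBytes("tag-value"))))
            .build();
        System.out.println(cell);
      }
    }

Inside a coprocessor, the same builder comes from the environment's getCellBuilder(), as wired up in RegionCoprocessorHost in the [03/24] commit above.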


[19/24] hbase git commit: HBASE-19556: Remove TestAssignmentManager#testGoodSplit, which no longer makes sense

Posted by zh...@apache.org.
HBASE-19556: Remove TestAssignmentManager#testGoodSplit, which no longer makes sense


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/70608acf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/70608acf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/70608acf

Branch: refs/heads/HBASE-19397
Commit: 70608acf289f6de4aeb8f159667a1f21397ec433
Parents: 32f6fd4
Author: Yi Liang <ea...@gmail.com>
Authored: Tue Dec 19 11:31:44 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Tue Dec 19 13:55:05 2017 -0800

----------------------------------------------------------------------
 .../assignment/TestAssignmentManager.java       | 34 --------------------
 1 file changed, 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/70608acf/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
index 21f7ffb..f4365ea 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
@@ -169,36 +169,6 @@ public class TestAssignmentManager {
     if (this.am.waitServerReportEvent(null, null)) throw new UnexpectedStateException();
   }
 
-  @Ignore @Test // TODO
-  public void testGoodSplit() throws Exception {
-    TableName tableName = TableName.valueOf(this.name.getMethodName());
-    RegionInfo hri = RegionInfoBuilder.newBuilder(tableName)
-        .setStartKey(Bytes.toBytes(0))
-        .setEndKey(Bytes.toBytes(2))
-        .setSplit(false)
-        .setRegionId(0)
-        .build();
-    SplitTableRegionProcedure split =
-        new SplitTableRegionProcedure(this.master.getMasterProcedureExecutor().getEnvironment(),
-            hri, Bytes.toBytes(1));
-    rsDispatcher.setMockRsExecutor(new GoodSplitExecutor());
-    long st = System.currentTimeMillis();
-    Thread t = new Thread() {
-      public void run() {
-        try {
-          waitOnFuture(submitProcedure(split));
-        } catch (Exception e) {
-          e.printStackTrace();
-        }
-      }
-    };
-    t.start();
-    t.join();
-    long et = System.currentTimeMillis();
-    float sec = ((et - st) / 1000.0f);
-    LOG.info(String.format("[T] Splitting in %s", StringUtils.humanTimeDiff(et - st)));
-  }
-
   @Test
   public void testAssignWithGoodExec() throws Exception {
     // collect AM metrics before test
@@ -865,10 +835,6 @@ public class TestAssignmentManager {
     }
   }
 
-  private class GoodSplitExecutor extends NoopRsExecutor {
-
-  }
-
   private void collectAssignmentManagerMetrics() {
     assignSubmittedCount = assignProcMetrics.getSubmittedCounter().getCount();
     assignFailedCount = assignProcMetrics.getFailedCounter().getCount();


[20/24] hbase git commit: HBASE-15482 Provide an option to skip calculating block locations for SnapshotInputFormat

Posted by zh...@apache.org.
HBASE-15482 Provide an option to skip calculating block locations for SnapshotInputFormat

Signed-off-by: tedyu <yu...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5e7d16a3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5e7d16a3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5e7d16a3

Branch: refs/heads/HBASE-19397
Commit: 5e7d16a3ceaeec5057474f9bae2d40d306f6dd8e
Parents: 70608ac
Author: Xiang Li <wa...@gmail.com>
Authored: Thu Dec 7 01:06:33 2017 +0800
Committer: tedyu <yu...@gmail.com>
Committed: Tue Dec 19 15:52:16 2017 -0800

----------------------------------------------------------------------
 .../mapreduce/TableSnapshotInputFormatImpl.java | 85 ++++++++++++++------
 .../mapred/TestTableSnapshotInputFormat.java    | 27 ++++++-
 .../TableSnapshotInputFormatTestBase.java       |  7 +-
 .../mapreduce/TestTableSnapshotInputFormat.java | 23 +++++-
 4 files changed, 110 insertions(+), 32 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/5e7d16a3/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
index bee4926..53eb9f4 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
@@ -70,7 +70,7 @@ public class TableSnapshotInputFormatImpl {
   // key for specifying the root dir of the restored snapshot
   protected static final String RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
 
-  /** See {@link #getBestLocations(Configuration, HDFSBlocksDistribution)} */
+  /** See {@link #getBestLocations(Configuration, HDFSBlocksDistribution, int)} */
   private static final String LOCALITY_CUTOFF_MULTIPLIER =
     "hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
   private static final float DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
@@ -87,6 +87,19 @@ public class TableSnapshotInputFormatImpl {
   public static final String NUM_SPLITS_PER_REGION = "hbase.mapreduce.splits.per.region";
 
   /**
+   * Whether to calculate the block location for splits. Defaults to true.
+   * If the computing layer runs outside of the HBase cluster, block locality does not matter.
+   * Setting this value to false skips the calculation and saves some time.
+   *
+   * Set access modifier to "public" so that these can be accessed by the test classes of
+   * both org.apache.hadoop.hbase.mapred
+   * and org.apache.hadoop.hbase.mapreduce.
+   */
+  public static final String  SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY =
+      "hbase.TableSnapshotInputFormat.locality.enabled";
+  public static final boolean SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT = true;
+
+  /**
    * Implementation class for InputSplit logic common between mapred and mapreduce.
    */
   public static class InputSplit implements Writable {
@@ -356,6 +369,9 @@ public class TableSnapshotInputFormatImpl {
 
     Path tableDir = FSUtils.getTableDir(restoreDir, htd.getTableName());
 
+    boolean localityEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY,
+                                              SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT);
+
     List<InputSplit> splits = new ArrayList<>();
     for (HRegionInfo hri : regionManifests) {
       // load region descriptor
@@ -365,36 +381,42 @@ public class TableSnapshotInputFormatImpl {
         for (int i = 0; i < sp.length - 1; i++) {
           if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), sp[i],
                   sp[i + 1])) {
-            // compute HDFS locations from snapshot files (which will get the locations for
-            // referred hfiles)
-            List<String> hosts = getBestLocations(conf,
-                    HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir));
+            List<String> hosts =
+                calculateLocationsForInputSplit(conf, htd, hri, tableDir, localityEnabled);
 
-            int len = Math.min(3, hosts.size());
-            hosts = hosts.subList(0, len);
             Scan boundedScan = new Scan(scan);
             boundedScan.setStartRow(sp[i]);
             boundedScan.setStopRow(sp[i + 1]);
+
             splits.add(new InputSplit(htd, hri, hosts, boundedScan, restoreDir));
           }
         }
       } else {
         if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(),
-          hri.getStartKey(), hri.getEndKey())) {
-          // compute HDFS locations from snapshot files (which will get the locations for
-          // referred hfiles)
-          List<String> hosts = getBestLocations(conf,
-                  HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir));
-
-          int len = Math.min(3, hosts.size());
-          hosts = hosts.subList(0, len);
+            hri.getStartKey(), hri.getEndKey())) {
+          List<String> hosts =
+              calculateLocationsForInputSplit(conf, htd, hri, tableDir, localityEnabled);
           splits.add(new InputSplit(htd, hri, hosts, scan, restoreDir));
         }
       }
     }
 
     return splits;
+  }
 
+  /**
+   * Compute block locations for snapshot files (which will get the locations for referred hfiles)
+   * only when localityEnabled is true.
+   */
+  private static List<String> calculateLocationsForInputSplit(Configuration conf,
+      TableDescriptor htd, HRegionInfo hri, Path tableDir, boolean localityEnabled)
+      throws IOException {
+    if (localityEnabled) { // block locality matters
+      return getBestLocations(conf,
+                              HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir));
+    } else { // block locality does not matter
+      return null;
+    }
   }
 
   /**
@@ -408,30 +430,41 @@ public class TableSnapshotInputFormatImpl {
    * we are doing a simple heuristic, where we will pass all hosts which have at least 80%
    * (hbase.tablesnapshotinputformat.locality.cutoff.multiplier) as much block locality as the top
    * host with the best locality.
+   * Return at most numTopsAtMost locations if there are more than that.
    */
-  public static List<String> getBestLocations(
-      Configuration conf, HDFSBlocksDistribution blockDistribution) {
-    List<String> locations = new ArrayList<>(3);
-
+  private static List<String> getBestLocations(Configuration conf,
+      HDFSBlocksDistribution blockDistribution, int numTopsAtMost) {
     HostAndWeight[] hostAndWeights = blockDistribution.getTopHostsWithWeights();
 
-    if (hostAndWeights.length == 0) {
-      return locations;
+    if (hostAndWeights.length == 0) { // no matter what numTopsAtMost is
+      return null;
     }
 
+    if (numTopsAtMost < 1) { // invalid if numTopsAtMost < 1, correct it to be 1
+      numTopsAtMost = 1;
+    }
+    int top = Math.min(numTopsAtMost, hostAndWeights.length);
+    List<String> locations = new ArrayList<>(top);
     HostAndWeight topHost = hostAndWeights[0];
     locations.add(topHost.getHost());
 
-    // Heuristic: filter all hosts which have at least cutoffMultiplier % of block locality
+    if (top == 1) { // only care about the top host
+      return locations;
+    }
+
+    // When top >= 2,
+    // do the heuristic: filter all hosts which have at least cutoffMultiplier % of block locality
     double cutoffMultiplier
             = conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER);
 
     double filterWeight = topHost.getWeight() * cutoffMultiplier;
 
-    for (int i = 1; i < hostAndWeights.length; i++) {
+    for (int i = 1; i < top; i++) {
       if (hostAndWeights[i].getWeight() >= filterWeight) {
         locations.add(hostAndWeights[i].getHost());
       } else {
+        // As hostAndWeights is sorted in descending order,
+        // we can break the loop as soon as we meet a weight less than filterWeight.
         break;
       }
     }
@@ -439,6 +472,12 @@ public class TableSnapshotInputFormatImpl {
     return locations;
   }
 
+  public static List<String> getBestLocations(Configuration conf,
+      HDFSBlocksDistribution blockDistribution) {
+    // Usually the top 3 nodes hold highly local blocks, so default to 3.
+    return getBestLocations(conf, blockDistribution, 3);
+  }
+
   private static String getSnapshotName(Configuration conf) {
     String snapshotName = conf.get(SNAPSHOT_NAME_KEY);
     if (snapshotName == null) {

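As an aside, a minimal usage sketch for the new switch; the class name and job wiring below are hypothetical, only the config key and its default come from this patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class DisableSnapshotLocalityExample {
  public static void main(String[] args) {
    // When the computing layer runs outside the HBase cluster, block locality
    // does not matter, so the block-location calculation can be skipped.
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.TableSnapshotInputFormat.locality.enabled", false); // default: true
    // Hand conf to the Job that uses TableSnapshotInputFormat (job setup omitted).
    // With the flag off, getSplits() passes null host lists, so each split's
    // getLocations() returns an empty array, as the updated tests assert.
  }
}
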
http://git-wip-us.apache.org/repos/asf/hbase/blob/5e7d16a3/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java
index be36b6a..8b4e918 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hbase.mapred;
 
+import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT;
 import static org.mockito.Mockito.mock;
 
 import org.apache.hadoop.fs.Path;
@@ -138,7 +139,10 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
   @Test
   @Override
   public void testWithMockedMapReduceMultiRegion() throws Exception {
-    testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 10);
+    // It does not matter whether true or false is given to setLocalityEnabledTo,
+    // because it is not read in testWithMockedMapReduce().
+    testWithMockedMapReduce(
+        UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 10, true);
   }
 
   @Test
@@ -165,7 +169,8 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
 
   @Override
   protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
-      int numRegions, int numSplitsPerRegion, int expectedNumSplits) throws Exception {
+      int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo)
+      throws Exception {
     setupCluster();
     final TableName tableName = TableName.valueOf(name.getMethodName());
     try {
@@ -173,6 +178,9 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
         util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);
 
       JobConf job = new JobConf(util.getConfiguration());
+      // setLocalityEnabledTo is ignored no matter what is specified, so as to test the case that
+      // SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY is not explicitly specified
+      // and the default value is taken.
       Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName);
 
       if (numSplitsPerRegion > 1) {
@@ -206,10 +214,25 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
     HBaseTestingUtility.SeenRowTracker rowTracker =
       new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);
 
+    // SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY is not explicitly specified,
+    // so the default value is taken.
+    boolean localityEnabled = SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT;
+
     for (int i = 0; i < splits.length; i++) {
       // validate input split
       InputSplit split = splits[i];
       Assert.assertTrue(split instanceof TableSnapshotInputFormat.TableSnapshotRegionSplit);
+      if (localityEnabled) {
+        // When localityEnabled is true, meant to verify split.getLocations()
+        // by the following statement:
+        //   Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0);
+        // However, getLocations() of some splits could return an empty array (length is 0),
+        // so drop the verification on length.
+        // TODO: investigate how to verify split.getLocations() when localityEnabled is true
+        Assert.assertTrue(split.getLocations() != null);
+      } else {
+        Assert.assertTrue(split.getLocations() != null && split.getLocations().length == 0);
+      }
 
       // validate record reader
       OutputCollector collector = mock(OutputCollector.class);

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e7d16a3/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
index 362dca1..4e11275 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java
@@ -78,7 +78,8 @@ public abstract class TableSnapshotInputFormatTestBase {
   }
 
   protected abstract void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
-    int numRegions, int numSplitsPerRegion, int expectedNumSplits) throws Exception;
+    int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo)
+    throws Exception;
 
   protected abstract void testWithMapReduceImpl(HBaseTestingUtility util, TableName tableName,
     String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, int expectedNumSplits,
@@ -90,12 +91,12 @@ public abstract class TableSnapshotInputFormatTestBase {
 
   @Test
   public void testWithMockedMapReduceSingleRegion() throws Exception {
-    testWithMockedMapReduce(UTIL, "testWithMockedMapReduceSingleRegion", 1, 1, 1);
+    testWithMockedMapReduce(UTIL, "testWithMockedMapReduceSingleRegion", 1, 1, 1, true);
   }
 
   @Test
   public void testWithMockedMapReduceMultiRegion() throws Exception {
-    testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 8);
+    testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 8, false);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e7d16a3/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
index 890eb2f..2ed6081 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hbase.mapreduce;
 
+import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT;
+import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY;
+
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -98,7 +101,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
     Configuration conf = UTIL.getConfiguration();
 
     HDFSBlocksDistribution blockDistribution = new HDFSBlocksDistribution();
-    Assert.assertEquals(Lists.newArrayList(),
+    Assert.assertEquals(null,
       TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution));
 
     blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1);
@@ -132,7 +135,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
     blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 6);
     blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 9);
 
-    Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4", "h1"),
+    Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4"),
       TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution));
   }
 
@@ -210,14 +213,17 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
 
   @Override
   public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
-      int numRegions, int numSplitsPerRegion, int expectedNumSplits) throws Exception {
+      int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo)
+      throws Exception {
     setupCluster();
     final TableName tableName = TableName.valueOf(name.getMethodName());
     try {
       createTableAndSnapshot(
         util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);
 
-      Job job = new Job(util.getConfiguration());
+      Configuration conf = util.getConfiguration();
+      conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, setLocalityEnabledTo);
+      Job job = new Job(conf);
       Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName);
       Scan scan = new Scan(getStartRow(), getEndRow()); // limit the scan
 
@@ -304,10 +310,19 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa
     HBaseTestingUtility.SeenRowTracker rowTracker =
         new HBaseTestingUtility.SeenRowTracker(startRow, stopRow);
 
+    boolean localityEnabled =
+        job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY,
+                                          SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT);
+
     for (int i = 0; i < splits.size(); i++) {
       // validate input split
       InputSplit split = splits.get(i);
       Assert.assertTrue(split instanceof TableSnapshotRegionSplit);
+      if (localityEnabled) {
+        Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0);
+      } else {
+        Assert.assertTrue(split.getLocations() != null && split.getLocations().length == 0);
+      }
 
       // validate record reader
       TaskAttemptContext taskAttemptContext = mock(TaskAttemptContext.class);


[10/24] hbase git commit: HBASE-19492 Add EXCLUDE_NAMESPACE and EXCLUDE_TABLECFS support to replication peer config

Posted by zh...@apache.org.
HBASE-19492 Add EXCLUDE_NAMESPACE and EXCLUDE_TABLECFS support to replication peer config


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/03e79b79
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/03e79b79
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/03e79b79

Branch: refs/heads/HBASE-19397
Commit: 03e79b79949a63f7320b4c51fecca40f93659bd9
Parents: 7a7e55b
Author: Guanghao Zhang <zg...@apache.org>
Authored: Tue Dec 12 11:19:57 2017 +0800
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Tue Dec 19 16:53:43 2017 +0800

----------------------------------------------------------------------
 .../replication/ReplicationPeerConfigUtil.java  |  35 ++-
 .../replication/ReplicationPeerConfig.java      |  65 ++++--
 .../src/main/protobuf/Replication.proto         |   2 +
 .../replication/ReplicationPeersZKImpl.java     |   2 +
 .../master/replication/ReplicationManager.java  |  51 +++--
 .../NamespaceTableCfWALEntryFilter.java         | 101 ++++++---
 .../replication/TestReplicationAdmin.java       | 219 ++++++++++++++++---
 .../TestReplicationWALEntryFilters.java         | 131 +++++++++--
 .../src/main/ruby/hbase/replication_admin.rb    |  49 ++++-
 hbase-shell/src/main/ruby/shell.rb              |   2 +
 .../src/main/ruby/shell/commands/list_peers.rb  |  17 +-
 .../commands/set_peer_exclude_namespaces.rb     |  52 +++++
 .../shell/commands/set_peer_exclude_tableCFs.rb |  51 +++++
 .../ruby/shell/commands/set_peer_namespaces.rb  |   8 +-
 .../shell/commands/set_peer_replicate_all.rb    |   8 +-
 .../ruby/shell/commands/set_peer_tableCFs.rb    |   6 +-
 .../test/ruby/hbase/replication_admin_test.rb   |  42 ++++
 17 files changed, 715 insertions(+), 126 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
index 52a3c93..d8c86f0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationPeerConfigUtil.java
@@ -48,6 +48,7 @@ import java.util.Map;
 import java.util.HashMap;
 import java.util.ArrayList;
 import java.util.Set;
+import java.util.stream.Collectors;
 
 /**
  * Helper for TableCFs Operations.
@@ -289,11 +290,8 @@ public final class ReplicationPeerConfigUtil {
 
     List<ByteString> namespacesList = peer.getNamespacesList();
     if (namespacesList != null && namespacesList.size() != 0) {
-      Set<String> namespaces = new HashSet<>();
-      for (ByteString namespace : namespacesList) {
-        namespaces.add(namespace.toStringUtf8());
-      }
-      peerConfig.setNamespaces(namespaces);
+      peerConfig.setNamespaces(
+        namespacesList.stream().map(ByteString::toStringUtf8).collect(Collectors.toSet()));
     }
 
     if (peer.hasBandwidth()) {
@@ -304,6 +302,19 @@ public final class ReplicationPeerConfigUtil {
       peerConfig.setReplicateAllUserTables(peer.getReplicateAll());
     }
 
+    Map<TableName, ? extends Collection<String>> excludeTableCFsMap =
+        convert2Map(peer.getExcludeTableCfsList()
+            .toArray(new ReplicationProtos.TableCF[peer.getExcludeTableCfsCount()]));
+    if (excludeTableCFsMap != null) {
+      peerConfig.setExcludeTableCFsMap(excludeTableCFsMap);
+    }
+
+    List<ByteString> excludeNamespacesList = peer.getExcludeNamespacesList();
+    if (excludeNamespacesList != null && excludeNamespacesList.size() != 0) {
+      peerConfig.setExcludeNamespaces(
+        excludeNamespacesList.stream().map(ByteString::toStringUtf8).collect(Collectors.toSet()));
+    }
+
     return peerConfig;
   }
 
@@ -346,6 +357,20 @@ public final class ReplicationPeerConfigUtil {
 
     builder.setBandwidth(peerConfig.getBandwidth());
     builder.setReplicateAll(peerConfig.replicateAllUserTables());
+
+    ReplicationProtos.TableCF[] excludeTableCFs = convert(peerConfig.getExcludeTableCFsMap());
+    if (excludeTableCFs != null) {
+      for (int i = 0; i < excludeTableCFs.length; i++) {
+        builder.addExcludeTableCfs(excludeTableCFs[i]);
+      }
+    }
+    Set<String> excludeNamespaces = peerConfig.getExcludeNamespaces();
+    if (excludeNamespaces != null) {
+      for (String namespace : excludeNamespaces) {
+        builder.addExcludeNamespaces(ByteString.copyFromUtf8(namespace));
+      }
+    }
+
     return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
index 9e20829..52a5fe9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfig.java
@@ -44,6 +44,8 @@ public class ReplicationPeerConfig {
   private long bandwidth = 0;
   // Default value is true, means replicate all user tables to peer cluster.
   private boolean replicateAllUserTables = true;
+  private Map<TableName, ? extends Collection<String>> excludeTableCFsMap = null;
+  private Set<String> excludeNamespaces = null;
 
   public ReplicationPeerConfig() {
     this.peerData = new TreeMap<>(Bytes.BYTES_COMPARATOR);
@@ -121,16 +123,44 @@ public class ReplicationPeerConfig {
     return this;
   }
 
+  public Map<TableName, List<String>> getExcludeTableCFsMap() {
+    return (Map<TableName, List<String>>) excludeTableCFsMap;
+  }
+
+  public ReplicationPeerConfig setExcludeTableCFsMap(Map<TableName,
+                                              ? extends Collection<String>> tableCFsMap) {
+    this.excludeTableCFsMap = tableCFsMap;
+    return this;
+  }
+
+  public Set<String> getExcludeNamespaces() {
+    return this.excludeNamespaces;
+  }
+
+  public ReplicationPeerConfig setExcludeNamespaces(Set<String> namespaces) {
+    this.excludeNamespaces = namespaces;
+    return this;
+  }
+
   @Override
   public String toString() {
     StringBuilder builder = new StringBuilder("clusterKey=").append(clusterKey).append(",");
     builder.append("replicationEndpointImpl=").append(replicationEndpointImpl).append(",");
     builder.append("replicateAllUserTables=").append(replicateAllUserTables).append(",");
-    if (namespaces != null) {
-      builder.append("namespaces=").append(namespaces.toString()).append(",");
-    }
-    if (tableCFsMap != null) {
-      builder.append("tableCFs=").append(tableCFsMap.toString()).append(",");
+    if (replicateAllUserTables) {
+      if (excludeNamespaces != null) {
+        builder.append("excludeNamespaces=").append(excludeNamespaces.toString()).append(",");
+      }
+      if (excludeTableCFsMap != null) {
+        builder.append("excludeTableCFsMap=").append(excludeTableCFsMap.toString()).append(",");
+      }
+    } else {
+      if (namespaces != null) {
+        builder.append("namespaces=").append(namespaces.toString()).append(",");
+      }
+      if (tableCFsMap != null) {
+        builder.append("tableCFs=").append(tableCFsMap.toString()).append(",");
+      }
     }
     builder.append("bandwidth=").append(bandwidth);
     return builder.toString();
@@ -142,17 +172,22 @@ public class ReplicationPeerConfig {
    * @return true if the table need replicate to the peer cluster
    */
   public boolean needToReplicate(TableName table) {
-    // If null means user has explicitly not configured any namespaces and table CFs
-    // so all the tables data are applicable for replication
-    if (namespaces == null && tableCFsMap == null) {
-      return true;
-    }
-    if (namespaces != null && namespaces.contains(table.getNamespaceAsString())) {
-      return true;
-    }
-    if (tableCFsMap != null && tableCFsMap.containsKey(table)) {
+    if (replicateAllUserTables) {
+      if (excludeNamespaces != null && excludeNamespaces.contains(table.getNamespaceAsString())) {
+        return false;
+      }
+      if (excludeTableCFsMap != null && excludeTableCFsMap.containsKey(table)) {
+        return false;
+      }
       return true;
+    } else {
+      if (namespaces != null && namespaces.contains(table.getNamespaceAsString())) {
+        return true;
+      }
+      if (tableCFsMap != null && tableCFsMap.containsKey(table)) {
+        return true;
+      }
+      return false;
     }
-    return false;
   }
 }

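To make the new exclude semantics concrete, a small sketch; the cluster key is hypothetical, while the setters and needToReplicate() behavior come from the class above:

import java.util.Collections;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class ExcludeNamespaceExample {
  public static void main(String[] args) {
    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
    peerConfig.setClusterKey("zk1:2181:/hbase"); // hypothetical peer cluster key
    // replicate_all keeps its default (true); only namespace "ns1" is excluded.
    peerConfig.setExcludeNamespaces(Collections.singleton("ns1"));

    System.out.println(peerConfig.needToReplicate(TableName.valueOf("ns1:t1"))); // false
    System.out.println(peerConfig.needToReplicate(TableName.valueOf("t2")));     // true
  }
}
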
http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-protocol-shaded/src/main/protobuf/Replication.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Replication.proto b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
index a1a7ade..8657c25 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Replication.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
@@ -46,6 +46,8 @@ message ReplicationPeer {
   repeated bytes namespaces = 6;
   optional int64 bandwidth = 7;
   optional bool replicate_all = 8;
+  repeated TableCF exclude_table_cfs = 9;
+  repeated bytes exclude_namespaces = 10;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index 2c3bbd5..ca99f65 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -369,6 +369,8 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
     existingConfig.setNamespaces(newConfig.getNamespaces());
     existingConfig.setBandwidth(newConfig.getBandwidth());
     existingConfig.setReplicateAllUserTables(newConfig.replicateAllUserTables());
+    existingConfig.setExcludeNamespaces(newConfig.getExcludeNamespaces());
+    existingConfig.setExcludeTableCFsMap(newConfig.getExcludeTableCFsMap());
 
     try {
       ZKUtil.setData(this.zookeeper, getPeerNode(id),

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
index f2a6c85..749448d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
@@ -118,15 +118,32 @@ public class ReplicationManager {
     return peers;
   }
 
-  private void checkPeerConfig(ReplicationPeerConfig peerConfig) throws ReplicationException,
-      IOException {
+  /**
+   * If the replicate_all flag is true, all user tables will be replicated to the peer cluster,
+   * and only exclude namespaces or exclude table-cfs (which will not be replicated to the
+   * peer cluster) may be configured.
+   *
+   * If the replicate_all flag is false, no user table is replicated by default, and only
+   * namespaces or table-cfs (which will be replicated to the peer cluster) may be configured.
+   */
+  private void checkPeerConfig(ReplicationPeerConfig peerConfig)
+      throws ReplicationException, IOException {
     if (peerConfig.replicateAllUserTables()) {
       if ((peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty())
           || (peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty())) {
-        throw new ReplicationException(
-          "Need clean namespaces or table-cfs config fisrtly when you want replicate all cluster");
+        throw new ReplicationException("Need clean namespaces or table-cfs config firstly"
+            + " when replicate_all flag is true");
       }
+      checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(),
+        peerConfig.getExcludeTableCFsMap());
     } else {
+      if ((peerConfig.getExcludeNamespaces() != null
+              && !peerConfig.getExcludeNamespaces().isEmpty())
+          || (peerConfig.getExcludeTableCFsMap() != null
+              && !peerConfig.getExcludeTableCFsMap().isEmpty())) {
+        throw new ReplicationException(
+            "Need to clean exclude-namespaces or exclude-table-cfs config first"
+                + " when the replicate_all flag is false");
+      }
       checkNamespacesAndTableCfsConfigConflict(peerConfig.getNamespaces(),
         peerConfig.getTableCFsMap());
     }
@@ -134,17 +151,19 @@ public class ReplicationManager {
   }
 
   /**
-   * Set a namespace in the peer config means that all tables in this namespace
-   * will be replicated to the peer cluster.
+   * Setting a namespace in the peer config means that all tables in this namespace will be
+   * replicated to the peer cluster.
+   * 1. If the peer config already has a namespace, then no table of this namespace may be
+   *    added to the peer config.
+   * 2. If the peer config already has a table, then this table's namespace may not be added
+   *    to the peer config.
    *
-   * 1. If you already have set a namespace in the peer config, then you can't set any table
-   *    of this namespace to the peer config.
-   * 2. If you already have set a table in the peer config, then you can't set this table's
-   *    namespace to the peer config.
-   *
-   * @param namespaces
-   * @param tableCfs
-   * @throws ReplicationException
+   * Setting an exclude namespace in the peer config means that no table in this namespace will
+   * be replicated to the peer cluster.
+   * 1. If the peer config already has an exclude namespace, then no exclude table of this
+   *    namespace may be added to the peer config.
+   * 2. If the peer config already has an exclude table, then this table's namespace may not be
+   *    added as an exclude namespace.
    */
   private void checkNamespacesAndTableCfsConfigConflict(Set<String> namespaces,
       Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
@@ -157,8 +176,8 @@ public class ReplicationManager {
     for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
       TableName table = entry.getKey();
       if (namespaces.contains(table.getNamespaceAsString())) {
-        throw new ReplicationException(
-            "Table-cfs config conflict with namespaces config in peer");
+        throw new ReplicationException("Table-cfs " + table + " is conflict with namespaces "
+            + table.getNamespaceAsString() + " in peer config");
       }
     }
   }

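For illustration, a sketch of a peer config that the new conflict check rejects; identifiers are illustrative, the rule itself is the one documented above:

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class ConflictingExcludeConfigExample {
  public static void main(String[] args) {
    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
    peerConfig.setClusterKey("zk1:2181:/hbase"); // hypothetical
    // Excluding namespace "ns1" already excludes every table in it, so also
    // excluding table "ns1:t1" conflicts, and adding this peer would fail in
    // checkNamespacesAndTableCfsConfigConflict().
    peerConfig.setExcludeNamespaces(Collections.singleton("ns1"));
    Map<TableName, List<String>> excludeTableCfs = new HashMap<>();
    excludeTableCfs.put(TableName.valueOf("ns1:t1"), null); // null means all cfs
    peerConfig.setExcludeTableCFsMap(excludeTableCfs);
  }
}
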
http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java
index 9a4cc6c..5068cce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/NamespaceTableCfWALEntryFilter.java
@@ -32,16 +32,17 @@ import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Predicate;
-
 /**
- * Filter a WAL Entry by namespaces and table-cfs config in the peer. It first filter entry
- * by namespaces config, then filter entry by table-cfs config.
+ * Filter a WAL Entry by the peer config: replicate_all flag, namespaces config, table-cfs config,
+ * exclude namespaces config, and exclude table-cfs config.
+ *
+ * If the replicate_all flag is true, all user tables will be replicated to the peer cluster, but
+ * you can set exclude namespaces or exclude table-cfs which will not be replicated.
+ * Note: setting an exclude namespace means that no table in this namespace will be replicated.
  *
- * 1. Set a namespace in peer config means that all tables in this namespace will be replicated.
- * 2. If the namespaces config is null, then the table-cfs config decide which table's edit
- *    can be replicated. If the table-cfs config is null, then the namespaces config decide
- *    which table's edit can be replicated.
+ * If the replicate_all flag is false, no user table is replicated by default, but you can set
+ * namespaces or table-cfs which will be replicated to the peer cluster.
+ * Note: setting a namespace means that all tables in this namespace will be replicated.
  */
 @InterfaceAudience.Private
 public class NamespaceTableCfWALEntryFilter implements WALEntryFilter, WALCellFilter {
@@ -61,7 +62,15 @@ public class NamespaceTableCfWALEntryFilter implements WALEntryFilter, WALCellFi
     ReplicationPeerConfig peerConfig = this.peer.getPeerConfig();
 
     if (peerConfig.replicateAllUserTables()) {
-      // replicate all user tables, so return entry directly
+      // replicate all user tables, but filter by exclude namespaces config
+      Set<String> excludeNamespaces = peerConfig.getExcludeNamespaces();
+
+      // return null (prevent replicating) if the logKey's namespace is in this peer's
+      // exclude namespaces list
+      if (excludeNamespaces != null && excludeNamespaces.contains(namespace)) {
+        return null;
+      }
+
       return entry;
     } else {
       // Not replicate all user tables, so filter by namespaces and table-cfs config
@@ -80,7 +89,7 @@ public class NamespaceTableCfWALEntryFilter implements WALEntryFilter, WALCellFi
 
       // Then filter by table-cfs config
       // return null(prevent replicating) if logKey's table isn't in this peer's
-      // replicaable namespace list and table list
+      // replicable tables list
       if (tableCFs == null || !tableCFs.containsKey(tabName)) {
         return null;
       }
@@ -93,34 +102,39 @@ public class NamespaceTableCfWALEntryFilter implements WALEntryFilter, WALCellFi
   public Cell filterCell(final Entry entry, Cell cell) {
     ReplicationPeerConfig peerConfig = this.peer.getPeerConfig();
     if (peerConfig.replicateAllUserTables()) {
-      // replicate all user tables, so return cell directly
+      // replicate all user tables, but filter by exclude table-cfs config
+      final Map<TableName, List<String>> excludeTableCfs = peerConfig.getExcludeTableCFsMap();
+      if (excludeTableCfs == null) {
+        return cell;
+      }
+
+      if (CellUtil.matchingColumn(cell, WALEdit.METAFAMILY, WALEdit.BULK_LOAD)) {
+        cell = bulkLoadFilter.filterCell(cell,
+          fam -> filterByExcludeTableCfs(entry.getKey().getTablename(), Bytes.toString(fam),
+            excludeTableCfs));
+      } else {
+        if (filterByExcludeTableCfs(entry.getKey().getTablename(),
+          Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()),
+          excludeTableCfs)) {
+          return null;
+        }
+      }
+
       return cell;
     } else {
+      // not replicate all user tables, so filter by table-cfs config
       final Map<TableName, List<String>> tableCfs = peerConfig.getTableCFsMap();
       if (tableCfs == null) {
         return cell;
       }
-      TableName tabName = entry.getKey().getTablename();
-      List<String> cfs = tableCfs.get(tabName);
-      // ignore(remove) kv if its cf isn't in the replicable cf list
-      // (empty cfs means all cfs of this table are replicable)
+
       if (CellUtil.matchingColumn(cell, WALEdit.METAFAMILY, WALEdit.BULK_LOAD)) {
-        cell = bulkLoadFilter.filterCell(cell, new Predicate<byte[]>() {
-          @Override
-          public boolean apply(byte[] fam) {
-            if (tableCfs != null) {
-              List<String> cfs = tableCfs.get(entry.getKey().getTablename());
-              if (cfs != null && !cfs.contains(Bytes.toString(fam))) {
-                return true;
-              }
-            }
-            return false;
-          }
-        });
+        cell = bulkLoadFilter.filterCell(cell,
+          fam -> filterByTableCfs(entry.getKey().getTablename(), Bytes.toString(fam), tableCfs));
       } else {
-        if ((cfs != null)
-            && !cfs.contains(Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(),
-              cell.getFamilyLength()))) {
+        if (filterByTableCfs(entry.getKey().getTablename(),
+          Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()),
+          tableCfs)) {
           return null;
         }
       }
@@ -128,4 +142,31 @@ public class NamespaceTableCfWALEntryFilter implements WALEntryFilter, WALCellFi
       return cell;
     }
   }
+
+  private boolean filterByExcludeTableCfs(TableName tableName, String family,
+      Map<TableName, List<String>> excludeTableCfs) {
+    List<String> excludeCfs = excludeTableCfs.get(tableName);
+    if (excludeCfs != null) {
+      // empty cfs means all cfs of this table are excluded
+      if (excludeCfs.isEmpty()) {
+        return true;
+      }
+      // ignore(remove) kv if its cf is in the exclude cfs list
+      if (excludeCfs.contains(family)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private boolean filterByTableCfs(TableName tableName, String family,
+      Map<TableName, List<String>> tableCfs) {
+    List<String> cfs = tableCfs.get(tableName);
+    // ignore(remove) kv if its cf isn't in the replicable cf list
+    // (empty cfs means all cfs of this table are replicable)
+    if (cfs != null && !cfs.contains(family)) {
+      return true;
+    }
+    return false;
+  }
 }

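For clarity, the exclude table-cfs decision rule restated as a standalone sketch; it mirrors filterByExcludeTableCfs above but uses plain String keys instead of TableName, purely for illustration:

import java.util.List;
import java.util.Map;

public class ExcludeCfRuleSketch {
  /** Returns true when the cell of the given table/family should be dropped. */
  static boolean dropCell(String table, String family,
      Map<String, List<String>> excludeTableCfs) {
    List<String> excludeCfs = excludeTableCfs.get(table);
    if (excludeCfs == null) {
      return false; // the table is not excluded at all
    }
    // an empty cf list means every family of the table is excluded
    return excludeCfs.isEmpty() || excludeCfs.contains(family);
  }
}
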
http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
index 19f117b..83a2e12 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/replication/TestReplicationAdmin.java
@@ -483,8 +483,97 @@ public class TestReplicationAdmin {
   }
 
   @Test
+  public void testPeerExcludeNamespaces() throws Exception {
+    String ns1 = "ns1";
+    String ns2 = "ns2";
+
+    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+    rpc.setClusterKey(KEY_ONE);
+    hbaseAdmin.addReplicationPeer(ID_ONE, rpc);
+
+    rpc = hbaseAdmin.getReplicationPeerConfig(ID_ONE);
+    assertTrue(rpc.replicateAllUserTables());
+
+    Set<String> namespaces = new HashSet<String>();
+    namespaces.add(ns1);
+    namespaces.add(ns2);
+    rpc.setExcludeNamespaces(namespaces);
+    hbaseAdmin.updateReplicationPeerConfig(ID_ONE, rpc);
+    namespaces = hbaseAdmin.getReplicationPeerConfig(ID_ONE).getExcludeNamespaces();
+    assertEquals(2, namespaces.size());
+    assertTrue(namespaces.contains(ns1));
+    assertTrue(namespaces.contains(ns2));
+
+    rpc = hbaseAdmin.getReplicationPeerConfig(ID_ONE);
+    namespaces.clear();
+    namespaces.add(ns1);
+    rpc.setExcludeNamespaces(namespaces);
+    hbaseAdmin.updateReplicationPeerConfig(ID_ONE, rpc);
+    namespaces = hbaseAdmin.getReplicationPeerConfig(ID_ONE).getExcludeNamespaces();
+    assertEquals(1, namespaces.size());
+    assertTrue(namespaces.contains(ns1));
+
+    hbaseAdmin.removeReplicationPeer(ID_ONE);
+  }
+
+  @Test
+  public void testPeerExcludeTableCFs() throws Exception {
+    ReplicationPeerConfig rpc = new ReplicationPeerConfig();
+    rpc.setClusterKey(KEY_ONE);
+    TableName tab1 = TableName.valueOf("t1");
+    TableName tab2 = TableName.valueOf("t2");
+    TableName tab3 = TableName.valueOf("t3");
+    TableName tab4 = TableName.valueOf("t4");
+
+    // Add a valid peer
+    hbaseAdmin.addReplicationPeer(ID_ONE, rpc);
+    rpc = hbaseAdmin.getReplicationPeerConfig(ID_ONE);
+    assertTrue(rpc.replicateAllUserTables());
+
+    Map<TableName, List<String>> tableCFs = new HashMap<TableName, List<String>>();
+    tableCFs.put(tab1, null);
+    rpc.setExcludeTableCFsMap(tableCFs);
+    hbaseAdmin.updateReplicationPeerConfig(ID_ONE, rpc);
+    Map<TableName, List<String>> result =
+        hbaseAdmin.getReplicationPeerConfig(ID_ONE).getExcludeTableCFsMap();
+    assertEquals(1, result.size());
+    assertEquals(true, result.containsKey(tab1));
+    assertNull(result.get(tab1));
+
+    tableCFs.put(tab2, new ArrayList<String>());
+    tableCFs.get(tab2).add("f1");
+    rpc.setExcludeTableCFsMap(tableCFs);
+    hbaseAdmin.updateReplicationPeerConfig(ID_ONE, rpc);
+    result = hbaseAdmin.getReplicationPeerConfig(ID_ONE).getExcludeTableCFsMap();
+    assertEquals(2, result.size());
+    assertTrue("Should contain t1", result.containsKey(tab1));
+    assertTrue("Should contain t2", result.containsKey(tab2));
+    assertNull(result.get(tab1));
+    assertEquals(1, result.get(tab2).size());
+    assertEquals("f1", result.get(tab2).get(0));
+
+    tableCFs.clear();
+    tableCFs.put(tab3, new ArrayList<String>());
+    tableCFs.put(tab4, new ArrayList<String>());
+    tableCFs.get(tab4).add("f1");
+    tableCFs.get(tab4).add("f2");
+    rpc.setExcludeTableCFsMap(tableCFs);
+    hbaseAdmin.updateReplicationPeerConfig(ID_ONE, rpc);
+    result = hbaseAdmin.getReplicationPeerConfig(ID_ONE).getExcludeTableCFsMap();
+    assertEquals(2, result.size());
+    assertTrue("Should contain t3", result.containsKey(tab3));
+    assertTrue("Should contain t4", result.containsKey(tab4));
+    assertNull(result.get(tab3));
+    assertEquals(2, result.get(tab4).size());
+    assertEquals("f1", result.get(tab4).get(0));
+    assertEquals("f2", result.get(tab4).get(1));
+
+    hbaseAdmin.removeReplicationPeer(ID_ONE);
+  }
+
+  @Test
   public void testPeerConfigConflict() throws Exception {
-    // Default replicate all flag is true
+    // Default replicate_all flag is true
     ReplicationPeerConfig rpc = new ReplicationPeerConfig();
     rpc.setClusterKey(KEY_ONE);
 
@@ -492,14 +581,15 @@ public class TestReplicationAdmin {
     Set<String> namespaces = new HashSet<String>();
     namespaces.add(ns1);
 
-    TableName tab1 = TableName.valueOf("ns1:tabl");
+    TableName tab1 = TableName.valueOf("ns2:tabl");
     Map<TableName, List<String>> tableCfs = new HashMap<TableName, List<String>>();
     tableCfs.put(tab1, new ArrayList<String>());
 
     try {
       rpc.setNamespaces(namespaces);
       hbaseAdmin.addReplicationPeer(ID_ONE, rpc);
-      fail("Should throw Exception. When replicate all flag is true, no need to config namespaces");
+      fail("Should throw Exception."
+          + " When replicate all flag is true, no need to config namespaces");
     } catch (IOException e) {
       // OK
       rpc.setNamespaces(null);
@@ -508,23 +598,51 @@ public class TestReplicationAdmin {
     try {
       rpc.setTableCFsMap(tableCfs);
       hbaseAdmin.addReplicationPeer(ID_ONE, rpc);
-      fail("Should throw Exception. When replicate all flag is true, no need to config table-cfs");
+      fail("Should throw Exception."
+          + " When replicate all flag is true, no need to config table-cfs");
     } catch (IOException e) {
       // OK
       rpc.setTableCFsMap(null);
     }
 
+    // Set replicate_all flag to false
+    rpc.setReplicateAllUserTables(false);
     try {
-      rpc.setNamespaces(namespaces);
-      rpc.setTableCFsMap(tableCfs);
+      rpc.setExcludeNamespaces(namespaces);
       hbaseAdmin.addReplicationPeer(ID_ONE, rpc);
       fail("Should throw Exception."
-          + " When replicate all flag is true, no need to config namespaces or table-cfs");
+          + " When replicate all flag is false, no need to config exclude namespaces");
     } catch (IOException e) {
       // OK
-      rpc.setNamespaces(null);
-      rpc.setTableCFsMap(null);
+      rpc.setExcludeNamespaces(null);
+    }
+
+    try {
+      rpc.setExcludeTableCFsMap(tableCfs);
+      hbaseAdmin.addReplicationPeer(ID_ONE, rpc);
+      fail("Should throw Exception."
+          + " When replicate all flag is false, no need to config exclude table-cfs");
+    } catch (IOException e) {
+      // OK
+      rpc.setExcludeTableCFsMap(null);
     }
+
+    rpc.setNamespaces(namespaces);
+    rpc.setTableCFsMap(tableCfs);
+    // OK to add a new peer whose replicate_all flag is false with namespaces and table-cfs config
+    hbaseAdmin.addReplicationPeer(ID_ONE, rpc);
+
+    // Default replicate_all flag is true
+    ReplicationPeerConfig rpc2 = new ReplicationPeerConfig();
+    rpc2.setClusterKey(KEY_SECOND);
+    rpc2.setExcludeNamespaces(namespaces);
+    rpc2.setExcludeTableCFsMap(tableCfs);
+    // OK to add a new peer whose replicate_all flag is true with exclude namespaces and
+    // exclude table-cfs config
+    hbaseAdmin.addReplicationPeer(ID_SECOND, rpc2);
+
+    hbaseAdmin.removeReplicationPeer(ID_ONE);
+    hbaseAdmin.removeReplicationPeer(ID_SECOND);
   }
 
   @Test
@@ -539,41 +657,80 @@ public class TestReplicationAdmin {
     rpc.setReplicateAllUserTables(false);
     hbaseAdmin.addReplicationPeer(ID_ONE, rpc);
 
-    rpc = admin.getPeerConfig(ID_ONE);
+    rpc = hbaseAdmin.getReplicationPeerConfig(ID_ONE);
     Set<String> namespaces = new HashSet<String>();
     namespaces.add(ns1);
     rpc.setNamespaces(namespaces);
-    admin.updatePeerConfig(ID_ONE, rpc);
-    rpc = admin.getPeerConfig(ID_ONE);
-    Map<TableName, List<String>> tableCfs = new HashMap<>();
-    tableCfs.put(tableName1, new ArrayList<>());
-    rpc.setTableCFsMap(tableCfs);
+    hbaseAdmin.updateReplicationPeerConfig(ID_ONE, rpc);
+    rpc = hbaseAdmin.getReplicationPeerConfig(ID_ONE);
     try {
-      admin.updatePeerConfig(ID_ONE, rpc);
-      fail("Should throw ReplicationException, because table " + tableName1 + " conflict with namespace "
-          + ns1);
-    } catch (IOException e) {
+      Map<TableName, List<String>> tableCfs = new HashMap<>();
+      tableCfs.put(tableName1, new ArrayList<>());
+      rpc.setTableCFsMap(tableCfs);
+      hbaseAdmin.updateReplicationPeerConfig(ID_ONE, rpc);
+      fail("Should throw ReplicationException" + " Because table " + tableName1
+          + " conflict with namespace " + ns1);
+    } catch (Exception e) {
       // OK
     }
 
-    rpc = admin.getPeerConfig(ID_ONE);
-    tableCfs.clear();
+    rpc = hbaseAdmin.getReplicationPeerConfig(ID_ONE);
+    Map<TableName, List<String>> tableCfs = new HashMap<>();
     tableCfs.put(tableName2, new ArrayList<>());
     rpc.setTableCFsMap(tableCfs);
-    admin.updatePeerConfig(ID_ONE, rpc);
-    rpc = admin.getPeerConfig(ID_ONE);
-    namespaces.clear();
-    namespaces.add(ns2);
-    rpc.setNamespaces(namespaces);
+    hbaseAdmin.updateReplicationPeerConfig(ID_ONE, rpc);
+    rpc = hbaseAdmin.getReplicationPeerConfig(ID_ONE);
     try {
-      admin.updatePeerConfig(ID_ONE, rpc);
-      fail("Should throw ReplicationException, because namespace " + ns2 + " conflict with table "
-          + tableName2);
-    } catch (IOException e) {
+      namespaces.clear();
+      namespaces.add(ns2);
+      rpc.setNamespaces(namespaces);
+      hbaseAdmin.updateReplicationPeerConfig(ID_ONE, rpc);
+      fail("Should throw ReplicationException" + " Because namespace " + ns2
+          + " conflict with table " + tableName2);
+    } catch (Exception e) {
       // OK
     }
 
-    admin.removePeer(ID_ONE);
+    ReplicationPeerConfig rpc2 = new ReplicationPeerConfig();
+    rpc2.setClusterKey(KEY_SECOND);
+    hbaseAdmin.addReplicationPeer(ID_SECOND, rpc2);
+
+    rpc2 = hbaseAdmin.getReplicationPeerConfig(ID_SECOND);
+    Set<String> excludeNamespaces = new HashSet<String>();
+    excludeNamespaces.add(ns1);
+    rpc2.setExcludeNamespaces(excludeNamespaces);
+    hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, rpc2);
+    rpc2 = hbaseAdmin.getReplicationPeerConfig(ID_SECOND);
+    try {
+      Map<TableName, List<String>> excludeTableCfs = new HashMap<>();
+      excludeTableCfs.put(tableName1, new ArrayList<>());
+      rpc2.setExcludeTableCFsMap(excludeTableCfs);
+      hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, rpc2);
+      fail("Should throw ReplicationException" + " Because exclude table " + tableName1
+          + " conflict with exclude namespace " + ns1);
+    } catch (Exception e) {
+      // OK
+    }
+
+    rpc2 = hbaseAdmin.getReplicationPeerConfig(ID_SECOND);
+    Map<TableName, List<String>> excludeTableCfs = new HashMap<>();
+    excludeTableCfs.put(tableName2, new ArrayList<>());
+    rpc2.setExcludeTableCFsMap(excludeTableCfs);
+    hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, rpc2);
+    rpc2 = hbaseAdmin.getReplicationPeerConfig(ID_SECOND);
+    try {
+      namespaces.clear();
+      namespaces.add(ns2);
+      rpc2.setNamespaces(namespaces);
+      hbaseAdmin.updateReplicationPeerConfig(ID_SECOND, rpc2);
+      fail("Should throw ReplicationException" + " Because exclude namespace " + ns2
+          + " conflict with exclude table " + tableName2);
+    } catch (Exception e) {
+      // OK
+    }
+
+    hbaseAdmin.removeReplicationPeer(ID_ONE);
+    hbaseAdmin.removeReplicationPeer(ID_SECOND);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
index 608d22b..7c3773a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationWALEntryFilters.java
@@ -22,8 +22,7 @@ import static org.junit.Assert.assertNull;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -47,6 +46,8 @@ import org.junit.Assert;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
+
 @Category({ReplicationTests.class, SmallTests.class})
 public class TestReplicationWALEntryFilters {
 
@@ -205,23 +206,17 @@ public class TestReplicationWALEntryFilters {
     ReplicationPeer peer = mock(ReplicationPeer.class);
     ReplicationPeerConfig peerConfig = mock(ReplicationPeerConfig.class);
 
-    // 1. replicate all user tables
-    when(peerConfig.replicateAllUserTables()).thenReturn(true);
-    when(peer.getPeerConfig()).thenReturn(peerConfig);
-    Entry userEntry = createEntry(null, a, b, c);
-    ChainWALEntryFilter filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
-    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
-
-    // 2. not replicate all user tables, no namespaces and table-cfs config
+    // 1. replicate_all flag is false, no namespaces and table-cfs config
     when(peerConfig.replicateAllUserTables()).thenReturn(false);
     when(peerConfig.getNamespaces()).thenReturn(null);
     when(peerConfig.getTableCFsMap()).thenReturn(null);
     when(peer.getPeerConfig()).thenReturn(peerConfig);
-    userEntry = createEntry(null, a, b, c);
-    filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
+    Entry userEntry = createEntry(null, a, b, c);
+    ChainWALEntryFilter filter =
+        new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
     assertEquals(null, filter.filter(userEntry));
 
-    // 3. Only config table-cfs in peer
+    // 2. replicate_all flag is false, and only config table-cfs in peer
     // empty map
     userEntry = createEntry(null, a, b, c);
     Map<TableName, List<String>> tableCfs = new HashMap<>();
@@ -261,7 +256,7 @@ public class TestReplicationWALEntryFilters {
     filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
     assertEquals(createEntry(null, a,c), filter.filter(userEntry));
 
-    // 3. Only config namespaces in peer
+    // 3. replicate_all flag is false, and only config namespaces in peer
     when(peer.getTableCFs()).thenReturn(null);
     // empty set
     Set<String> namespaces = new HashSet<>();
@@ -292,7 +287,7 @@ public class TestReplicationWALEntryFilters {
     filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
     assertEquals(null, filter.filter(userEntry));
 
-    // 4. Config namespaces and table-cfs both
+    // 4. replicate_all flag is false, and config namespaces and table-cfs both
     // Namespaces config should not conflict with table-cfs config
     namespaces = new HashSet<>();
     tableCfs = new HashMap<>();
@@ -331,9 +326,110 @@ public class TestReplicationWALEntryFilters {
     assertEquals(null, filter.filter(userEntry));
   }
 
+  @Test
+  public void testNamespaceTableCfWALEntryFilter2() {
+    ReplicationPeer peer = mock(ReplicationPeer.class);
+    ReplicationPeerConfig peerConfig = mock(ReplicationPeerConfig.class);
+
+    // 1. replicate_all flag is true
+    // and no exclude namespaces and no exclude table-cfs config
+    when(peerConfig.replicateAllUserTables()).thenReturn(true);
+    when(peerConfig.getExcludeNamespaces()).thenReturn(null);
+    when(peerConfig.getExcludeTableCFsMap()).thenReturn(null);
+    when(peer.getPeerConfig()).thenReturn(peerConfig);
+    Entry userEntry = createEntry(null, a, b, c);
+    ChainWALEntryFilter filter =
+        new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
+    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
+
+    // 2. replicate_all flag is true, and only config exclude namespaces
+    // empty set
+    Set<String> namespaces = new HashSet<String>();
+    when(peerConfig.getExcludeNamespaces()).thenReturn(namespaces);
+    when(peerConfig.getExcludeTableCFsMap()).thenReturn(null);
+    when(peer.getPeerConfig()).thenReturn(peerConfig);
+    userEntry = createEntry(null, a, b, c);
+    filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
+    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
+
+    // exclude namespace default
+    namespaces.add("default");
+    when(peerConfig.getExcludeNamespaces()).thenReturn(namespaces);
+    when(peerConfig.getExcludeTableCFsMap()).thenReturn(null);
+    when(peer.getPeerConfig()).thenReturn(peerConfig);
+    userEntry = createEntry(null, a, b, c);
+    filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
+    assertEquals(null, filter.filter(userEntry));
+
+    // exclude namespace ns1
+    namespaces = new HashSet<String>();
+    namespaces.add("ns1");
+    when(peerConfig.getExcludeNamespaces()).thenReturn(namespaces);
+    when(peerConfig.getExcludeTableCFsMap()).thenReturn(null);
+    when(peer.getPeerConfig()).thenReturn(peerConfig);
+    userEntry = createEntry(null, a, b, c);
+    filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
+    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
+
+    // 3. replicate_all flag is true, and only config exclude table-cfs
+    // empty table-cfs map
+    Map<TableName, List<String>> tableCfs = new HashMap<TableName, List<String>>();
+    when(peerConfig.getExcludeNamespaces()).thenReturn(null);
+    when(peerConfig.getExcludeTableCFsMap()).thenReturn(tableCfs);
+    when(peer.getPeerConfig()).thenReturn(peerConfig);
+    userEntry = createEntry(null, a, b, c);
+    filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
+    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
+
+    // exclude table bar
+    tableCfs = new HashMap<TableName, List<String>>();
+    tableCfs.put(TableName.valueOf("bar"), null);
+    when(peerConfig.getExcludeNamespaces()).thenReturn(null);
+    when(peerConfig.getExcludeTableCFsMap()).thenReturn(tableCfs);
+    when(peer.getPeerConfig()).thenReturn(peerConfig);
+    userEntry = createEntry(null, a, b, c);
+    filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
+    assertEquals(createEntry(null, a, b, c), filter.filter(userEntry));
+
+    // exclude table foo:a
+    tableCfs = new HashMap<TableName, List<String>>();
+    tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a"));
+    when(peerConfig.getExcludeNamespaces()).thenReturn(null);
+    when(peerConfig.getExcludeTableCFsMap()).thenReturn(tableCfs);
+    when(peer.getPeerConfig()).thenReturn(peerConfig);
+    userEntry = createEntry(null, a, b, c);
+    filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
+    assertEquals(createEntry(null, b, c), filter.filter(userEntry));
+
+    // 4. replicate_all flag is true, and config exclude namespaces and table-cfs both
+    // exclude ns1 and table foo:a,c
+    namespaces = new HashSet<String>();
+    tableCfs = new HashMap<TableName, List<String>>();
+    namespaces.add("ns1");
+    tableCfs.put(TableName.valueOf("foo"), Lists.newArrayList("a", "c"));
+    when(peerConfig.getExcludeNamespaces()).thenReturn(namespaces);
+    when(peerConfig.getExcludeTableCFsMap()).thenReturn(tableCfs);
+    when(peer.getPeerConfig()).thenReturn(peerConfig);
+    userEntry = createEntry(null, a, b, c);
+    filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
+    assertEquals(createEntry(null, b), filter.filter(userEntry));
+
+    // exclude namespace default and table ns1:bar
+    namespaces = new HashSet<String>();
+    tableCfs = new HashMap<TableName, List<String>>();
+    namespaces.add("default");
+    tableCfs.put(TableName.valueOf("ns1:bar"), new ArrayList<String>());
+    when(peerConfig.getExcludeNamespaces()).thenReturn(namespaces);
+    when(peerConfig.getExcludeTableCFsMap()).thenReturn(tableCfs);
+    when(peer.getPeerConfig()).thenReturn(peerConfig);
+    userEntry = createEntry(null, a, b, c);
+    filter = new ChainWALEntryFilter(new NamespaceTableCfWALEntryFilter(peer));
+    assertEquals(null, filter.filter(userEntry));
+  }
+
   private Entry createEntry(TreeMap<byte[], Integer> scopes, byte[]... kvs) {
-    WALKeyImpl key1 =
-        new WALKeyImpl(new byte[0], TableName.valueOf("foo"), System.currentTimeMillis(), scopes);
+    WALKeyImpl key1 = new WALKeyImpl(new byte[0], TableName.valueOf("foo"),
+      System.currentTimeMillis(), scopes);
     WALEdit edit1 = new WALEdit();
 
     for (byte[] kv : kvs) {
@@ -342,7 +438,6 @@ public class TestReplicationWALEntryFilters {
     return new Entry(key1, edit1);
   }
 
-
   private void assertEquals(Entry e1, Entry e2) {
     Assert.assertEquals(e1 == null, e2 == null);
     if (e1 == null) {

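The cases above exercise the exclude semantics when replicate_all is true: a cell is dropped if its table falls in an excluded namespace, or if it matches an excluded table/cf entry, where a null or empty cf list excludes the whole table. A minimal, hypothetical Java sketch of that decision rule -- illustrative names only, not the actual NamespaceTableCfWALEntryFilter code:

  import java.util.List;
  import java.util.Map;
  import java.util.Set;
  import org.apache.hadoop.hbase.TableName;

  // Hypothetical helper: should a cell for (table, cf) be excluded from
  // replication when replicate_all is true?
  static boolean excluded(TableName table, String cf, Set<String> excludeNamespaces,
      Map<TableName, List<String>> excludeTableCFs) {
    if (excludeNamespaces != null
        && excludeNamespaces.contains(table.getNamespaceAsString())) {
      return true; // the whole namespace is excluded
    }
    if (excludeTableCFs != null && excludeTableCFs.containsKey(table)) {
      List<String> cfs = excludeTableCFs.get(table);
      // a null or empty cf list means the whole table is excluded
      return cfs == null || cfs.isEmpty() || cfs.contains(cf);
    }
    return false; // everything else is replicated
  }
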
http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-shell/src/main/ruby/hbase/replication_admin.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index 50c086a..949bf68 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -152,7 +152,11 @@ module Hbase
     # Show the current tableCFs config for the specified peer
     def show_peer_tableCFs(id)
       rpc = @admin.getReplicationPeerConfig(id)
-      ReplicationPeerConfigUtil.convertToString(rpc.getTableCFsMap)
+      show_peer_tableCFs_by_config(rpc)
+    end
+
+    def show_peer_tableCFs_by_config(peer_config)
+      ReplicationPeerConfigUtil.convertToString(peer_config.getTableCFsMap)
     end
 
     #----------------------------------------------------------------------------------------------
@@ -274,6 +278,49 @@ module Hbase
       @replication_admin.updatePeerConfig(id, rpc)
     end
 
+    # Set exclude namespaces config for the specified peer
+    def set_peer_exclude_namespaces(id, exclude_namespaces)
+      return if exclude_namespaces.nil?
+      exclude_ns_set = java.util.HashSet.new
+      exclude_namespaces.each do |n|
+        exclude_ns_set.add(n)
+      end
+      rpc = get_peer_config(id)
+      return if rpc.nil?
+      rpc.setExcludeNamespaces(exclude_ns_set)
+      @admin.updateReplicationPeerConfig(id, rpc)
+    end
+
+    # Show the exclude namespaces config for the specified peer
+    def show_peer_exclude_namespaces(peer_config)
+      namespaces = peer_config.getExcludeNamespaces
+      return nil if namespaces.nil?
+      namespaces = java.util.ArrayList.new(namespaces)
+      java.util.Collections.sort(namespaces)
+      '!' + namespaces.join(';')
+    end
+
+    # Set exclude tableCFs config for the specified peer
+    def set_peer_exclude_tableCFs(id, exclude_tableCFs)
+      return if exclude_tableCFs.nil?
+      # convert tableCFs to TableName
+      map = java.util.HashMap.new
+      exclude_tableCFs.each do |key, val|
+        map.put(org.apache.hadoop.hbase.TableName.valueOf(key), val)
+      end
+      rpc = get_peer_config(id)
+      return if rpc.nil?
+      rpc.setExcludeTableCFsMap(map)
+      @admin.updateReplicationPeerConfig(id, rpc)
+    end
+
+    # Show the exclude tableCFs config for the specified peer
+    def show_peer_exclude_tableCFs(peer_config)
+      tableCFs = peer_config.getExcludeTableCFsMap
+      return nil if tableCFs.nil?
+      '!' + ReplicationPeerConfigUtil.convertToString(tableCFs)
+    end
+
     #----------------------------------------------------------------------------------------------
     # Enables a table's replication switch
     def enable_tablerep(table_name)

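The Ruby helpers above are thin wrappers over the Java client API. A minimal sketch of the equivalent Java calls, assuming an open Admin handle and with error handling omitted; the setters and updateReplicationPeerConfig are the same calls the shell code invokes:

  // Exclude namespace ns1 and column family a of table foo from replication.
  ReplicationPeerConfig rpc = admin.getReplicationPeerConfig(peerId);
  Set<String> excludeNs = new HashSet<>();
  excludeNs.add("ns1");
  rpc.setExcludeNamespaces(excludeNs);
  Map<TableName, List<String>> excludeCfs = new HashMap<>();
  excludeCfs.put(TableName.valueOf("foo"), Arrays.asList("a"));
  rpc.setExcludeTableCFsMap(excludeCfs);
  admin.updateReplicationPeerConfig(peerId, rpc);
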
http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-shell/src/main/ruby/shell.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell.rb b/hbase-shell/src/main/ruby/shell.rb
index 58886fc..a01a890 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -381,8 +381,10 @@ Shell.load_command_group(
     set_peer_namespaces
     append_peer_namespaces
     remove_peer_namespaces
+    set_peer_exclude_namespaces
     show_peer_tableCFs
     set_peer_tableCFs
+    set_peer_exclude_tableCFs
     set_peer_bandwidth
     list_replicated_tables
     append_peer_tableCFs

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
index 6812df4..522d23d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/list_peers.rb
@@ -23,7 +23,13 @@ module Shell
     class ListPeers < Command
       def help
         <<-EOF
-List all replication peer clusters.
+  List all replication peer clusters.
+
+  If replicate_all flag is false, only the namespaces and table-cfs in the
+  peer config will be replicated to the peer cluster.
+
+  If replicate_all flag is true, all user tables will be replicated to the
+  peer cluster, except for the namespaces and table-cfs in the peer config.
 
   hbase> list_peers
 EOF
@@ -39,8 +45,13 @@ EOF
           id = peer.getPeerId
           state = peer.isEnabled ? 'ENABLED' : 'DISABLED'
           config = peer.getPeerConfig
-          namespaces = replication_admin.show_peer_namespaces(config)
-          tableCFs = replication_admin.show_peer_tableCFs(id)
+          if config.replicateAllUserTables
+            namespaces = replication_admin.show_peer_exclude_namespaces(config)
+            tableCFs = replication_admin.show_peer_exclude_tableCFs(config)
+          else
+            namespaces = replication_admin.show_peer_namespaces(config)
+            tableCFs = replication_admin.show_peer_tableCFs_by_config(config)
+          end
           formatter.row([id, config.getClusterKey,
                          config.getReplicationEndpointImpl, state,
                          config.replicateAllUserTables, namespaces, tableCFs,

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-shell/src/main/ruby/shell/commands/set_peer_exclude_namespaces.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_peer_exclude_namespaces.rb b/hbase-shell/src/main/ruby/shell/commands/set_peer_exclude_namespaces.rb
new file mode 100644
index 0000000..bf9b90b
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/set_peer_exclude_namespaces.rb
@@ -0,0 +1,52 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class SetPeerExcludeNamespaces < Command
+      def help
+        <<-EOF
+  Set the namespaces which are not replicated for the specified peer.
+
+  Note:
+  1. The replicate_all flag needs to be true when setting exclude namespaces.
+  2. Setting an exclude namespace in the peer config means that all tables in
+     this namespace will not be replicated to the peer cluster. If the peer
+     config already has an exclude table, then setting that table's namespace
+     as an exclude namespace is not allowed.
+
+  Examples:
+
+    # set exclude namespaces config to null
+    hbase> set_peer_exclude_namespaces '1', []
+    # set namespaces which are not replicated for a peer;
+    # setting an exclude namespace in the peer config means that all tables
+    # in this namespace will not be replicated.
+    hbase> set_peer_exclude_namespaces '2', ["ns1", "ns2"]
+
+  EOF
+      end
+
+      def command(id, exclude_namespaces)
+        replication_admin.set_peer_exclude_namespaces(id, exclude_namespaces)
+      end
+    end
+  end
+end

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-shell/src/main/ruby/shell/commands/set_peer_exclude_tableCFs.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_peer_exclude_tableCFs.rb b/hbase-shell/src/main/ruby/shell/commands/set_peer_exclude_tableCFs.rb
new file mode 100644
index 0000000..25be364
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/set_peer_exclude_tableCFs.rb
@@ -0,0 +1,51 @@
+#
+# Copyright The Apache Software Foundation
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+    class SetPeerExcludeTableCFs < Command
+      def help
+        <<-EOF
+  Set the table-cfs which are not replicated for the specified peer.
+
+  Note:
+  1. The replicate_all flag needs to be true when setting exclude table-cfs.
+  2. If the peer config already has an exclude namespace, then setting any
+     exclude table of this namespace in the peer config is not allowed.
+
+  Examples:
+
+    # set exclude table-cfs to null
+    hbase> set_peer_exclude_tableCFs '1'
+    # set table / table-cf which are not replicated for a peer; for a table
+    # without an explicit column-family list, all column-families will not be replicated
+    hbase> set_peer_exclude_tableCFs '2', { "ns1:table1" => [],
+                                    "ns2:table2" => ["cf1", "cf2"],
+                                    "ns3:table3" => ["cfA", "cfB"]}
+
+  EOF
+      end
+
+      def command(id, exclude_peer_table_cfs = nil)
+        replication_admin.set_peer_exclude_tableCFs(id, exclude_peer_table_cfs)
+      end
+    end
+  end
+end

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-shell/src/main/ruby/shell/commands/set_peer_namespaces.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_peer_namespaces.rb b/hbase-shell/src/main/ruby/shell/commands/set_peer_namespaces.rb
index 6d14c1c..9f0649d 100644
--- a/hbase-shell/src/main/ruby/shell/commands/set_peer_namespaces.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/set_peer_namespaces.rb
@@ -25,10 +25,10 @@ module Shell
         <<-EOF
   Set the replicable namespaces config for the specified peer.
 
-  Set a namespace in the peer config means that all tables in this
-  namespace will be replicated to the peer cluster. So if you already
-  have set a namespace in the peer config, then you can't set this
-  namespace's tables in the peer config again.
+  1. The replicate_all flag needs to be false when setting the replicable namespaces.
+  2. Setting a namespace in the peer config means that all tables in this namespace
+     will be replicated to the peer cluster. If the peer config already has a table,
+     then setting this table's namespace in the peer config is not allowed.
 
   Examples:
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-shell/src/main/ruby/shell/commands/set_peer_replicate_all.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_peer_replicate_all.rb b/hbase-shell/src/main/ruby/shell/commands/set_peer_replicate_all.rb
index f6de615..8996964 100644
--- a/hbase-shell/src/main/ruby/shell/commands/set_peer_replicate_all.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/set_peer_replicate_all.rb
@@ -26,7 +26,10 @@ module Shell
   Set the replicate_all flag to true or false for the specified peer.
 
   If replicate_all flag is true, then all user tables (REPLICATION_SCOPE != 0)
-  will be replicate to peer cluster.
+  will be replicated to the peer cluster. You can use 'set_peer_exclude_namespaces'
+  to set which namespaces can't be replicated to the peer cluster, and
+  'set_peer_exclude_tableCFs' to set which tables can't be replicated to the
+  peer cluster.
 
   If replicate_all flag is false, then all user tables cannot be replicated to
   peer cluster. Then you can use 'set_peer_namespaces' or 'append_peer_namespaces'
@@ -36,6 +39,9 @@ module Shell
 
   Notice: When you want to change a peer's replicate_all flag from false to true,
           you need to clean the peer's NAMESPACES and TABLECFS config first.
+          When you want to change a peer's replicate_all flag from true to false,
+          you need to clean the peer's EXCLUDE_NAMESPACES and EXCLUDE_TABLECFS
+          config first.
 
   Examples:
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb b/hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb
index 6da2f11..03b2186 100644
--- a/hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/set_peer_tableCFs.rb
@@ -25,8 +25,10 @@ module Shell
         <<-EOF
   Set the replicable table-cf config for the specified peer.
 
-  Can't set a table to table-cfs config if it's namespace already was in
-  namespaces config of this peer.
+  Note:
+  1. The replicate_all flag needs to be false when setting the replicable table-cfs.
+  2. A table can't be set in the table-cfs config if its namespace is already in
+     the namespaces config of this peer.
 
   Examples:
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/03e79b79/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
----------------------------------------------------------------------
diff --git a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
index 4b74ada..0f84396 100644
--- a/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/replication_admin_test.rb
@@ -294,6 +294,29 @@ module Hbase
       command(:remove_peer, @peer_id)
     end
 
+    define_test 'set_peer_exclude_tableCFs: works with table-cfs map' do
+      cluster_key = 'zk4,zk5,zk6:11000:/hbase-test'
+      args = { CLUSTER_KEY => cluster_key }
+      command(:add_peer, @peer_id, args)
+
+      assert_equal(1, command(:list_peers).length)
+      peer = command(:list_peers).get(0)
+      assert_equal(@peer_id, peer.getPeerId)
+      assert_equal(cluster_key, peer.getPeerConfig.getClusterKey)
+
+      table_cfs = { 'table1' => [], 'table2' => ['cf1'],
+                    'ns3:table3' => ['cf1', 'cf2'] }
+      command(:set_peer_exclude_tableCFs, @peer_id, table_cfs)
+      assert_equal(1, command(:list_peers).length)
+      peer = command(:list_peers).get(0)
+      peer_config = peer.getPeerConfig
+      assert_equal(true, peer_config.replicateAllUserTables)
+      assert_tablecfs_equal(table_cfs, peer_config.getExcludeTableCFsMap)
+
+      # cleanup for future tests
+      replication_admin.remove_peer(@peer_id)
+    end
+
     define_test "set_peer_namespaces: works with namespaces array" do
       cluster_key = "zk4,zk5,zk6:11000:/hbase-test"
       namespaces = ["ns1", "ns2"]
@@ -395,6 +418,25 @@ module Hbase
       command(:remove_peer, @peer_id)
     end
 
+    define_test 'set_peer_exclude_namespaces: works with namespaces array' do
+      cluster_key = 'zk4,zk5,zk6:11000:/hbase-test'
+      namespaces = ['ns1', 'ns2']
+      namespaces_str = '!ns1;ns2'
+
+      args = { CLUSTER_KEY => cluster_key }
+      command(:add_peer, @peer_id, args)
+      command(:set_peer_exclude_namespaces, @peer_id, namespaces)
+
+      assert_equal(1, command(:list_peers).length)
+      peer_config = command(:list_peers).get(0).getPeerConfig
+      assert_equal(true, peer_config.replicateAllUserTables)
+      assert_equal(namespaces_str,
+                   replication_admin.show_peer_exclude_namespaces(peer_config))
+
+      # cleanup for future tests
+      command(:remove_peer, @peer_id)
+    end
+
     define_test 'set_peer_replicate_all' do
       cluster_key = 'zk4,zk5,zk6:11000:/hbase-test'
 


[22/24] hbase git commit: HBASE-19216 Implement a general framework to execute remote procedure on RS

Posted by zh...@apache.org.
HBASE-19216 Implement a general framework to execute remote procedure on RS


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/123cf75c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/123cf75c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/123cf75c

Branch: refs/heads/HBASE-19397
Commit: 123cf75ca0c6325cf406d52d3e230fa222115157
Parents: 3c78ad5
Author: zhangduo <zh...@apache.org>
Authored: Fri Dec 15 21:06:44 2017 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Wed Dec 20 09:24:36 2017 +0800

----------------------------------------------------------------------
 .../hbase/procedure2/LockedResourceType.java    |   4 +-
 .../procedure2/RemoteProcedureDispatcher.java   |  23 +-
 .../src/main/protobuf/Admin.proto               |   9 +-
 .../src/main/protobuf/MasterProcedure.proto     |  30 +++
 .../src/main/protobuf/RegionServerStatus.proto  |  15 ++
 .../apache/hadoop/hbase/executor/EventType.java |  26 ++-
 .../hadoop/hbase/executor/ExecutorType.java     |   3 +-
 .../org/apache/hadoop/hbase/master/HMaster.java |  33 ++-
 .../hadoop/hbase/master/MasterRpcServices.java  |  13 ++
 .../assignment/RegionTransitionProcedure.java   |  18 +-
 .../procedure/MasterProcedureScheduler.java     | 224 +++++++++++++------
 .../procedure/PeerProcedureInterface.java       |  34 +++
 .../master/procedure/RSProcedureDispatcher.java |  90 ++++----
 .../master/replication/ModifyPeerProcedure.java | 127 +++++++++++
 .../master/replication/RefreshPeerCallable.java |  67 ++++++
 .../replication/RefreshPeerProcedure.java       | 197 ++++++++++++++++
 .../hbase/procedure2/RSProcedureCallable.java   |  43 ++++
 .../hbase/regionserver/HRegionServer.java       |  69 +++++-
 .../hbase/regionserver/RSRpcServices.java       |  56 +++--
 .../handler/RSProcedureHandler.java             |  51 +++++
 .../assignment/TestAssignmentManager.java       |  20 +-
 .../replication/DummyModifyPeerProcedure.java   |  41 ++++
 .../TestDummyModifyPeerProcedure.java           |  80 +++++++
 .../security/access/TestAccessController.java   |   7 +-
 24 files changed, 1110 insertions(+), 170 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
index c5fe62b..dc9b5d4 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -22,5 +22,5 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public enum LockedResourceType {
-  SERVER, NAMESPACE, TABLE, REGION
+  SERVER, NAMESPACE, TABLE, REGION, PEER
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 4cee524..904268d 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -221,13 +221,30 @@ public abstract class RemoteProcedureDispatcher<TEnv, TRemote extends Comparable
 
   /**
    * Remote procedure reference.
-   * @param <TEnv>
-   * @param <TRemote>
    */
   public interface RemoteProcedure<TEnv, TRemote> {
+    /**
+     * For building the remote operation.
+     */
     RemoteOperation remoteCallBuild(TEnv env, TRemote remote);
-    void remoteCallCompleted(TEnv env, TRemote remote, RemoteOperation response);
+
+    /**
+     * Called when the executeProcedure call fails.
+     */
     void remoteCallFailed(TEnv env, TRemote remote, IOException exception);
+
+    /**
+     * Called when the RS reports, through the {@code reportProcedureDone}
+     * method, that the remote procedure has succeeded.
+     */
+    void remoteOperationCompleted(TEnv env);
+
+    /**
+     * Called when the RS reports, through the {@code reportProcedureDone} method,
+     * that the remote procedure has failed.
+     * @param error the error message
+     */
+    void remoteOperationFailed(TEnv env, String error);
   }
 
   /**

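The reworked contract separates dispatch-time failure from completion reported later by the region server: remoteCallFailed fires when the executeProcedure RPC itself fails, while remoteOperationCompleted/remoteOperationFailed fire when the RS calls back through reportProcedureDone. A hypothetical skeleton, only to make the four callbacks concrete; RefreshPeerProcedure later in this commit is the real implementation:

  // Illustrative only; the class name and log messages are made up.
  public class NoopRemoteProcedure implements RemoteProcedure<MasterProcedureEnv, ServerName> {
    private static final Log LOG = LogFactory.getLog(NoopRemoteProcedure.class);

    @Override
    public RemoteOperation remoteCallBuild(MasterProcedureEnv env, ServerName remote) {
      return null; // a real procedure builds e.g. a ServerOperation here
    }

    @Override
    public void remoteCallFailed(MasterProcedureEnv env, ServerName remote, IOException e) {
      LOG.warn("dispatch to " + remote + " failed", e); // the RPC never reached the RS
    }

    @Override
    public void remoteOperationCompleted(MasterProcedureEnv env) {
      LOG.info("RS reported success through reportProcedureDone");
    }

    @Override
    public void remoteOperationFailed(MasterProcedureEnv env, String error) {
      LOG.warn("RS reported failure: " + error);
    }
  }
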
http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-protocol-shaded/src/main/protobuf/Admin.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index 118c79b..ddcc266 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -256,14 +256,19 @@ message ClearRegionBlockCacheResponse {
   required CacheEvictionStats stats = 1;
 }
 
+message RemoteProcedureRequest {
+  required uint64 proc_id = 1;
+  required string proc_class = 2;
+  optional bytes proc_data = 3;
+}
+
 message ExecuteProceduresRequest {
   repeated OpenRegionRequest open_region = 1;
   repeated CloseRegionRequest close_region = 2;
+  repeated RemoteProcedureRequest proc = 3;
 }
 
 message ExecuteProceduresResponse {
-  repeated OpenRegionResponse open_region = 1;
-  repeated CloseRegionResponse close_region = 2;
 }
 
 service AdminService {

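RemoteProcedureRequest carries what the RS needs to reconstruct and run the callable: the procedure id to report back with, the callable's class name, and an opaque serialized parameter. A sketch of how the master side might fill one in, using the generated protobuf builder; the callable class name is taken from this commit's file list:

  RemoteProcedureRequest req = RemoteProcedureRequest.newBuilder()
      .setProcId(procId) // echoed back in ReportProcedureDoneRequest
      .setProcClass("org.apache.hadoop.hbase.master.replication.RefreshPeerCallable")
      .setProcData(ByteString.copyFrom(serializedParam))
      .build();
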
http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index f9b8807..0e2bdba 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -365,3 +365,33 @@ message GCMergedRegionsStateData {
   required RegionInfo parent_b = 2;
   required RegionInfo merged_child = 3;
 }
+
+enum PeerModificationState {
+  UPDATE_PEER_STORAGE = 1;
+  REFRESH_PEER_ON_RS = 2;
+  POST_PEER_MODIFICATION = 3;
+}
+
+message PeerModificationStateData {
+  required string peer_id = 1;
+}
+
+enum PeerModificationType {
+  ADD_PEER = 1;
+  REMOVE_PEER = 2;
+  ENABLE_PEER = 3;
+  DISABLE_PEER = 4;
+  UPDATE_PEER_CONFIG = 5;
+}
+
+message RefreshPeerStateData {
+  required string peer_id = 1;
+  required PeerModificationType type = 2;
+  required ServerName target_server = 3;
+}
+
+message RefreshPeerParameter {
+  required string peer_id = 1;
+  required PeerModificationType type = 2;
+  required ServerName target_server = 3;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index f83bb20..eb396ac 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -143,7 +143,19 @@ message RegionSpaceUseReportRequest {
 }
 
 message RegionSpaceUseReportResponse {
+}
 
+message ReportProcedureDoneRequest {
+  required uint64 proc_id = 1;
+  enum Status {
+    SUCCESS = 1;
+    ERROR = 2;
+  }
+  required Status status = 2;
+  optional string error = 3;
+}
+
+message ReportProcedureDoneResponse {
 }
 
 service RegionServerStatusService {
@@ -181,4 +193,7 @@ service RegionServerStatusService {
    */
   rpc ReportRegionSpaceUse(RegionSpaceUseReportRequest)
     returns(RegionSpaceUseReportResponse);
+
+  rpc ReportProcedureDone(ReportProcedureDoneRequest)
+    returns(ReportProcedureDoneResponse);
 }

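When the callable finishes, the RS reports the outcome through the new ReportProcedureDone RPC, echoing the proc_id it was given. A sketch of building the request on the success and failure paths, using the generated protobuf API; the error string is free-form and is relayed to remoteOperationFailed on the master:

  ReportProcedureDoneRequest.Builder builder =
      ReportProcedureDoneRequest.newBuilder().setProcId(procId);
  if (error == null) {
    builder.setStatus(ReportProcedureDoneRequest.Status.SUCCESS);
  } else {
    builder.setStatus(ReportProcedureDoneRequest.Status.ERROR)
        .setError(error.getMessage());
  }
  ReportProcedureDoneRequest req = builder.build();
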
http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
index 26fb63a..922deb8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventType.java
@@ -20,15 +20,14 @@ package org.apache.hadoop.hbase.executor;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
- * List of all HBase event handler types.  Event types are named by a
- * convention: event type names specify the component from which the event
- * originated and then where its destined -- e.g. RS2ZK_ prefix means the
- * event came from a regionserver destined for zookeeper -- and then what
- * the even is; e.g. REGION_OPENING.
- *
- * <p>We give the enums indices so we can add types later and keep them
- * grouped together rather than have to add them always to the end as we
- * would have to if we used raw enum ordinals.
+ * List of all HBase event handler types.
+ * <p>
+ * Event types are named by a convention: event type names specify the component from which the
+ * event originated and then where it's destined -- e.g. RS_ZK_ prefix means the event came from a
+ * regionserver destined for zookeeper -- and then what the event is; e.g. REGION_OPENING.
+ * <p>
+ * We give the enums indices so we can add types later and keep them grouped together rather than
+ * have to add them always to the end as we would have to if we used raw enum ordinals.
  */
 @InterfaceAudience.Private
 public enum EventType {
@@ -275,7 +274,14 @@ public enum EventType {
    *
    * RS_COMPACTED_FILES_DISCHARGER
    */
-  RS_COMPACTED_FILES_DISCHARGER (83, ExecutorType.RS_COMPACTED_FILES_DISCHARGER);
+  RS_COMPACTED_FILES_DISCHARGER (83, ExecutorType.RS_COMPACTED_FILES_DISCHARGER),
+
+  /**
+   * RS refresh peer.<br>
+   *
+   * RS_REFRESH_PEER
+   */
+  RS_REFRESH_PEER (84, ExecutorType.RS_REFRESH_PEER);
 
   private final int code;
   private final ExecutorType executor;

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
index c75a0a9..7f130d1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/ExecutorType.java
@@ -46,7 +46,8 @@ public enum ExecutorType {
   RS_LOG_REPLAY_OPS          (27),
   RS_REGION_REPLICA_FLUSH_OPS  (28),
   RS_COMPACTED_FILES_DISCHARGER (29),
-  RS_OPEN_PRIORITY_REGION    (30);
+  RS_OPEN_PRIORITY_REGION    (30),
+  RS_REFRESH_PEER               (31);
 
   ExecutorType(int value) {
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 262dfa2..0348b53 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -135,6 +134,7 @@ import org.apache.hadoop.hbase.procedure2.LockedResource;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
@@ -326,8 +326,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   // flag set after we become the active master (used for testing)
   private volatile boolean activeMaster = false;
 
-  // flag set after we complete initialization once active,
-  // it is not private since it's used in unit tests
+  // flag set after we complete initialization once active
   private final ProcedureEvent initialized = new ProcedureEvent("master initialized");
 
   // flag set after master services are started,
@@ -3522,4 +3521,28 @@ public class HMaster extends HRegionServer implements MasterServices {
   public SpaceQuotaSnapshotNotifier getSpaceQuotaSnapshotNotifier() {
     return this.spaceQuotaSnapshotNotifier;
   }
-}
+
+  @SuppressWarnings("unchecked")
+  private RemoteProcedure<MasterProcedureEnv, ?> getRemoteProcedure(long procId) {
+    Procedure<?> procedure = procedureExecutor.getProcedure(procId);
+    if (procedure == null) {
+      return null;
+    }
+    assert procedure instanceof RemoteProcedure;
+    return (RemoteProcedure<MasterProcedureEnv, ?>) procedure;
+  }
+
+  public void remoteProcedureCompleted(long procId) {
+    RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
+    if (procedure != null) {
+      procedure.remoteOperationCompleted(procedureExecutor.getEnvironment());
+    }
+  }
+
+  public void remoteProcedureFailed(long procId, String error) {
+    RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
+    if (procedure != null) {
+      procedure.remoteOperationFailed(procedureExecutor.getEnvironment(), error);
+    }
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 6044d02..599e035 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -261,6 +261,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
@@ -2251,4 +2253,15 @@ public class MasterRpcServices extends RSRpcServices
     }
     return response.build();
   }
+
+  @Override
+  public ReportProcedureDoneResponse reportProcedureDone(RpcController controller,
+      ReportProcedureDoneRequest request) throws ServiceException {
+    if (request.getStatus() == ReportProcedureDoneRequest.Status.SUCCESS) {
+      master.remoteProcedureCompleted(request.getProcId());
+    } else {
+      master.remoteProcedureFailed(request.getProcId(), request.getError());
+    }
+    return ReportProcedureDoneResponse.getDefaultInstance();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
index 17ba75a..47444ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
@@ -174,12 +174,6 @@ public abstract class RegionTransitionProcedure
       RegionStateNode regionNode, IOException exception);
 
   @Override
-  public void remoteCallCompleted(final MasterProcedureEnv env,
-      final ServerName serverName, final RemoteOperation response) {
-    // Ignore the response? reportTransition() is the one that count?
-  }
-
-  @Override
   public void remoteCallFailed(final MasterProcedureEnv env,
       final ServerName serverName, final IOException exception) {
     final RegionStateNode regionNode = getRegionState(env);
@@ -415,4 +409,16 @@ public abstract class RegionTransitionProcedure
    * @return ServerName the Assign or Unassign is going against.
    */
   public abstract ServerName getServer(final MasterProcedureEnv env);
+
+  @Override
+  public void remoteOperationCompleted(MasterProcedureEnv env) {
+    // should not be called for region operation until we modified the open/close region procedure
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void remoteOperationFailed(MasterProcedureEnv env, String error) {
+    // should not be called for region operations until we modify the open/close region procedures
+    throw new UnsupportedOperationException();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
index 9402845..ee7af2d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureScheduler.java
@@ -24,7 +24,7 @@ import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Map.Entry;
+import java.util.function.Function;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,6 +35,7 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
+import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface.PeerOperationType;
 import org.apache.hadoop.hbase.master.procedure.TableProcedureInterface.TableOperationType;
 import org.apache.hadoop.hbase.procedure2.AbstractProcedureScheduler;
 import org.apache.hadoop.hbase.procedure2.LockAndQueue;
@@ -110,12 +111,17 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       new ServerQueueKeyComparator();
   private final static TableQueueKeyComparator TABLE_QUEUE_KEY_COMPARATOR =
       new TableQueueKeyComparator();
+  private final static PeerQueueKeyComparator PEER_QUEUE_KEY_COMPARATOR =
+      new PeerQueueKeyComparator();
 
   private final FairQueue<ServerName> serverRunQueue = new FairQueue<>();
   private final FairQueue<TableName> tableRunQueue = new FairQueue<>();
+  private final FairQueue<String> peerRunQueue = new FairQueue<>();
 
   private final ServerQueue[] serverBuckets = new ServerQueue[128];
   private TableQueue tableMap = null;
+  private PeerQueue peerMap = null;
+
   private final SchemaLocking locking = new SchemaLocking();
 
   /**
@@ -162,6 +168,8 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       doAdd(tableRunQueue, getTableQueue(getTableName(proc)), proc, addFront);
     } else if (isServerProcedure(proc)) {
       doAdd(serverRunQueue, getServerQueue(getServerName(proc)), proc, addFront);
+    } else if (isPeerProcedure(proc)) {
+      doAdd(peerRunQueue, getPeerQueue(getPeerId(proc)), proc, addFront);
     } else {
       // TODO: at the moment we only have Table and Server procedures
       // if you are implementing a non-table/non-server procedure, you have two options: create
@@ -173,7 +181,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   }
 
   private <T extends Comparable<T>> void doAdd(final FairQueue<T> fairq,
-      final Queue<T> queue, final Procedure proc, final boolean addFront) {
+      final Queue<T> queue, final Procedure<?> proc, final boolean addFront) {
     queue.add(proc, addFront);
     if (!queue.getLockStatus().hasExclusiveLock() || queue.getLockStatus().isLockOwner(proc.getProcId())) {
       // if the queue was not remove for an xlock execution
@@ -190,7 +198,8 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
 
   @Override
   protected boolean queueHasRunnables() {
-    return tableRunQueue.hasRunnables() || serverRunQueue.hasRunnables();
+    return tableRunQueue.hasRunnables() || serverRunQueue.hasRunnables() ||
+        peerRunQueue.hasRunnables();
   }
 
   @Override
@@ -198,7 +207,10 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     // For now, let server handling have precedence over table handling; presumption is that it
     // is more important handling crashed servers than it is running the
     // enabling/disabling tables, etc.
-    Procedure pollResult = doPoll(serverRunQueue);
+    Procedure<?> pollResult = doPoll(serverRunQueue);
+    if (pollResult == null) {
+      pollResult = doPoll(peerRunQueue);
+    }
     if (pollResult == null) {
       pollResult = doPoll(tableRunQueue);
     }
@@ -268,60 +280,30 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
         exclusiveLockOwnerProcedure, sharedLockCount, waitingProcedures);
   }
 
+  private <T> void addToLockedResources(List<LockedResource> lockedResources,
+      Map<T, LockAndQueue> locks, Function<T, String> keyTransformer,
+      LockedResourceType resourcesType) {
+    locks.entrySet().stream().filter(e -> e.getValue().isLocked())
+        .map(
+          e -> createLockedResource(resourcesType, keyTransformer.apply(e.getKey()), e.getValue()))
+        .forEachOrdered(lockedResources::add);
+  }
+
   @Override
   public List<LockedResource> getLocks() {
     schedLock();
-
     try {
       List<LockedResource> lockedResources = new ArrayList<>();
-
-      for (Entry<ServerName, LockAndQueue> entry : locking.serverLocks
-          .entrySet()) {
-        String serverName = entry.getKey().getServerName();
-        LockAndQueue queue = entry.getValue();
-
-        if (queue.isLocked()) {
-          LockedResource lockedResource =
-            createLockedResource(LockedResourceType.SERVER, serverName, queue);
-          lockedResources.add(lockedResource);
-        }
-      }
-
-      for (Entry<String, LockAndQueue> entry : locking.namespaceLocks
-          .entrySet()) {
-        String namespaceName = entry.getKey();
-        LockAndQueue queue = entry.getValue();
-
-        if (queue.isLocked()) {
-          LockedResource lockedResource =
-            createLockedResource(LockedResourceType.NAMESPACE, namespaceName, queue);
-          lockedResources.add(lockedResource);
-        }
-      }
-
-      for (Entry<TableName, LockAndQueue> entry : locking.tableLocks
-          .entrySet()) {
-        String tableName = entry.getKey().getNameAsString();
-        LockAndQueue queue = entry.getValue();
-
-        if (queue.isLocked()) {
-          LockedResource lockedResource =
-            createLockedResource(LockedResourceType.TABLE, tableName, queue);
-          lockedResources.add(lockedResource);
-        }
-      }
-
-      for (Entry<String, LockAndQueue> entry : locking.regionLocks.entrySet()) {
-        String regionName = entry.getKey();
-        LockAndQueue queue = entry.getValue();
-
-        if (queue.isLocked()) {
-          LockedResource lockedResource =
-            createLockedResource(LockedResourceType.REGION, regionName, queue);
-          lockedResources.add(lockedResource);
-        }
-      }
-
+      addToLockedResources(lockedResources, locking.serverLocks, sn -> sn.getServerName(),
+        LockedResourceType.SERVER);
+      addToLockedResources(lockedResources, locking.namespaceLocks, Function.identity(),
+        LockedResourceType.NAMESPACE);
+      addToLockedResources(lockedResources, locking.tableLocks, tn -> tn.getNameAsString(),
+        LockedResourceType.TABLE);
+      addToLockedResources(lockedResources, locking.regionLocks, Function.identity(),
+        LockedResourceType.REGION);
+      addToLockedResources(lockedResources, locking.peerLocks, Function.identity(),
+        LockedResourceType.PEER);
       return lockedResources;
     } finally {
       schedUnlock();
@@ -329,8 +311,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   }
 
   @Override
-  public LockedResource getLockResource(LockedResourceType resourceType,
-      String resourceName) {
+  public LockedResource getLockResource(LockedResourceType resourceType, String resourceName) {
     LockAndQueue queue = null;
     schedLock();
     try {
@@ -347,8 +328,10 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
         case REGION:
           queue = locking.regionLocks.get(resourceName);
           break;
+        case PEER:
+          queue = locking.peerLocks.get(resourceName);
+          break;
       }
-
       return queue != null ? createLockedResource(resourceType, resourceName, queue) : null;
     } finally {
       schedUnlock();
@@ -432,6 +415,11 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
         markTableAsDeleted(iProcTable.getTableName(), proc);
         return;
       }
+    } else if (proc instanceof PeerProcedureInterface) {
+      PeerProcedureInterface iProcPeer = (PeerProcedureInterface) proc;
+      if (iProcPeer.getPeerOperationType() == PeerOperationType.REMOVE) {
+        removePeerQueue(iProcPeer.getPeerId());
+      }
     } else {
       // No cleanup for ServerProcedureInterface types, yet.
       return;
@@ -469,12 +457,11 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     locking.removeTableLock(tableName);
   }
 
-
-  private static boolean isTableProcedure(Procedure proc) {
+  private static boolean isTableProcedure(Procedure<?> proc) {
     return proc instanceof TableProcedureInterface;
   }
 
-  private static TableName getTableName(Procedure proc) {
+  private static TableName getTableName(Procedure<?> proc) {
     return ((TableProcedureInterface)proc).getTableName();
   }
 
@@ -495,15 +482,42 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     return Math.abs(hashCode) % buckets.length;
   }
 
-  private static boolean isServerProcedure(Procedure proc) {
+  private static boolean isServerProcedure(Procedure<?> proc) {
     return proc instanceof ServerProcedureInterface;
   }
 
-  private static ServerName getServerName(Procedure proc) {
+  private static ServerName getServerName(Procedure<?> proc) {
     return ((ServerProcedureInterface)proc).getServerName();
   }
 
   // ============================================================================
+  //  Peer Queue Lookup Helpers
+  // ============================================================================
+  private PeerQueue getPeerQueue(String peerId) {
+    PeerQueue node = AvlTree.get(peerMap, peerId, PEER_QUEUE_KEY_COMPARATOR);
+    if (node != null) {
+      return node;
+    }
+    node = new PeerQueue(peerId, locking.getPeerLock(peerId));
+    peerMap = AvlTree.insert(peerMap, node);
+    return node;
+  }
+
+  private void removePeerQueue(String peerId) {
+    peerMap = AvlTree.remove(peerMap, peerId, PEER_QUEUE_KEY_COMPARATOR);
+    locking.removePeerLock(peerId);
+  }
+
+  private static boolean isPeerProcedure(Procedure<?> proc) {
+    return proc instanceof PeerProcedureInterface;
+  }
+
+  private static String getPeerId(Procedure<?> proc) {
+    return ((PeerProcedureInterface) proc).getPeerId();
+  }
+
+  // ============================================================================
   //  Table and Server Queue Implementation
   // ============================================================================
   private static class ServerQueueKeyComparator implements AvlKeyComparator<ServerQueue> {
@@ -572,6 +586,26 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     }
   }
 
+  private static class PeerQueueKeyComparator implements AvlKeyComparator<PeerQueue> {
+
+    @Override
+    public int compareKey(PeerQueue node, Object key) {
+      return node.compareKey((String) key);
+    }
+  }
+
+  public static class PeerQueue extends Queue<String> {
+
+    public PeerQueue(String peerId, LockStatus lockStatus) {
+      super(peerId, lockStatus);
+    }
+
+    @Override
+    public boolean requireExclusiveLock(Procedure proc) {
+      return requirePeerExclusiveLock((PeerProcedureInterface) proc);
+    }
+  }
+
   // ============================================================================
   //  Table Locking Helpers
   // ============================================================================
@@ -959,7 +993,8 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
    * @param serverName Server to lock
    * @return true if the procedure has to wait for the server to be available
    */
-  public boolean waitServerExclusiveLock(final Procedure procedure, final ServerName serverName) {
+  public boolean waitServerExclusiveLock(final Procedure<?> procedure,
+      final ServerName serverName) {
     schedLock();
     try {
       final LockAndQueue lock = locking.getServerLock(serverName);
@@ -981,7 +1016,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
    * @param procedure the procedure releasing the lock
    * @param serverName the server that has the exclusive lock
    */
-  public void wakeServerExclusiveLock(final Procedure procedure, final ServerName serverName) {
+  public void wakeServerExclusiveLock(final Procedure<?> procedure, final ServerName serverName) {
     schedLock();
     try {
       final LockAndQueue lock = locking.getServerLock(serverName);
@@ -995,6 +1030,56 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
   }
 
   // ============================================================================
+  //  Peer Locking Helpers
+  // ============================================================================
+
+  private static boolean requirePeerExclusiveLock(PeerProcedureInterface proc) {
+    return proc.getPeerOperationType() != PeerOperationType.REFRESH;
+  }
+
+  /**
+   * Try to acquire the exclusive lock on the specified peer.
+   * @see #wakePeerExclusiveLock(Procedure, String)
+   * @param procedure the procedure trying to acquire the lock
+   * @param peerId peer to lock
+   * @return true if the procedure has to wait for the peer to be available
+   */
+  public boolean waitPeerExclusiveLock(Procedure<?> procedure, String peerId) {
+    schedLock();
+    try {
+      final LockAndQueue lock = locking.getPeerLock(peerId);
+      if (lock.tryExclusiveLock(procedure)) {
+        removeFromRunQueue(peerRunQueue, getPeerQueue(peerId));
+        return false;
+      }
+      waitProcedure(lock, procedure);
+      logLockedResource(LockedResourceType.PEER, peerId);
+      return true;
+    } finally {
+      schedUnlock();
+    }
+  }
+
+  /**
+   * Wake the procedures waiting for the specified peer
+   * @see #waitPeerExclusiveLock(Procedure, String)
+   * @param procedure the procedure releasing the lock
+   * @param peerId the peer that has the exclusive lock
+   */
+  public void wakePeerExclusiveLock(Procedure<?> procedure, String peerId) {
+    schedLock();
+    try {
+      final LockAndQueue lock = locking.getPeerLock(peerId);
+      lock.releaseExclusiveLock(procedure);
+      addToRunQueue(peerRunQueue, getPeerQueue(peerId));
+      int waitingCount = wakeWaitingProcedures(lock);
+      wakePollIfNeeded(waitingCount);
+    } finally {
+      schedUnlock();
+    }
+  }
+
+  // ============================================================================
   //  Generic Helpers
   // ============================================================================
   private static abstract class Queue<TKey extends Comparable<TKey>>
@@ -1099,6 +1184,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
     final Map<TableName, LockAndQueue> tableLocks = new HashMap<>();
     // Single map for all regions irrespective of tables. Key is encoded region name.
     final Map<String, LockAndQueue> regionLocks = new HashMap<>();
+    final Map<String, LockAndQueue> peerLocks = new HashMap<>();
 
     private <T> LockAndQueue getLock(Map<T, LockAndQueue> map, T key) {
       LockAndQueue lock = map.get(key);
@@ -1133,6 +1219,14 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       return getLock(serverLocks, serverName);
     }
 
+    LockAndQueue getPeerLock(String peerId) {
+      return getLock(peerLocks, peerId);
+    }
+
+    LockAndQueue removePeerLock(String peerId) {
+      return peerLocks.remove(peerId);
+    }
+
     /**
      * Removes all locks by clearing the maps.
      * Used when procedure executor is stopped for failure and recovery testing.
@@ -1143,6 +1237,7 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       namespaceLocks.clear();
       tableLocks.clear();
       regionLocks.clear();
+      peerLocks.clear();
     }
 
     @Override
@@ -1150,7 +1245,8 @@ public class MasterProcedureScheduler extends AbstractProcedureScheduler {
       return "serverLocks=" + filterUnlocked(this.serverLocks) +
         ", namespaceLocks=" + filterUnlocked(this.namespaceLocks) +
         ", tableLocks=" + filterUnlocked(this.tableLocks) +
-        ", regionLocks=" + filterUnlocked(this.regionLocks);
+        ", regionLocks=" + filterUnlocked(this.regionLocks) +
+        ", peerLocks=" + filterUnlocked(this.peerLocks);
     }
 
     private String filterUnlocked(Map<?, LockAndQueue> locks) {

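A peer procedure would pair these two helpers in its lock hooks, so that only one non-REFRESH peer operation runs against a given peer at a time. A hypothetical sketch, assuming the procedure holds its peerId and that MasterProcedureEnv exposes the scheduler the same way the table/server lock helpers use it:

  @Override
  protected LockState acquireLock(MasterProcedureEnv env) {
    if (env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)) {
      return LockState.LOCK_EVENT_WAIT; // queued; the scheduler wakes us later
    }
    return LockState.LOCK_ACQUIRED;
  }

  @Override
  protected void releaseLock(MasterProcedureEnv env) {
    env.getProcedureScheduler().wakePeerExclusiveLock(this, peerId);
  }
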
http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.java
new file mode 100644
index 0000000..4abc9ad
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/PeerProcedureInterface.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.procedure;
+
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface PeerProcedureInterface {
+
+  enum PeerOperationType {
+    ADD, REMOVE, ENABLE, DISABLE, UPDATE_CONFIG, REFRESH
+  }
+
+  String getPeerId();
+
+  PeerOperationType getPeerOperationType();
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
index 045c416..f17c434 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/RSProcedureDispatcher.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.ipc.RemoteException;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.ArrayListMultimap;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProc
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RemoteProcedureRequest;
 
 /**
 * A remote procedure dispatcher for regionservers.
@@ -223,7 +225,10 @@ public class RSProcedureDispatcher
 
   private interface RemoteProcedureResolver {
     void dispatchOpenRequests(MasterProcedureEnv env, List<RegionOpenOperation> operations);
+
     void dispatchCloseRequests(MasterProcedureEnv env, List<RegionCloseOperation> operations);
+
+    void dispatchServerOperations(MasterProcedureEnv env, List<ServerOperation> operations);
   }
 
   /**
@@ -232,22 +237,28 @@ public class RSProcedureDispatcher
    * Then {@code resolver} is used to dispatch {@link RegionOpenOperation}s and
    * {@link RegionCloseOperation}s.
    * @param serverName RegionServer to which the remote operations are sent
-   * @param remoteProcedures Remote procedures which are dispatched to the given server
+   * @param operations Remote procedures which are dispatched to the given server
    * @param resolver Used to dispatch remote procedures to given server.
    */
-  public void splitAndResolveOperation(final ServerName serverName,
-      final Set<RemoteProcedure> remoteProcedures, final RemoteProcedureResolver resolver) {
-    final ArrayListMultimap<Class<?>, RemoteOperation> reqsByType =
-      buildAndGroupRequestByType(procedureEnv, serverName, remoteProcedures);
+  public void splitAndResolveOperation(ServerName serverName, Set<RemoteProcedure> operations,
+      RemoteProcedureResolver resolver) {
+    MasterProcedureEnv env = master.getMasterProcedureExecutor().getEnvironment();
+    ArrayListMultimap<Class<?>, RemoteOperation> reqsByType =
+      buildAndGroupRequestByType(env, serverName, operations);
 
-    final List<RegionOpenOperation> openOps = fetchType(reqsByType, RegionOpenOperation.class);
+    List<RegionOpenOperation> openOps = fetchType(reqsByType, RegionOpenOperation.class);
     if (!openOps.isEmpty()) {
-      resolver.dispatchOpenRequests(procedureEnv, openOps);
+      resolver.dispatchOpenRequests(env, openOps);
     }
 
-    final List<RegionCloseOperation> closeOps = fetchType(reqsByType, RegionCloseOperation.class);
+    List<RegionCloseOperation> closeOps = fetchType(reqsByType, RegionCloseOperation.class);
     if (!closeOps.isEmpty()) {
-      resolver.dispatchCloseRequests(procedureEnv, closeOps);
+      resolver.dispatchCloseRequests(env, closeOps);
+    }
+
+    List<ServerOperation> refreshOps = fetchType(reqsByType, ServerOperation.class);
+    if (!refreshOps.isEmpty()) {
+      resolver.dispatchServerOperations(env, refreshOps);
     }
 
     if (!reqsByType.isEmpty()) {
@@ -278,8 +289,7 @@ public class RSProcedureDispatcher
       splitAndResolveOperation(getServerName(), remoteProcedures, this);
 
       try {
-        final ExecuteProceduresResponse response = sendRequest(getServerName(), request.build());
-        remoteCallCompleted(procedureEnv, response);
+        sendRequest(getServerName(), request.build());
       } catch (IOException e) {
         e = unwrapException(e);
         // TODO: In the future some operation may want to bail out early.
@@ -303,6 +313,11 @@ public class RSProcedureDispatcher
       }
     }
 
+    @Override
+    public void dispatchServerOperations(MasterProcedureEnv env, List<ServerOperation> operations) {
+      operations.stream().map(o -> o.buildRequest()).forEachOrdered(request::addProc);
+    }
+
     protected ExecuteProceduresResponse sendRequest(final ServerName serverName,
         final ExecuteProceduresRequest request) throws IOException {
       try {
@@ -312,17 +327,8 @@ public class RSProcedureDispatcher
       }
     }
 
-
-    private void remoteCallCompleted(final MasterProcedureEnv env,
-        final ExecuteProceduresResponse response) {
-      /*
-      for (RemoteProcedure proc: operations) {
-        proc.remoteCallCompleted(env, getServerName(), response);
-      }*/
-    }
-
     private void remoteCallFailed(final MasterProcedureEnv env, final IOException e) {
-      for (RemoteProcedure proc: remoteProcedures) {
+      for (RemoteProcedure proc : remoteProcedures) {
         proc.remoteCallFailed(env, getServerName(), e);
       }
     }
@@ -363,8 +369,7 @@ public class RSProcedureDispatcher
           buildOpenRegionRequest(procedureEnv, getServerName(), operations);
 
       try {
-        OpenRegionResponse response = sendRequest(getServerName(), request);
-        remoteCallCompleted(procedureEnv, response);
+        sendRequest(getServerName(), request);
       } catch (IOException e) {
         e = unwrapException(e);
         // TODO: In the future some operation may want to bail out early.
@@ -385,16 +390,6 @@ public class RSProcedureDispatcher
       }
     }
 
-    private void remoteCallCompleted(final MasterProcedureEnv env,
-        final OpenRegionResponse response) {
-      int index = 0;
-      for (RegionOpenOperation op: operations) {
-        OpenRegionResponse.RegionOpeningState state = response.getOpeningState(index++);
-        op.setFailedOpen(state == OpenRegionResponse.RegionOpeningState.FAILED_OPENING);
-        op.getRemoteProcedure().remoteCallCompleted(env, getServerName(), op);
-      }
-    }
-
     private void remoteCallFailed(final MasterProcedureEnv env, final IOException e) {
       for (RegionOpenOperation op: operations) {
         op.getRemoteProcedure().remoteCallFailed(env, getServerName(), e);
@@ -444,7 +439,6 @@ public class RSProcedureDispatcher
     private void remoteCallCompleted(final MasterProcedureEnv env,
         final CloseRegionResponse response) {
       operation.setClosed(response.getClosed());
-      operation.getRemoteProcedure().remoteCallCompleted(env, getServerName(), operation);
     }
 
     private void remoteCallFailed(final MasterProcedureEnv env, final IOException e) {
@@ -483,6 +477,11 @@ public class RSProcedureDispatcher
         submitTask(new CloseRegionRemoteCall(serverName, op));
       }
     }
+
+    @Override
+    public void dispatchServerOperations(MasterProcedureEnv env, List<ServerOperation> operations) {
+      throw new UnsupportedOperationException();
+    }
   }
 
   // ==========================================================================
@@ -490,13 +489,28 @@ public class RSProcedureDispatcher
   //  - ServerOperation: refreshConfig, grant, revoke, ... (TODO)
   //  - RegionOperation: open, close, flush, snapshot, ...
   // ==========================================================================
-  /* Currently unused
-  public static abstract class ServerOperation extends RemoteOperation {
-    protected ServerOperation(final RemoteProcedure remoteProcedure) {
+
+  public static final class ServerOperation extends RemoteOperation {
+
+    private final long procId;
+
+    private final Class<?> rsProcClass;
+
+    private final byte[] rsProcData;
+
+    public ServerOperation(RemoteProcedure remoteProcedure, long procId, Class<?> rsProcClass,
+        byte[] rsProcData) {
       super(remoteProcedure);
+      this.procId = procId;
+      this.rsProcClass = rsProcClass;
+      this.rsProcData = rsProcData;
+    }
+
+    public RemoteProcedureRequest buildRequest() {
+      return RemoteProcedureRequest.newBuilder().setProcId(procId)
+          .setProcClass(rsProcClass.getName()).setProcData(ByteString.copyFrom(rsProcData)).build();
     }
   }
-  */
 
   public static abstract class RegionOperation extends RemoteOperation {
     private final RegionInfo regionInfo;
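
ServerOperation above is the generic carrier for "run this callable on that
regionserver": only the procedure id, the callable class name, and the opaque
parameter bytes travel over the wire. A short sketch of that contract, where
MyCallable stands in for any RSProcedureCallable implementation
(RefreshPeerCallable in this series):

// Hypothetical sketch: how a RemoteProcedure becomes the RemoteProcedureRequest
// that executeProcedures() on the RS consumes.
RemoteProcedureRequest toRequest(RemoteProcedure proc, long procId, byte[] data) {
  ServerOperation op = new ServerOperation(proc, procId, MyCallable.class, data);
  // The request carries procId, MyCallable's fully qualified name, and data;
  // the RS rebuilds the callable reflectively from the class name.
  return op.buildRequest();
}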

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
new file mode 100644
index 0000000..fca05a7
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState;
+
+@InterfaceAudience.Private
+public abstract class ModifyPeerProcedure
+    extends StateMachineProcedure<MasterProcedureEnv, PeerModificationState>
+    implements PeerProcedureInterface {
+
+  private static final Log LOG = LogFactory.getLog(ModifyPeerProcedure.class);
+
+  protected String peerId;
+
+  protected ModifyPeerProcedure() {
+  }
+
+  protected ModifyPeerProcedure(String peerId) {
+    this.peerId = peerId;
+  }
+
+  @Override
+  public String getPeerId() {
+    return peerId;
+  }
+
+  /**
+   * Returns {@code false} if the operation is invalid and we should give up, otherwise
+   * {@code true}.
+   * <p>
+   * You need to call {@link #setFailure(String, Throwable)} to record the detailed failure
+   * information.
+   */
+  protected abstract boolean updatePeerStorage() throws IOException;
+
+  protected void postPeerModification() {
+  }
+
+  @Override
+  protected Flow executeFromState(MasterProcedureEnv env, PeerModificationState state)
+      throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
+    switch (state) {
+      case UPDATE_PEER_STORAGE:
+        try {
+          if (!updatePeerStorage()) {
+            assert isFailed() : "setFailure is not called";
+            return Flow.NO_MORE_STATE;
+          }
+        } catch (IOException e) {
+          LOG.warn("update peer storage failed, retry", e);
+          throw new ProcedureYieldException();
+        }
+        setNextState(PeerModificationState.REFRESH_PEER_ON_RS);
+        return Flow.HAS_MORE_STATE;
+      case REFRESH_PEER_ON_RS:
+        addChildProcedure(env.getMasterServices().getServerManager().getOnlineServersList().stream()
+            .map(sn -> new RefreshPeerProcedure(peerId, getPeerOperationType(), sn))
+            .toArray(RefreshPeerProcedure[]::new));
+        setNextState(PeerModificationState.POST_PEER_MODIFICATION);
+        return Flow.HAS_MORE_STATE;
+      case POST_PEER_MODIFICATION:
+        postPeerModification();
+        return Flow.NO_MORE_STATE;
+      default:
+        throw new UnsupportedOperationException("unhandled state=" + state);
+    }
+  }
+
+  @Override
+  protected LockState acquireLock(MasterProcedureEnv env) {
+    return env.getProcedureScheduler().waitPeerExclusiveLock(this, peerId)
+      ? LockState.LOCK_EVENT_WAIT
+      : LockState.LOCK_ACQUIRED;
+  }
+
+  @Override
+  protected void releaseLock(MasterProcedureEnv env) {
+    env.getProcedureScheduler().wakePeerExclusiveLock(this, peerId);
+  }
+
+  @Override
+  protected void rollbackState(MasterProcedureEnv env, PeerModificationState state)
+      throws IOException, InterruptedException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  protected PeerModificationState getState(int stateId) {
+    return PeerModificationState.forNumber(stateId);
+  }
+
+  @Override
+  protected int getStateId(PeerModificationState state) {
+    return state.getNumber();
+  }
+
+  @Override
+  protected PeerModificationState getInitialState() {
+    return PeerModificationState.UPDATE_PEER_STORAGE;
+  }
+}
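
Subclasses of ModifyPeerProcedure only supply updatePeerStorage() and the
operation type; the UPDATE_PEER_STORAGE -> REFRESH_PEER_ON_RS ->
POST_PEER_MODIFICATION flow is inherited. A hedged sketch of what a concrete
subclass could look like (EnablePeerProcedure is hypothetical here; the
in-tree equivalent used for testing is DummyModifyPeerProcedure further down):

import java.io.IOException;

// Hypothetical sketch only: a real implementation would flip the peer state
// in replication peer storage inside updatePeerStorage().
public class EnablePeerProcedure extends ModifyPeerProcedure {

  public EnablePeerProcedure() {
  }

  public EnablePeerProcedure(String peerId) {
    super(peerId);
  }

  @Override
  public PeerOperationType getPeerOperationType() {
    return PeerOperationType.ENABLE;
  }

  @Override
  protected boolean updatePeerStorage() throws IOException {
    // Would persist the ENABLED state for peerId here; returning false after
    // setFailure(...) aborts before any RS is refreshed.
    return true;
  }
}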

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerCallable.java
new file mode 100644
index 0000000..4e09107
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerCallable.java
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.procedure2.RSProcedureCallable;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerParameter;
+
+/**
+ * The callable executed on the RS side to refresh the peer config/state.
+ * <p>
+ * TODO: only a dummy implementation for verifying the framework; a real implementation will be
+ * added later.
+ */
+@InterfaceAudience.Private
+public class RefreshPeerCallable implements RSProcedureCallable {
+
+  private HRegionServer rs;
+
+  private String peerId;
+
+  private Exception initError;
+
+  @Override
+  public Void call() throws Exception {
+    if (initError != null) {
+      throw initError;
+    }
+    rs.getFileSystem().create(new Path("/" + peerId + "/" + rs.getServerName().toString())).close();
+    return null;
+  }
+
+  @Override
+  public void init(byte[] parameter, HRegionServer rs) {
+    this.rs = rs;
+    try {
+      this.peerId = RefreshPeerParameter.parseFrom(parameter).getPeerId();
+    } catch (InvalidProtocolBufferException e) {
+      initError = e;
+      return;
+    }
+  }
+
+  @Override
+  public EventType getEventType() {
+    return EventType.RS_REFRESH_PEER;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
new file mode 100644
index 0000000..18da487
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
@@ -0,0 +1,197 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
+import org.apache.hadoop.hbase.master.procedure.RSProcedureDispatcher.ServerOperation;
+import org.apache.hadoop.hbase.procedure2.Procedure;
+import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
+import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationType;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerParameter;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerStateData;
+
+@InterfaceAudience.Private
+public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
+    implements PeerProcedureInterface, RemoteProcedure<MasterProcedureEnv, ServerName> {
+
+  private static final Log LOG = LogFactory.getLog(RefreshPeerProcedure.class);
+
+  private String peerId;
+
+  private PeerOperationType type;
+
+  private ServerName targetServer;
+
+  private boolean dispatched;
+
+  private ProcedureEvent<?> event;
+
+  private boolean succ;
+
+  public RefreshPeerProcedure() {
+  }
+
+  public RefreshPeerProcedure(String peerId, PeerOperationType type, ServerName targetServer) {
+    this.peerId = peerId;
+    this.type = type;
+    this.targetServer = targetServer;
+  }
+
+  @Override
+  public String getPeerId() {
+    return peerId;
+  }
+
+  @Override
+  public PeerOperationType getPeerOperationType() {
+    return PeerOperationType.REFRESH;
+  }
+
+  private static PeerModificationType toPeerModificationType(PeerOperationType type) {
+    switch (type) {
+      case ADD:
+        return PeerModificationType.ADD_PEER;
+      case REMOVE:
+        return PeerModificationType.REMOVE_PEER;
+      case ENABLE:
+        return PeerModificationType.ENABLE_PEER;
+      case DISABLE:
+        return PeerModificationType.DISABLE_PEER;
+      case UPDATE_CONFIG:
+        return PeerModificationType.UPDATE_PEER_CONFIG;
+      default:
+        throw new IllegalArgumentException("Unknown type: " + type);
+    }
+  }
+
+  private static PeerOperationType toPeerOperationType(PeerModificationType type) {
+    switch (type) {
+      case ADD_PEER:
+        return PeerOperationType.ADD;
+      case REMOVE_PEER:
+        return PeerOperationType.REMOVE;
+      case ENABLE_PEER:
+        return PeerOperationType.ENABLE;
+      case DISABLE_PEER:
+        return PeerOperationType.DISABLE;
+      case UPDATE_PEER_CONFIG:
+        return PeerOperationType.UPDATE_CONFIG;
+      default:
+        throw new IllegalArgumentException("Unknown type: " + type);
+    }
+  }
+
+  @Override
+  public RemoteOperation remoteCallBuild(MasterProcedureEnv env, ServerName remote) {
+    assert targetServer.equals(remote);
+    return new ServerOperation(this, getProcId(), RefreshPeerCallable.class,
+        RefreshPeerParameter.newBuilder().setPeerId(peerId).setType(toPeerModificationType(type))
+            .setTargetServer(ProtobufUtil.toServerName(remote)).build().toByteArray());
+  }
+
+  private void complete(MasterProcedureEnv env, boolean succ) {
+    if (event == null) {
+      LOG.warn("procedure event for " + getProcId() +
+          " is null, maybe the procedure is created when recovery", new Exception());
+      return;
+    }
+    LOG.info("Refresh peer " + peerId + " for " + type + " on " + targetServer +
+        (succ ? " suceeded" : " failed"));
+    this.succ = succ;
+    event.wake(env.getProcedureScheduler());
+    event = null;
+  }
+
+  @Override
+  public synchronized void remoteCallFailed(MasterProcedureEnv env, ServerName remote,
+      IOException exception) {
+    complete(env, false);
+  }
+
+  @Override
+  public synchronized void remoteOperationCompleted(MasterProcedureEnv env) {
+    complete(env, true);
+  }
+
+  @Override
+  public synchronized void remoteOperationFailed(MasterProcedureEnv env, String error) {
+    complete(env, false);
+  }
+
+  @Override
+  protected synchronized Procedure<MasterProcedureEnv>[] execute(MasterProcedureEnv env)
+      throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
+    if (dispatched) {
+      if (succ) {
+        return null;
+      }
+      // retry
+      dispatched = false;
+    }
+    if (!env.getRemoteDispatcher().addOperationToNode(targetServer, this)) {
+      LOG.info("Can not add remote operation for refreshing peer " + peerId + " for " + type +
+          " to " + targetServer + ", this usually because the server is already dead," +
+          " give up and mark the procedure as complete");
+      return null;
+    }
+    dispatched = true;
+    event = new ProcedureEvent<>(this);
+    event.suspendIfNotReady(this);
+    throw new ProcedureSuspendedException();
+  }
+
+  @Override
+  protected void rollback(MasterProcedureEnv env) throws IOException, InterruptedException {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  protected boolean abort(MasterProcedureEnv env) {
+    // TODO: no correctness problem if we just ignore this, implement later.
+    return false;
+  }
+
+  @Override
+  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
+    serializer.serialize(
+      RefreshPeerStateData.newBuilder().setPeerId(peerId).setType(toPeerModificationType(type))
+          .setTargetServer(ProtobufUtil.toServerName(targetServer)).build());
+  }
+
+  @Override
+  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
+    RefreshPeerStateData data = serializer.deserialize(RefreshPeerStateData.class);
+    peerId = data.getPeerId();
+    type = toPeerOperationType(data.getType());
+    targetServer = ProtobufUtil.toServerName(data.getTargetServer());
+  }
+}
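
The parameter built in remoteCallBuild() above is exactly what the RS-side
RefreshPeerCallable.init() parses back, so the two ends stay in sync through
the RefreshPeerParameter protobuf. A small round-trip sketch using only calls
that appear in this patch ("peer_1" is an illustrative peer id):

// Sketch: serialize on the master side, parse on the RS side.
static String roundTrip(ServerName target) throws Exception {
  byte[] data = RefreshPeerParameter.newBuilder()
      .setPeerId("peer_1")
      .setType(PeerModificationType.ENABLE_PEER)
      .setTargetServer(ProtobufUtil.toServerName(target))
      .build().toByteArray();
  // This is what RefreshPeerCallable.init() does with the bytes:
  return RefreshPeerParameter.parseFrom(data).getPeerId(); // -> "peer_1"
}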

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java
new file mode 100644
index 0000000..62c2e36
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure2/RSProcedureCallable.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.procedure2;
+
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A general interface for a sub procedure that runs on the RS side.
+ */
+@InterfaceAudience.Private
+public interface RSProcedureCallable extends Callable<Void> {
+
+  /**
+   * Initialize the callable
+   * @param parameter the parameter passed from master.
+   * @param rs the regionserver instance
+   */
+  void init(byte[] parameter, HRegionServer rs);
+
+  /**
+   * Event type used to select thread pool.
+   */
+  EventType getEventType();
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index e2d6ba0..779451b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -17,9 +17,6 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import javax.management.MalformedObjectNameException;
-import javax.management.ObjectName;
-import javax.servlet.http.HttpServlet;
 import java.io.IOException;
 import java.lang.Thread.UncaughtExceptionHandler;
 import java.lang.management.MemoryType;
@@ -51,6 +48,10 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.Function;
 
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.servlet.http.HttpServlet;
+
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.commons.lang3.SystemUtils;
 import org.apache.commons.logging.Log;
@@ -118,6 +119,7 @@ import org.apache.hadoop.hbase.master.LoadBalancer;
 import org.apache.hadoop.hbase.master.RegionState.State;
 import org.apache.hadoop.hbase.mob.MobCacheConfig;
 import org.apache.hadoop.hbase.procedure.RegionServerProcedureManagerHost;
+import org.apache.hadoop.hbase.procedure2.RSProcedureCallable;
 import org.apache.hadoop.hbase.quotas.FileSystemUtilizationChore;
 import org.apache.hadoop.hbase.quotas.QuotaUtil;
 import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager;
@@ -128,6 +130,7 @@ import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequester;
 import org.apache.hadoop.hbase.regionserver.handler.CloseMetaHandler;
 import org.apache.hadoop.hbase.regionserver.handler.CloseRegionHandler;
+import org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler;
 import org.apache.hadoop.hbase.regionserver.handler.RegionReplicaFlushHandler;
 import org.apache.hadoop.hbase.regionserver.throttle.FlushThroughputControllerFactory;
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
@@ -175,6 +178,7 @@ import org.apache.zookeeper.KeeperException;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannel;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
@@ -206,6 +210,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProto
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionSpaceUseReportRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportProcedureDoneRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse;
@@ -1931,6 +1936,8 @@ public class HRegionServer extends HasThread implements
           conf.getInt("hbase.regionserver.region.replica.flusher.threads",
               conf.getInt("hbase.regionserver.executor.openregion.threads", 3)));
     }
+    this.executorService.startExecutorService(ExecutorType.RS_REFRESH_PEER,
+      conf.getInt("hbase.regionserver.executor.refresh.peer.threads", 2));
 
     Threads.setDaemonThreadRunning(this.walRoller.getThread(), getName() + ".logRoller",
     uncaughtExceptionHandler);
@@ -3724,4 +3731,60 @@ public class HRegionServer extends HasThread implements
     return ConnectionUtils.createShortCircuitConnection(conf, null, user, this.serverName,
         this.rpcServices, this.rpcServices);
   }
+
+  public void executeProcedure(long procId, RSProcedureCallable callable) {
+    executorService.submit(new RSProcedureHandler(this, procId, callable));
+  }
+
+  public void reportProcedureDone(long procId, Throwable error) {
+    ReportProcedureDoneRequest.Builder builder =
+      ReportProcedureDoneRequest.newBuilder().setProcId(procId);
+    if (error != null) {
+      builder.setStatus(ReportProcedureDoneRequest.Status.ERROR)
+          .setError(Throwables.getStackTraceAsString(error));
+    } else {
+      builder.setStatus(ReportProcedureDoneRequest.Status.SUCCESS);
+    }
+    ReportProcedureDoneRequest request = builder.build();
+    int tries = 0;
+    long pauseTime = INIT_PAUSE_TIME_MS;
+    while (keepLooping()) {
+      RegionServerStatusService.BlockingInterface rss = rssStub;
+      try {
+        if (rss == null) {
+          createRegionServerStatusStub();
+          continue;
+        }
+        rss.reportProcedureDone(null, request);
+        // Log if we had to retry; else don't log unless TRACE. We want to
+        // know if we were successful after an attempt showed in logs as failed.
+        if (tries > 0 || LOG.isTraceEnabled()) {
+          LOG.info("PROCEDURE REPORTED " + request);
+        }
+        return;
+      } catch (ServiceException se) {
+        IOException ioe = ProtobufUtil.getRemoteException(se);
+        boolean pause =
+          ioe instanceof ServerNotRunningYetException || ioe instanceof PleaseHoldException;
+        if (pause) {
+          // Do backoff else we flood the Master with requests.
+          pauseTime = ConnectionUtils.getPauseTime(INIT_PAUSE_TIME_MS, tries);
+        } else {
+          pauseTime = INIT_PAUSE_TIME_MS; // Reset.
+        }
+        LOG.info(
+          "Failed to report transition " + TextFormat.shortDebugString(request) + "; retry (#" +
+            tries + ")" + (pause ? " after " + pauseTime + "ms delay (Master is coming online...)."
+              : " immediately."),
+          ioe);
+        if (pause) {
+          Threads.sleep(pauseTime);
+        }
+        tries++;
+        if (rssStub == rss) {
+          rssStub = null;
+        }
+      }
+    }
+  }
 }
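
reportProcedureDone() above pauses only for "master not ready" errors
(ServerNotRunningYetException, PleaseHoldException) and otherwise retries
immediately, growing the pause via ConnectionUtils.getPauseTime(). A generic
sketch of the backoff idea (this is not HBase's exact pause table, just the
shape of it):

// Hypothetical sketch: capped exponential backoff so a slow master start-up
// is not hammered with report retries.
static long backoffMs(long initialMs, int tries, long capMs) {
  long pause = initialMs * (1L << Math.min(tries, 16)); // exponential growth
  return Math.min(pause, capMs);                        // never exceed the cap
}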

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index a77856a..4f10964 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -100,6 +99,7 @@ import org.apache.hadoop.hbase.ipc.RpcServerInterface;
 import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.master.MasterRpcServices;
+import org.apache.hadoop.hbase.procedure2.RSProcedureCallable;
 import org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement;
 import org.apache.hadoop.hbase.quotas.OperationQuota;
 import org.apache.hadoop.hbase.quotas.QuotaUtil;
@@ -170,6 +170,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionR
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RemoteProcedureRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest;
@@ -3432,23 +3433,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
   }
 
   @Override
-  public ExecuteProceduresResponse executeProcedures(RpcController controller,
-       ExecuteProceduresRequest request) throws ServiceException {
-    ExecuteProceduresResponse.Builder builder = ExecuteProceduresResponse.newBuilder();
-    if (request.getOpenRegionCount() > 0) {
-      for (OpenRegionRequest req : request.getOpenRegionList()) {
-        builder.addOpenRegion(openRegion(controller, req));
-      }
-    }
-    if (request.getCloseRegionCount() > 0) {
-      for (CloseRegionRequest req : request.getCloseRegionList()) {
-        builder.addCloseRegion(closeRegion(controller, req));
-      }
-    }
-    return builder.build();
-  }
-
-  @Override
   public ClearRegionBlockCacheResponse clearRegionBlockCache(RpcController controller,
       ClearRegionBlockCacheRequest request) {
     ClearRegionBlockCacheResponse.Builder builder =
@@ -3464,4 +3448,38 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
     }
     return builder.setStats(ProtobufUtil.toCacheEvictionStats(stats.build())).build();
   }
+
+  @Override
+  public ExecuteProceduresResponse executeProcedures(RpcController controller,
+      ExecuteProceduresRequest request) throws ServiceException {
+    if (request.getOpenRegionCount() > 0) {
+      for (OpenRegionRequest req : request.getOpenRegionList()) {
+        openRegion(controller, req);
+      }
+    }
+    if (request.getCloseRegionCount() > 0) {
+      for (CloseRegionRequest req : request.getCloseRegionList()) {
+        closeRegion(controller, req);
+      }
+    }
+    if (request.getProcCount() > 0) {
+      for (RemoteProcedureRequest req : request.getProcList()) {
+        RSProcedureCallable callable;
+        try {
+          callable =
+            Class.forName(req.getProcClass()).asSubclass(RSProcedureCallable.class).newInstance();
+        } catch (Exception e) {
+          // Here we just ignore the error, as this should not happen and we do not provide a
+          // general way to report errors for all types of remote procedures. The procedure will
+          // hang on the master side, but after you solve the problem and restart the master, it
+          // will be executed again and pass.
+          LOG.warn("create procedure of type " + req.getProcClass() + " failed, give up", e);
+          continue;
+        }
+        callable.init(req.getProcData().toByteArray(), regionServer);
+        regionServer.executeProcedure(req.getProcId(), callable);
+      }
+    }
+    return ExecuteProceduresResponse.getDefaultInstance();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java
new file mode 100644
index 0000000..94bcfec
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RSProcedureHandler.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.handler;
+
+import org.apache.hadoop.hbase.executor.EventHandler;
+import org.apache.hadoop.hbase.procedure2.RSProcedureCallable;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * An event handler for running a procedure.
+ */
+@InterfaceAudience.Private
+public class RSProcedureHandler extends EventHandler {
+
+  private final long procId;
+
+  private final RSProcedureCallable callable;
+
+  public RSProcedureHandler(HRegionServer rs, long procId, RSProcedureCallable callable) {
+    super(rs, callable.getEventType());
+    this.procId = procId;
+    this.callable = callable;
+  }
+
+  @Override
+  public void process() {
+    Exception error = null;
+    try {
+      callable.call();
+    } catch (Exception e) {
+      error = e;
+    }
+    ((HRegionServer) server).reportProcedureDone(procId, error);
+  }
+}
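
The handler above is also the error funnel: whatever the callable throws is
reported back to the master, which fails the corresponding remote procedure
(remoteOperationFailed() in RefreshPeerProcedure). A sketch of the failure
path, assuming a throwing callable ("boom", regionServer, and procId are
illustrative):

// Hypothetical sketch: a callable that fails makes the handler report an
// error string back to the master via reportProcedureDone(procId, error).
RSProcedureCallable failing = new RSProcedureCallable() {
  @Override public Void call() throws Exception { throw new IOException("boom"); }
  @Override public void init(byte[] parameter, HRegionServer rs) {}
  @Override public EventType getEventType() { return EventType.RS_REFRESH_PEER; }
};
regionServer.executeProcedure(procId, failing); // process() catches and reports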

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
index f4365ea..778c34c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestAssignmentManager.java
@@ -536,26 +536,16 @@ public class TestAssignmentManager {
   private class NoopRsExecutor implements MockRSExecutor {
     public ExecuteProceduresResponse sendRequest(ServerName server,
         ExecuteProceduresRequest request) throws IOException {
-      ExecuteProceduresResponse.Builder builder = ExecuteProceduresResponse.newBuilder();
       if (request.getOpenRegionCount() > 0) {
-        for (OpenRegionRequest req: request.getOpenRegionList()) {
-          OpenRegionResponse.Builder resp = OpenRegionResponse.newBuilder();
-          for (RegionOpenInfo openReq: req.getOpenInfoList()) {
-            RegionOpeningState state = execOpenRegion(server, openReq);
-            if (state != null) {
-              resp.addOpeningState(state);
-            }
+        for (OpenRegionRequest req : request.getOpenRegionList()) {
+          for (RegionOpenInfo openReq : req.getOpenInfoList()) {
+            execOpenRegion(server, openReq);
           }
-          builder.addOpenRegion(resp.build());
         }
       }
       if (request.getCloseRegionCount() > 0) {
-        for (CloseRegionRequest req: request.getCloseRegionList()) {
-          CloseRegionResponse resp = execCloseRegion(server,
-              req.getRegion().getValue().toByteArray());
-          if (resp != null) {
-            builder.addCloseRegion(resp);
-          }
+        for (CloseRegionRequest req : request.getCloseRegionList()) {
+          execCloseRegion(server, req.getRegion().getValue().toByteArray());
         }
       }
       return ExecuteProceduresResponse.newBuilder().build();

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/DummyModifyPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/DummyModifyPeerProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/DummyModifyPeerProcedure.java
new file mode 100644
index 0000000..44343d7
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/DummyModifyPeerProcedure.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+
+public class DummyModifyPeerProcedure extends ModifyPeerProcedure {
+
+  public DummyModifyPeerProcedure() {
+  }
+
+  public DummyModifyPeerProcedure(String peerId) {
+    super(peerId);
+  }
+
+  @Override
+  public PeerOperationType getPeerOperationType() {
+    return PeerOperationType.ADD;
+  }
+
+  @Override
+  protected boolean updatePeerStorage() throws IOException {
+    return true;
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestDummyModifyPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestDummyModifyPeerProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestDummyModifyPeerProcedure.java
new file mode 100644
index 0000000..ec06306
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/TestDummyModifyPeerProcedure.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import static org.junit.Assert.assertTrue;
+
+import java.util.HashSet;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.testclassification.MasterTests;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ MasterTests.class, LargeTests.class })
+public class TestDummyModifyPeerProcedure {
+
+  private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();
+
+  private static String PEER_ID;
+
+  private static Path DIR;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    UTIL.startMiniCluster(3);
+    PEER_ID = "testPeer";
+    DIR = new Path("/" + PEER_ID);
+    UTIL.getTestFileSystem().mkdirs(DIR);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void test() throws Exception {
+    ProcedureExecutor<?> executor =
+        UTIL.getMiniHBaseCluster().getMaster().getMasterProcedureExecutor();
+    long procId = executor.submitProcedure(new DummyModifyPeerProcedure(PEER_ID));
+    UTIL.waitFor(30000, new Waiter.Predicate<Exception>() {
+
+      @Override
+      public boolean evaluate() throws Exception {
+        return executor.isFinished(procId);
+      }
+    });
+    Set<String> serverNames = UTIL.getHBaseCluster().getRegionServerThreads().stream()
+        .map(t -> t.getRegionServer().getServerName().toString())
+        .collect(Collectors.toCollection(HashSet::new));
+    for (FileStatus s : UTIL.getTestFileSystem().listStatus(DIR)) {
+      assertTrue(serverNames.remove(s.getPath().getName()));
+    }
+    assertTrue(serverNames.isEmpty());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/123cf75c/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
index 6791465..29c702b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
@@ -30,12 +30,14 @@ import com.google.protobuf.RpcCallback;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
 import com.google.protobuf.ServiceException;
+
 import java.io.IOException;
 import java.security.PrivilegedAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -120,8 +122,6 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
 import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.access.Permission.Action;
-import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.testclassification.SecurityTests;
 import org.apache.hadoop.hbase.tool.LoadIncrementalHFiles;
@@ -137,6 +137,9 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.rules.TestName;
 
+import org.apache.hadoop.hbase.shaded.ipc.protobuf.generated.TestProcedureProtos;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState;
+
 /**
  * Performs authorization checks for common operations, according to different
  * levels of authorized users.


[14/24] hbase git commit: HBASE-19538 Removed unnecessary semicolons in hbase-client

Posted by zh...@apache.org.
HBASE-19538 Removed unnecessary semicolons in hbase-client


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dd00081c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dd00081c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dd00081c

Branch: refs/heads/HBASE-19397
Commit: dd00081c26fb4222e223b510e58923118cf9df43
Parents: 0f8ea39
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sun Dec 17 16:15:20 2017 +0100
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Tue Dec 19 20:50:18 2017 +0100

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hbase/ClusterStatus.java     | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java | 2 +-
 .../java/org/apache/hadoop/hbase/client/CompactionState.java     | 3 ++-
 .../src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java | 4 ++--
 .../main/java/org/apache/hadoop/hbase/client/SnapshotType.java   | 3 ++-
 .../apache/hadoop/hbase/client/security/SecurityCapability.java  | 3 +--
 .../org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java     | 2 +-
 .../java/org/apache/hadoop/hbase/quotas/ThrottlingException.java | 2 +-
 .../java/org/apache/hadoop/hbase/client/TestAsyncProcess.java    | 2 +-
 9 files changed, 12 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/dd00081c/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index f06d9b9..6b4c03a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -483,6 +483,6 @@ public class ClusterStatus {
     BACKUP_MASTERS, /** status about backup masters */
     MASTER_COPROCESSORS, /** status about master coprocessors */
     REGIONS_IN_TRANSITION, /** status about regions in transition */
-    MASTER_INFO_PORT; /** master info port **/
+    MASTER_INFO_PORT /** master info port **/
   }
 }
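
The trailing semicolon removed here is legal but redundant: in Java it is only
required after the constant list when the enum body goes on to declare fields,
constructors, or methods. A quick illustration (Flag and Level are made-up
names):

public enum Flag {
  ON, OFF           // no semicolon needed: the constant list ends the body
}

public enum Level {
  LOW(1), HIGH(2);  // semicolon required: members follow the constants
  private final int value;
  Level(int value) { this.value = value; }
}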

http://git-wip-us.apache.org/repos/asf/hbase/blob/dd00081c/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
index e184f7c..0774945 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java
@@ -332,7 +332,7 @@ public class MetaTableAccessor {
       parsedInfo = parseRegionInfoFromRegionName(regionName);
       row = getMetaKeyForRegion(parsedInfo);
     } catch (Exception parseEx) {
-      ; // Ignore. This is used with tableName passed as regionName.
+      // Ignore. This is used with tableName passed as regionName.
     }
     Get get = new Get(row);
     get.addFamily(HConstants.CATALOG_FAMILY);

http://git-wip-us.apache.org/repos/asf/hbase/blob/dd00081c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java
index 09a76e0..51f7d07 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hbase.client;
+
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -23,5 +24,5 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public enum CompactionState {
-  NONE, MINOR, MAJOR, MAJOR_AND_MINOR;
+  NONE, MINOR, MAJOR, MAJOR_AND_MINOR
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/dd00081c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index ef054b6..207d28b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -3082,7 +3082,7 @@ public class HBaseAdmin implements Admin {
                 CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request);
             return this.master.execMasterService(getRpcController(), csr);
           }
-        };) {
+        }) {
           // TODO: Are we retrying here? Does not seem so. We should use RetryingRpcCaller
           callable.prepare(false);
           int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout();
@@ -3619,7 +3619,7 @@ public class HBaseAdmin implements Admin {
       return "Operation: " + getOperationType() + ", "
           + "Table Name: " + tableName.getNameWithNamespaceInclAsString();
 
-    };
+    }
 
     protected abstract class TableWaitForStateCallable implements WaitForStateCallable {
       @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/dd00081c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java
index 9e162b4..a4e4cc0 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/SnapshotType.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hbase.client;
+
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
@@ -23,5 +24,5 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public enum SnapshotType {
-  DISABLED, FLUSH, SKIPFLUSH;
+  DISABLED, FLUSH, SKIPFLUSH
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/dd00081c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java
index 37a3135..c410e2d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/security/SecurityCapability.java
@@ -57,5 +57,4 @@ public enum SecurityCapability {
         throw new IllegalArgumentException("Unknown SecurityCapability value " + value);
     }
   }
-};
-
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/dd00081c/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
index 8d70f44..aad83a7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/SpaceViolationPolicy.java
@@ -41,5 +41,5 @@ public enum SpaceViolationPolicy {
   /**
    * Disallows any updates (but allows deletes and compactions) on the table(s).
    */
-  NO_INSERTS;
+  NO_INSERTS
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/dd00081c/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java
index 5890805..357ef89 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/ThrottlingException.java
@@ -67,7 +67,7 @@ public class ThrottlingException extends QuotaExceededException {
       int index = msg.indexOf(MSG_TYPE[i]);
       if (index >= 0) {
         String waitTimeStr = msg.substring(index + MSG_TYPE[i].length() + MSG_WAIT.length());
-        type = Type.values()[i];;
+        type = Type.values()[i];
         waitInterval = timeFromString(waitTimeStr);
         break;
       }

http://git-wip-us.apache.org/repos/asf/hbase/blob/dd00081c/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
index ea75ac8..f487568 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java
@@ -659,7 +659,7 @@ public class TestAsyncProcess {
 
     MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, true);
     BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE);
-    try (BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap);) {
+    try (BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap)) {
       mutator.mutate(puts);
       mutator.flush();
       List<AsyncRequestFuture> reqs = ap.allReqs;
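
A note on the Checkstyle rule behind these fixes: a semicolon after the last enum constant is only required when the enum body continues with fields, constructors, or methods; otherwise it is redundant, as in the hunks above. A minimal sketch (illustrative names, not from the patch):

  // Redundant: nothing follows the constant list, so no semicolon is needed.
  public enum Flag {
    ON, OFF
  }

  // Required: the constant list must be terminated before further members.
  public enum Status {
    OK(0), FAILED(1);   // this semicolon is mandatory
    private final int code;
    Status(int code) { this.code = code; }
    public int getCode() { return code; }
  }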


[21/24] hbase git commit: HBASE-19558 TestRegionsOnMasterOptions hack so it works reliably

Posted by zh...@apache.org.
HBASE-19558 TestRegionsOnMasterOptions hack so it works reliably


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3c78ad52
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3c78ad52
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3c78ad52

Branch: refs/heads/HBASE-19397
Commit: 3c78ad52157576641ee6b42e874b5015b792f34d
Parents: 5e7d16a
Author: Michael Stack <st...@apache.org>
Authored: Tue Dec 19 15:57:11 2017 -0800
Committer: Michael Stack <st...@apache.org>
Committed: Tue Dec 19 15:59:02 2017 -0800

----------------------------------------------------------------------
 .../hbase/master/balancer/TestRegionsOnMasterOptions.java   | 9 +++++++++
 1 file changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/3c78ad52/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionsOnMasterOptions.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionsOnMasterOptions.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionsOnMasterOptions.java
index 7319820..58c3333 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionsOnMasterOptions.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRegionsOnMasterOptions.java
@@ -193,6 +193,15 @@ public class TestRegionsOnMasterOptions {
         // still only carry system regions post crash.
         assertEquals(masterCount, mNewActualCount);
       }
+      // Disable the balancer and wait until RIT is done, else the cluster won't go down.
+      TEST_UTIL.getAdmin().balancerSwitch(false, true);
+      while (true) {
+        if (!TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager().
+            isMetaRegionInTransition()) {
+          break;
+        }
+        Threads.sleep(10);
+      }
     } finally {
       LOG.info("Running shutdown of cluster");
       TEST_UTIL.shutdownMiniCluster();
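
The spin loop added above has no upper bound, so a stuck meta region would hang the test instead of failing it. If the Waiter support in HBaseCommonTestingUtility is acceptable here, the same wait can be written with a timeout; a sketch under that assumption (not part of the patch):

  // Disable the balancer, then wait up to 60s for meta to leave RIT.
  TEST_UTIL.getAdmin().balancerSwitch(false, true);
  TEST_UTIL.waitFor(60000, () ->
      !TEST_UTIL.getHBaseCluster().getMaster().getAssignmentManager()
          .isMetaRegionInTransition());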


[16/24] hbase git commit: HBASE-19481 Enabled Checkstyle to fail on violations in hbase-error-prone

Posted by zh...@apache.org.
HBASE-19481 Enabled Checkstyle to fail on violations in hbase-error-prone


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ec7bf573
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ec7bf573
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ec7bf573

Branch: refs/heads/HBASE-19397
Commit: ec7bf57390d8302598901a6e80ca710121cb5f48
Parents: d50ae03
Author: Jan Hentschel <ja...@ultratendency.com>
Authored: Sun Dec 10 21:46:13 2017 +0100
Committer: Jan Hentschel <ja...@ultratendency.com>
Committed: Tue Dec 19 21:11:55 2017 +0100

----------------------------------------------------------------------
 hbase-build-support/hbase-error-prone/pom.xml | 23 +++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/ec7bf573/hbase-build-support/hbase-error-prone/pom.xml
----------------------------------------------------------------------
diff --git a/hbase-build-support/hbase-error-prone/pom.xml b/hbase-build-support/hbase-error-prone/pom.xml
index 067e154..b90d733 100644
--- a/hbase-build-support/hbase-error-prone/pom.xml
+++ b/hbase-build-support/hbase-error-prone/pom.xml
@@ -65,4 +65,25 @@
       <scope>provided</scope>
     </dependency>
   </dependencies>
-</project>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>checkstyle</id>
+            <phase>validate</phase>
+            <goals>
+              <goal>check</goal>
+            </goals>
+            <configuration>
+              <failOnViolation>true</failOnViolation>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+</project>
\ No newline at end of file


[02/24] hbase git commit: HBASE-19531 Remove needless volatile declaration

Posted by zh...@apache.org.
HBASE-19531 Remove needless volatile declaration

Signed-off-by: Chia-Ping Tsai <ch...@gmail.com>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9d0c7c6d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9d0c7c6d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9d0c7c6d

Branch: refs/heads/HBASE-19397
Commit: 9d0c7c6dfbcba0907cbbc2244eac570fcc4d58a5
Parents: c8bf03f
Author: Yun Chi-Shih <le...@gmail.com>
Authored: Mon Dec 18 23:24:58 2017 +0800
Committer: Chia-Ping Tsai <ch...@gmail.com>
Committed: Tue Dec 19 00:34:28 2017 +0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java   | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java   | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/9d0c7c6d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
index c772b78..b91038d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
@@ -73,7 +73,7 @@ public class BufferedMutatorImpl implements BufferedMutator {
    * The {@link ConcurrentLinkedQueue#size()} is NOT a constant-time operation.
    */
   private final AtomicInteger undealtMutationCount = new AtomicInteger(0);
-  private volatile long writeBufferSize;
+  private final long writeBufferSize;
   private final int maxKeyValueSize;
   private final ExecutorService pool;
   private final AtomicInteger rpcTimeout;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9d0c7c6d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
----------------------------------------------------------------------
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index 2ea7c74..ef054b6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -227,7 +227,7 @@ public class HBaseAdmin implements Admin {
 
   private ClusterConnection connection;
 
-  private volatile Configuration conf;
+  private final Configuration conf;
   private final long pause;
   private final int numRetries;
   private final int syncWaitTimeout;

http://git-wip-us.apache.org/repos/asf/hbase/blob/9d0c7c6d/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
index de21ce0..9e228ad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ZKNamespaceManager.java
@@ -51,7 +51,7 @@ import java.util.concurrent.ConcurrentSkipListMap;
 public class ZKNamespaceManager extends ZKListener {
   private static final Log LOG = LogFactory.getLog(ZKNamespaceManager.class);
   private final String nsZNode;
-  private volatile NavigableMap<String,NamespaceDescriptor> cache;
+  private final NavigableMap<String,NamespaceDescriptor> cache;
 
   public ZKNamespaceManager(ZKWatcher zkw) throws IOException {
     super(zkw);
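
The common thread in these three hunks: each field is assigned exactly once, in the constructor, and only its contents (never the reference) change afterwards, so volatile buys nothing. A minimal sketch of the pattern (illustrative class, not from the patch):

  import java.util.NavigableMap;
  import java.util.concurrent.ConcurrentSkipListMap;

  public class NamespaceCache {
    // Reference is fixed for the object's lifetime; final documents that
    // invariant and gives safe publication of the constructed object.
    private final NavigableMap<String, String> map = new ConcurrentSkipListMap<>();

    public void put(String name, String descriptor) {
      map.put(name, descriptor);   // mutating contents is fine; the map is thread-safe
    }
  }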


[23/24] hbase git commit: HBASE-19524 Master side changes for moving peer modification from zk watcher to procedure

Posted by zh...@apache.org.
HBASE-19524 Master side changes for moving peer modification from zk watcher to procedure


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/87a5b422
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/87a5b422
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/87a5b422

Branch: refs/heads/HBASE-19397
Commit: 87a5b4225123ccb7745b2e9cc4cca093455dbcca
Parents: 123cf75
Author: zhangduo <zh...@apache.org>
Authored: Mon Dec 18 15:22:36 2017 +0800
Committer: zhangduo <zh...@apache.org>
Committed: Wed Dec 20 09:29:13 2017 +0800

----------------------------------------------------------------------
 .../procedure2/RemoteProcedureDispatcher.java   |   3 +-
 .../src/main/protobuf/MasterProcedure.proto     |  21 +++-
 .../src/main/protobuf/RegionServerStatus.proto  |   3 +-
 .../src/main/protobuf/Replication.proto         |   5 +
 .../replication/ReplicationPeersZKImpl.java     |   4 +-
 .../org/apache/hadoop/hbase/master/HMaster.java | 112 ++++++++-----------
 .../hadoop/hbase/master/MasterRpcServices.java  |   4 +-
 .../hadoop/hbase/master/MasterServices.java     |  26 +++--
 .../assignment/RegionTransitionProcedure.java   |   9 +-
 .../master/procedure/MasterProcedureEnv.java    |   5 +
 .../master/procedure/ProcedurePrepareLatch.java |   2 +-
 .../master/replication/AddPeerProcedure.java    |  97 ++++++++++++++++
 .../replication/DisablePeerProcedure.java       |  70 ++++++++++++
 .../master/replication/EnablePeerProcedure.java |  69 ++++++++++++
 .../master/replication/ModifyPeerProcedure.java |  97 +++++++++++++---
 .../master/replication/RefreshPeerCallable.java |  67 -----------
 .../replication/RefreshPeerProcedure.java       |  28 +++--
 .../master/replication/RemovePeerProcedure.java |  69 ++++++++++++
 .../master/replication/ReplicationManager.java  |  76 ++++++-------
 .../replication/UpdatePeerConfigProcedure.java  |  92 +++++++++++++++
 .../hbase/regionserver/HRegionServer.java       |   6 +-
 .../regionserver/RefreshPeerCallable.java       |  70 ++++++++++++
 .../hbase/master/MockNoopMasterServices.java    |  23 ++--
 .../replication/DummyModifyPeerProcedure.java   |  13 ++-
 24 files changed, 739 insertions(+), 232 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
----------------------------------------------------------------------
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
index 904268d..880e676 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java
@@ -242,9 +242,8 @@ public abstract class RemoteProcedureDispatcher<TEnv, TRemote extends Comparable
     /**
      * Called when the RS reports, through the {@code reportProcedureDone} method, that the
      * remote procedure has failed.
-     * @param error the error message
      */
-    void remoteOperationFailed(TEnv env, String error);
+    void remoteOperationFailed(TEnv env, RemoteProcedureException error);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
index 0e2bdba..ae676ea 100644
--- a/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/MasterProcedure.proto
@@ -27,6 +27,7 @@ option optimize_for = SPEED;
 import "HBase.proto";
 import "RPC.proto";
 import "Snapshot.proto";
+import "Replication.proto";
 
 // ============================================================================
 //  WARNING - Compatibility rules
@@ -367,9 +368,10 @@ message GCMergedRegionsStateData {
 }
 
 enum PeerModificationState {
-  UPDATE_PEER_STORAGE = 1;
-  REFRESH_PEER_ON_RS = 2;
-  POST_PEER_MODIFICATION = 3;
+  PRE_PEER_MODIFICATION = 1;
+  UPDATE_PEER_STORAGE = 2;
+  REFRESH_PEER_ON_RS = 3;
+  POST_PEER_MODIFICATION = 4;
 }
 
 message PeerModificationStateData {
@@ -394,4 +396,17 @@ message RefreshPeerParameter {
   required string peer_id = 1;
   required PeerModificationType type = 2;
   required ServerName target_server = 3;
+}
+
+message ModifyPeerStateData {
+  required string peer_id = 1;
+}
+
+message AddPeerStateData {
+  required ReplicationPeer peer_config = 1;
+  required bool enabled = 2;
+}
+
+message UpdatePeerConfigStateData {
+  required ReplicationPeer peer_config = 1;
 }
\ No newline at end of file
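
An aside on the enum change above (an observation, not part of the patch): inserting PRE_PEER_MODIFICATION as value 1 renumbers the pre-existing states. Protobuf enums resolve by number, so this is only safe while no procedure state persisted under the old numbering can still be replayed on this feature branch:

  // Illustrative fragment: a state persisted as 1 under the old numbering
  // (UPDATE_PEER_STORAGE) would now deserialize as PRE_PEER_MODIFICATION.
  PeerModificationState s = PeerModificationState.forNumber(1);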

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
index eb396ac..4f75941 100644
--- a/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/RegionServerStatus.proto
@@ -28,6 +28,7 @@ option optimize_for = SPEED;
 
 import "HBase.proto";
 import "ClusterStatus.proto";
+import "ErrorHandling.proto";
 
 message RegionServerStartupRequest {
   /** Port number this regionserver is up on */
@@ -152,7 +153,7 @@ message ReportProcedureDoneRequest {
     ERROR = 2;
   }
   required Status status = 2;
-  optional string error = 3;
+  optional ForeignExceptionMessage error = 3;
 }
 
 message ReportProcedureDoneResponse {

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-protocol-shaded/src/main/protobuf/Replication.proto
----------------------------------------------------------------------
diff --git a/hbase-protocol-shaded/src/main/protobuf/Replication.proto b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
index 8657c25..9f7b4c2 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Replication.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Replication.proto
@@ -84,6 +84,7 @@ message AddReplicationPeerRequest {
 }
 
 message AddReplicationPeerResponse {
+  optional uint64 proc_id = 1;
 }
 
 message RemoveReplicationPeerRequest {
@@ -91,6 +92,7 @@ message RemoveReplicationPeerRequest {
 }
 
 message RemoveReplicationPeerResponse {
+  optional uint64 proc_id = 1;
 }
 
 message EnableReplicationPeerRequest {
@@ -98,6 +100,7 @@ message EnableReplicationPeerRequest {
 }
 
 message EnableReplicationPeerResponse {
+  optional uint64 proc_id = 1;
 }
 
 message DisableReplicationPeerRequest {
@@ -105,6 +108,7 @@ message DisableReplicationPeerRequest {
 }
 
 message DisableReplicationPeerResponse {
+  optional uint64 proc_id = 1;
 }
 
 message GetReplicationPeerConfigRequest {
@@ -122,6 +126,7 @@ message UpdateReplicationPeerConfigRequest {
 }
 
 message UpdateReplicationPeerConfigResponse {
+  optional uint64 proc_id = 1;
 }
 
 message ListReplicationPeersRequest {
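
The optional proc_id added to every peer-modification response is what will let clients treat these RPCs as submit-then-wait instead of fully blocking. A plausible client-side shape, sketched with a hypothetical wait helper (the real client wiring is not part of this patch):

  AddReplicationPeerResponse resp = master.addReplicationPeer(controller, request);
  if (resp.hasProcId()) {
    // waitOnProcedure is hypothetical: poll getProcedureResult until the
    // procedure reports FINISHED, then surface any error to the caller.
    waitOnProcedure(resp.getProcId());
  }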

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
----------------------------------------------------------------------
diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
index ca99f65..6cb0a20 100644
--- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
+++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeersZKImpl.java
@@ -531,7 +531,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
         for (String queueId : queueIds) {
           ReplicationQueueInfo queueInfo = new ReplicationQueueInfo(queueId);
           if (queueInfo.getPeerId().equals(peerId)) {
-            throw new ReplicationException("undeleted queue for peerId: " + peerId
+            throw new IllegalArgumentException("undeleted queue for peerId: " + peerId
                 + ", replicator: " + replicator + ", queueId: " + queueId);
           }
         }
@@ -539,7 +539,7 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
       // Check for hfile-refs queue
       if (-1 != ZKUtil.checkExists(zookeeper, hfileRefsZNode)
           && queuesClient.getAllPeersFromHFileRefsQueue().contains(peerId)) {
-        throw new ReplicationException("Undeleted queue for peerId: " + peerId
+        throw new IllegalArgumentException("Undeleted queue for peerId: " + peerId
             + ", found in hfile-refs node path " + hfileRefsZNode);
       }
     } catch (KeeperException e) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 0348b53..f5b8feb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -17,10 +17,9 @@
  */
 package org.apache.hadoop.hbase.master;
 
-import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.Service;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.lang.reflect.Constructor;
@@ -49,6 +48,11 @@ import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Function;
 import java.util.regex.Pattern;
 
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -122,7 +126,13 @@ import org.apache.hadoop.hbase.master.procedure.ModifyTableProcedure;
 import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
 import org.apache.hadoop.hbase.master.procedure.RecoverMetaProcedure;
 import org.apache.hadoop.hbase.master.procedure.TruncateTableProcedure;
+import org.apache.hadoop.hbase.master.replication.AddPeerProcedure;
+import org.apache.hadoop.hbase.master.replication.DisablePeerProcedure;
+import org.apache.hadoop.hbase.master.replication.EnablePeerProcedure;
+import org.apache.hadoop.hbase.master.replication.ModifyPeerProcedure;
+import org.apache.hadoop.hbase.master.replication.RemovePeerProcedure;
 import org.apache.hadoop.hbase.master.replication.ReplicationManager;
+import org.apache.hadoop.hbase.master.replication.UpdatePeerConfigProcedure;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.mob.MobConstants;
 import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;
@@ -135,6 +145,7 @@ import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
 import org.apache.hadoop.hbase.procedure2.store.wal.WALProcedureStore;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
 import org.apache.hadoop.hbase.quotas.MasterSpaceQuotaObserver;
@@ -165,7 +176,6 @@ import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.EncryptionTest;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HFileArchiveUtil;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.IdLock;
@@ -180,8 +190,8 @@ import org.apache.hadoop.hbase.zookeeper.MasterMaintenanceModeTracker;
 import org.apache.hadoop.hbase.zookeeper.RegionNormalizerTracker;
 import org.apache.hadoop.hbase.zookeeper.ZKClusterId;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
-import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
+import org.apache.hadoop.hbase.zookeeper.ZNodePaths;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.KeeperException;
 import org.eclipse.jetty.server.Server;
@@ -200,9 +210,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceViolat
 import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos;
 
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Service;
-
 /**
  * HMaster is the "master server" for HBase. An HBase cluster has one active
  * master.  If many masters are started, all compete.  Whichever wins goes on to
@@ -327,15 +334,15 @@ public class HMaster extends HRegionServer implements MasterServices {
   private volatile boolean activeMaster = false;
 
   // flag set after we complete initialization once active
-  private final ProcedureEvent initialized = new ProcedureEvent("master initialized");
+  private final ProcedureEvent<?> initialized = new ProcedureEvent<>("master initialized");
 
   // flag set after master services are started,
   // initialization may have not completed yet.
   volatile boolean serviceStarted = false;
 
   // flag set after we complete assignMeta.
-  private final ProcedureEvent serverCrashProcessingEnabled =
-    new ProcedureEvent("server crash processing");
+  private final ProcedureEvent<?> serverCrashProcessingEnabled =
+    new ProcedureEvent<>("server crash processing");
 
   // Maximum time we should run balancer for
   private final int maxBlancingTime;
@@ -1193,7 +1200,6 @@ public class HMaster extends HRegionServer implements MasterServices {
 
   private void startProcedureExecutor() throws IOException {
     final MasterProcedureEnv procEnv = new MasterProcedureEnv(this);
-    final Path rootDir = FSUtils.getRootDir(conf);
 
     procedureStore = new WALProcedureStore(conf,
         new MasterProcedureEnv.WALStoreLeaseRecovery(this));
@@ -2312,11 +2318,8 @@ public class HMaster extends HRegionServer implements MasterServices {
             return true;
           }
           Pair<RegionInfo, ServerName> pair =
-              new Pair(MetaTableAccessor.getRegionInfo(data),
+              new Pair<>(MetaTableAccessor.getRegionInfo(data),
                   MetaTableAccessor.getServerName(data,0));
-          if (pair == null) {
-            return false;
-          }
           if (!pair.getFirst().getTable().equals(tableName)) {
             return false;
           }
@@ -2740,7 +2743,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
-  public ProcedureEvent getInitializedEvent() {
+  public ProcedureEvent<?> getInitializedEvent() {
     return initialized;
   }
 
@@ -2759,7 +2762,7 @@ public class HMaster extends HRegionServer implements MasterServices {
     procedureExecutor.getEnvironment().setEventReady(serverCrashProcessingEnabled, b);
   }
 
-  public ProcedureEvent getServerCrashProcessingEnabledEvent() {
+  public ProcedureEvent<?> getServerCrashProcessingEnabledEvent() {
     return serverCrashProcessingEnabled;
   }
 
@@ -3310,54 +3313,36 @@ public class HMaster extends HRegionServer implements MasterServices {
     return favoredNodesManager;
   }
 
+  private long executePeerProcedure(ModifyPeerProcedure procedure) throws IOException {
+    long procId = procedureExecutor.submitProcedure(procedure);
+    procedure.getLatch().await();
+    return procId;
+  }
+
   @Override
-  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
+  public long addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
       throws ReplicationException, IOException {
-    if (cpHost != null) {
-      cpHost.preAddReplicationPeer(peerId, peerConfig);
-    }
-    LOG.info(getClientIdAuditPrefix() + " creating replication peer, id=" + peerId + ", config="
-        + peerConfig + ", state=" + (enabled ? "ENABLED" : "DISABLED"));
-    this.replicationManager.addReplicationPeer(peerId, peerConfig, enabled);
-    if (cpHost != null) {
-      cpHost.postAddReplicationPeer(peerId, peerConfig);
-    }
+    LOG.info(getClientIdAuditPrefix() + " creating replication peer, id=" + peerId + ", config=" +
+      peerConfig + ", state=" + (enabled ? "ENABLED" : "DISABLED"));
+    return executePeerProcedure(new AddPeerProcedure(peerId, peerConfig, enabled));
   }
 
   @Override
-  public void removeReplicationPeer(String peerId) throws ReplicationException, IOException {
-    if (cpHost != null) {
-      cpHost.preRemoveReplicationPeer(peerId);
-    }
+  public long removeReplicationPeer(String peerId) throws ReplicationException, IOException {
     LOG.info(getClientIdAuditPrefix() + " removing replication peer, id=" + peerId);
-    this.replicationManager.removeReplicationPeer(peerId);
-    if (cpHost != null) {
-      cpHost.postRemoveReplicationPeer(peerId);
-    }
+    return executePeerProcedure(new RemovePeerProcedure(peerId));
   }
 
   @Override
-  public void enableReplicationPeer(String peerId) throws ReplicationException, IOException {
-    if (cpHost != null) {
-      cpHost.preEnableReplicationPeer(peerId);
-    }
+  public long enableReplicationPeer(String peerId) throws ReplicationException, IOException {
     LOG.info(getClientIdAuditPrefix() + " enable replication peer, id=" + peerId);
-    this.replicationManager.enableReplicationPeer(peerId);
-    if (cpHost != null) {
-      cpHost.postEnableReplicationPeer(peerId);
-    }
+    return executePeerProcedure(new EnablePeerProcedure(peerId));
   }
 
   @Override
-  public void disableReplicationPeer(String peerId) throws ReplicationException, IOException {
-    if (cpHost != null) {
-      cpHost.preDisableReplicationPeer(peerId);
-    }
+  public long disableReplicationPeer(String peerId) throws ReplicationException, IOException {
     LOG.info(getClientIdAuditPrefix() + " disable replication peer, id=" + peerId);
-    this.replicationManager.disableReplicationPeer(peerId);
-    if (cpHost != null) {
-      cpHost.postDisableReplicationPeer(peerId);
-    }
+    return executePeerProcedure(new DisablePeerProcedure(peerId));
   }
 
   @Override
@@ -3376,17 +3361,11 @@ public class HMaster extends HRegionServer implements MasterServices {
   }
 
   @Override
-  public void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
+  public long updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
       throws ReplicationException, IOException {
-    if (cpHost != null) {
-      cpHost.preUpdateReplicationPeerConfig(peerId, peerConfig);
-    }
-    LOG.info(getClientIdAuditPrefix() + " update replication peer config, id=" + peerId
-        + ", config=" + peerConfig);
-    this.replicationManager.updatePeerConfig(peerId, peerConfig);
-    if (cpHost != null) {
-      cpHost.postUpdateReplicationPeerConfig(peerId, peerConfig);
-    }
+    LOG.info(getClientIdAuditPrefix() + " update replication peer config, id=" + peerId +
+      ", config=" + peerConfig);
+    return executePeerProcedure(new UpdatePeerConfigProcedure(peerId, peerConfig));
   }
 
   @Override
@@ -3539,10 +3518,15 @@ public class HMaster extends HRegionServer implements MasterServices {
     }
   }
 
-  public void remoteProcedureFailed(long procId, String error) {
+  public void remoteProcedureFailed(long procId, RemoteProcedureException error) {
     RemoteProcedure<MasterProcedureEnv, ?> procedure = getRemoteProcedure(procId);
     if (procedure != null) {
       procedure.remoteOperationFailed(procedureExecutor.getEnvironment(), error);
     }
   }
+
+  @Override
+  public ReplicationManager getReplicationManager() {
+    return replicationManager;
+  }
 }
\ No newline at end of file
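
The executePeerProcedure() shape introduced above — submit the procedure, then block on a latch that the procedure releases — keeps these RPCs synchronous for current clients while the actual work runs as a retriable procedure. Stripped to plain JDK types (a sketch, not the HBase classes):

  import java.util.concurrent.CountDownLatch;

  final class SubmitThenAwait {
    long execute(Runnable procedureBody) throws InterruptedException {
      CountDownLatch latch = new CountDownLatch(1);   // stands in for ProcedurePrepareLatch
      long procId = 1L;                               // stands in for submitProcedure()
      new Thread(() -> {
        try {
          procedureBody.run();
        } finally {
          latch.countDown();   // the procedure releases the waiting RPC thread
        }
      }).start();
      latch.await();           // the RPC handler blocks here, as the old API did
      return procId;           // the id is still returned for procedure-aware clients
    }
  }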

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index 599e035..006a869 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.procedure2.LockType;
 import org.apache.hadoop.hbase.procedure2.LockedResource;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureUtil;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
 import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
 import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService;
 import org.apache.hadoop.hbase.quotas.MasterQuotaManager;
@@ -2260,7 +2261,8 @@ public class MasterRpcServices extends RSRpcServices
     if (request.getStatus() == ReportProcedureDoneRequest.Status.SUCCESS) {
       master.remoteProcedureCompleted(request.getProcId());
     } else {
-      master.remoteProcedureFailed(request.getProcId(), request.getError());
+      master.remoteProcedureFailed(request.getProcId(),
+        RemoteProcedureException.fromProto(request.getError()));
     }
     return ReportProcedureDoneResponse.getDefaultInstance();
   }
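
Reconstructing a typed RemoteProcedureException from the ForeignExceptionMessage, rather than passing a flat String, lets the receiving procedure branch on the failure type. The idea in plain Java (a sketch, not the actual dispatch code):

  import org.apache.hadoop.hbase.DoNotRetryIOException;

  class FailureDispatch {
    void onRemoteFailure(Exception error) {
      if (error instanceof DoNotRetryIOException) {
        // permanent failure reconstructed from the wire: fail the procedure
      } else {
        // transient failure: yield and let the framework retry
      }
    }
  }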

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
index 6b3c212..43df8b1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterServices.java
@@ -1,5 +1,4 @@
-/*
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,10 +17,11 @@
  */
 package org.apache.hadoop.hbase.master;
 
+import com.google.protobuf.Service;
+
 import java.io.IOException;
 import java.util.List;
 
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableDescriptors;
@@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.replication.ReplicationManager;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 import org.apache.hadoop.hbase.procedure2.LockedResource;
@@ -52,8 +53,6 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
-import com.google.protobuf.Service;
-
 /**
  * A curated subset of services provided by {@link HMaster}.
  * For use internally only. Passed to Managers, Services and Chores so can pass less-than-a
@@ -136,7 +135,7 @@ public interface MasterServices extends Server {
    * @return Tripped when Master has finished initialization.
    */
   @VisibleForTesting
-  public ProcedureEvent getInitializedEvent();
+  public ProcedureEvent<?> getInitializedEvent();
 
   /**
    * @return Master's instance of {@link MetricsMaster}
@@ -430,26 +429,26 @@ public interface MasterServices extends Server {
    * @param peerConfig configuration for the replication slave cluster
    * @param enabled peer state, true if ENABLED and false if DISABLED
    */
-  void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
+  long addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
       throws ReplicationException, IOException;
 
   /**
    * Removes a peer and stops the replication
    * @param peerId a short name that identifies the peer
    */
-  void removeReplicationPeer(String peerId) throws ReplicationException, IOException;
+  long removeReplicationPeer(String peerId) throws ReplicationException, IOException;
 
   /**
    * Restart the replication stream to the specified peer
    * @param peerId a short name that identifies the peer
    */
-  void enableReplicationPeer(String peerId) throws ReplicationException, IOException;
+  long enableReplicationPeer(String peerId) throws ReplicationException, IOException;
 
   /**
    * Stop the replication stream to the specified peer
    * @param peerId a short name that identifies the peer
    */
-  void disableReplicationPeer(String peerId) throws ReplicationException, IOException;
+  long disableReplicationPeer(String peerId) throws ReplicationException, IOException;
 
   /**
    * Returns the configured ReplicationPeerConfig for the specified peer
@@ -460,11 +459,16 @@ public interface MasterServices extends Server {
       IOException;
 
   /**
+   * Returns the {@link ReplicationManager}.
+   */
+  ReplicationManager getReplicationManager();
+
+  /**
    * Update the peerConfig for the specified peer
    * @param peerId a short name that identifies the peer
    * @param peerConfig new config for the peer
    */
-  void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
+  long updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
       throws ReplicationException, IOException;
 
   /**
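
Changing these interface methods from void to long is source-incompatible, which is why the diffstat above also touches MockNoopMasterServices. A stub implementation now takes roughly this shape (an illustrative fragment, not the actual mock):

  @Override
  public long removeReplicationPeer(String peerId) throws ReplicationException, IOException {
    return -1;   // no-op stub: no procedure is actually submitted
  }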

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
index 47444ad..9dd2d37 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/RegionTransitionProcedure.java
@@ -16,7 +16,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.master.assignment;
 
 import java.io.IOException;
@@ -35,13 +34,13 @@ import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
+import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RegionTransitionState;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 
-import org.apache.yetus.audience.InterfaceAudience;
-
 /**
  * Base class for the Assign and Unassign Procedure.
  *
@@ -417,7 +416,7 @@ public abstract class RegionTransitionProcedure
   }
 
   @Override
-  public void remoteOperationFailed(MasterProcedureEnv env, String error) {
+  public void remoteOperationFailed(MasterProcedureEnv env, RemoteProcedureException error) {
     // should not be called for region operation until we modified the open/close region procedure
     throw new UnsupportedOperationException();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
index c9c3ac9..3596f82 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/MasterProcedureEnv.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.master.HMaster;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
+import org.apache.hadoop.hbase.master.replication.ReplicationManager;
 import org.apache.hadoop.hbase.procedure2.Procedure;
 import org.apache.hadoop.hbase.procedure2.ProcedureEvent;
 import org.apache.hadoop.hbase.procedure2.store.ProcedureStore;
@@ -137,6 +138,10 @@ public class MasterProcedureEnv implements ConfigurationObserver {
     return remoteDispatcher;
   }
 
+  public ReplicationManager getReplicationManager() {
+    return master.getReplicationManager();
+  }
+
   public boolean isRunning() {
     if (this.master == null || this.master.getMasterProcedureExecutor() == null) return false;
     return master.getMasterProcedureExecutor().isRunning();

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java
index 09d05e6..dbea6fa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ProcedurePrepareLatch.java
@@ -64,7 +64,7 @@ public abstract class ProcedurePrepareLatch {
   protected abstract void countDown(final Procedure proc);
   public abstract void await() throws IOException;
 
-  protected static void releaseLatch(final ProcedurePrepareLatch latch, final Procedure proc) {
+  public static void releaseLatch(final ProcedurePrepareLatch latch, final Procedure proc) {
     if (latch != null) {
       latch.countDown(proc);
     }

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
new file mode 100644
index 0000000..c3862d8
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AddPeerProcedure.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.replication.ReplicationException;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.AddPeerStateData;
+
+/**
+ * The procedure for adding a new replication peer.
+ */
+@InterfaceAudience.Private
+public class AddPeerProcedure extends ModifyPeerProcedure {
+
+  private static final Log LOG = LogFactory.getLog(AddPeerProcedure.class);
+
+  private ReplicationPeerConfig peerConfig;
+
+  private boolean enabled;
+
+  public AddPeerProcedure() {
+  }
+
+  public AddPeerProcedure(String peerId, ReplicationPeerConfig peerConfig, boolean enabled) {
+    super(peerId);
+    this.peerConfig = peerConfig;
+    this.enabled = enabled;
+  }
+
+  @Override
+  public PeerOperationType getPeerOperationType() {
+    return PeerOperationType.ADD;
+  }
+
+  @Override
+  protected void prePeerModification(MasterProcedureEnv env) throws IOException {
+    MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      cpHost.preAddReplicationPeer(peerId, peerConfig);
+    }
+  }
+
+  @Override
+  protected void updatePeerStorage(MasterProcedureEnv env) throws ReplicationException {
+    env.getReplicationManager().addReplicationPeer(peerId, peerConfig, enabled);
+  }
+
+  @Override
+  protected void postPeerModification(MasterProcedureEnv env) throws IOException {
+    LOG.info("Successfully added " + (enabled ? "ENABLED" : "DISABLED") + " peer " + peerId +
+      ", config " + peerConfig);
+    MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      env.getMasterCoprocessorHost().postAddReplicationPeer(peerId, peerConfig);
+    }
+  }
+
+  @Override
+  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
+    super.serializeStateData(serializer);
+    serializer.serialize(AddPeerStateData.newBuilder()
+        .setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig)).setEnabled(enabled).build());
+  }
+
+  @Override
+  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
+    super.deserializeStateData(serializer);
+    AddPeerStateData data = serializer.deserialize(AddPeerStateData.class);
+    peerConfig = ReplicationPeerConfigUtil.convert(data.getPeerConfig());
+    enabled = data.getEnabled();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
new file mode 100644
index 0000000..0b32db9
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/DisablePeerProcedure.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The procedure for disabling a replication peer.
+ */
+@InterfaceAudience.Private
+public class DisablePeerProcedure extends ModifyPeerProcedure {
+
+  private static final Log LOG = LogFactory.getLog(DisablePeerProcedure.class);
+
+  public DisablePeerProcedure() {
+  }
+
+  public DisablePeerProcedure(String peerId) {
+    super(peerId);
+  }
+
+  @Override
+  public PeerOperationType getPeerOperationType() {
+    return PeerOperationType.DISABLE;
+  }
+
+  @Override
+  protected void prePeerModification(MasterProcedureEnv env) throws IOException {
+    MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      cpHost.preDisableReplicationPeer(peerId);
+    }
+  }
+
+  @Override
+  protected void updatePeerStorage(MasterProcedureEnv env)
+      throws IllegalArgumentException, Exception {
+    env.getReplicationManager().disableReplicationPeer(peerId);
+  }
+
+  @Override
+  protected void postPeerModification(MasterProcedureEnv env) throws IOException {
+    LOG.info("Successfully disabled peer " + peerId);
+    MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      cpHost.postDisableReplicationPeer(peerId);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java
new file mode 100644
index 0000000..92ba000
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/EnablePeerProcedure.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The procedure for enabling a replication peer.
+ */
+@InterfaceAudience.Private
+public class EnablePeerProcedure extends ModifyPeerProcedure {
+
+  private static final Log LOG = LogFactory.getLog(EnablePeerProcedure.class);
+
+  public EnablePeerProcedure() {
+  }
+
+  public EnablePeerProcedure(String peerId) {
+    super(peerId);
+  }
+
+  @Override
+  public PeerOperationType getPeerOperationType() {
+    return PeerOperationType.ENABLE;
+  }
+
+  @Override
+  protected void prePeerModification(MasterProcedureEnv env) throws IOException {
+    MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      cpHost.preEnableReplicationPeer(peerId);
+    }
+  }
+
+  @Override
+  protected void updatePeerStorage(MasterProcedureEnv env) throws Exception {
+    env.getReplicationManager().enableReplicationPeer(peerId);
+  }
+
+  @Override
+  protected void postPeerModification(MasterProcedureEnv env) throws IOException {
+    LOG.info("Successfully enabled peer " + peerId);
+    MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      cpHost.postEnableReplicationPeer(peerId);
+    }
+  }
+}
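
Add, Enable, Disable and Remove differ only in which coprocessor hooks and which ReplicationManager call they plug in; the shared skeleton in ModifyPeerProcedure (next diff) is a textbook template method. Reduced to its core (illustrative, not the real class):

  abstract class PeerTemplate {
    final void run() throws Exception {
      pre();      // a failure here fails the whole operation up front
      update();   // retried on transient errors, fatal on IllegalArgumentException
      post();     // failures here are only logged; the work is already done
    }
    abstract void pre() throws Exception;
    abstract void update() throws Exception;
    abstract void post() throws Exception;
  }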

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
index fca05a7..7076bab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ModifyPeerProcedure.java
@@ -21,15 +21,22 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.master.procedure.PeerProcedureInterface;
+import org.apache.hadoop.hbase.master.procedure.ProcedurePrepareLatch;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
 import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
 import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
 import org.apache.yetus.audience.InterfaceAudience;
 
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyPeerStateData;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.PeerModificationState;
 
+/**
+ * The base class for all replication peer related procedures.
+ */
 @InterfaceAudience.Private
 public abstract class ModifyPeerProcedure
     extends StateMachineProcedure<MasterProcedureEnv, PeerModificationState>
@@ -39,11 +46,21 @@ public abstract class ModifyPeerProcedure
 
   protected String peerId;
 
+  // used to keep compatibility with old clients, which can only return after updateStorage.
+  protected ProcedurePrepareLatch latch;
+
   protected ModifyPeerProcedure() {
   }
 
   protected ModifyPeerProcedure(String peerId) {
     this.peerId = peerId;
+    // TODO: temporarily use version 4.0 here so we always wait for the procedure execution to
+    // complete. Change to 3.0 or 2.0 after the client modification is done.
+    this.latch = ProcedurePrepareLatch.createLatch(4, 0);
+  }
+
+  public ProcedurePrepareLatch getLatch() {
+    return latch;
   }
 
   @Override
@@ -52,28 +69,58 @@ public abstract class ModifyPeerProcedure
   }
 
   /**
-   * Return {@code false} means that the operation is invalid and we should give up, otherwise
-   * {@code true}.
+   * Called before we start the actual processing. If an exception is thrown then we will give up
+   * and mark the procedure as failed directly.
+   */
+  protected abstract void prePeerModification(MasterProcedureEnv env) throws IOException;
+
+  /**
+   * We will give up and mark the procedure as failed if {@link IllegalArgumentException} is
+   * thrown; for other types of Exception we will retry.
+   */
+  protected abstract void updatePeerStorage(MasterProcedureEnv env)
+      throws IllegalArgumentException, Exception;
+
+  /**
+   * Called before we finish the procedure. The implementation can do some logging work, and also
+   * call the coprocessor hook if any.
    * <p>
-   * You need to call {@link #setFailure(String, Throwable)} to give the detail failure information.
+   * Notice that, since we have already done the actual work, throwing an exception here will not
+   * fail this procedure; we will just ignore it and finish the procedure as succeeded.
    */
-  protected abstract boolean updatePeerStorage() throws IOException;
+  protected abstract void postPeerModification(MasterProcedureEnv env) throws IOException;
 
-  protected void postPeerModification() {
+  private void releaseLatch() {
+    ProcedurePrepareLatch.releaseLatch(latch, this);
   }
 
   @Override
   protected Flow executeFromState(MasterProcedureEnv env, PeerModificationState state)
       throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException {
     switch (state) {
-      case UPDATE_PEER_STORAGE:
+      case PRE_PEER_MODIFICATION:
         try {
-          if (!updatePeerStorage()) {
-            assert isFailed() : "setFailure is not called";
-            return Flow.NO_MORE_STATE;
-          }
+          prePeerModification(env);
         } catch (IOException e) {
-          LOG.warn("update peer storage failed, retry", e);
+          LOG.warn(getClass().getName() + " failed to call prePeerModification for peer " + peerId +
+            ", mark the procedure as failure and give up", e);
+          setFailure("prePeerModification", e);
+          releaseLatch();
+          return Flow.NO_MORE_STATE;
+        }
+        setNextState(PeerModificationState.UPDATE_PEER_STORAGE);
+        return Flow.HAS_MORE_STATE;
+      case UPDATE_PEER_STORAGE:
+        try {
+          updatePeerStorage(env);
+        } catch (IllegalArgumentException e) {
+          setFailure("master-" + getPeerOperationType().name().toLowerCase() + "-peer",
+            new DoNotRetryIOException(e));
+          releaseLatch();
+          return Flow.NO_MORE_STATE;
+        } catch (Exception e) {
+          LOG.warn(
+            getClass().getName() + " update peer storage for peer " + peerId + " failed, retry", e);
           throw new ProcedureYieldException();
         }
         setNextState(PeerModificationState.REFRESH_PEER_ON_RS);
@@ -85,7 +132,13 @@ public abstract class ModifyPeerProcedure
         setNextState(PeerModificationState.POST_PEER_MODIFICATION);
         return Flow.HAS_MORE_STATE;
       case POST_PEER_MODIFICATION:
-        postPeerModification();
+        try {
+          postPeerModification(env);
+        } catch (IOException e) {
+          LOG.warn(getClass().getName() + " failed to call postPeerModification for peer " + peerId +
+            ", ignore since the procedure has already done its work", e);
+        }
+        releaseLatch();
         return Flow.NO_MORE_STATE;
       default:
         throw new UnsupportedOperationException("unhandled state=" + state);
@@ -107,6 +160,12 @@ public abstract class ModifyPeerProcedure
   @Override
   protected void rollbackState(MasterProcedureEnv env, PeerModificationState state)
       throws IOException, InterruptedException {
+    if (state == PeerModificationState.PRE_PEER_MODIFICATION ||
+      state == PeerModificationState.UPDATE_PEER_STORAGE) {
+      // actually the peer related operations have no rollback, but if we haven't done any
+      // modifications on the peer storage, we can just return.
+      return;
+    }
     throw new UnsupportedOperationException();
   }
 
@@ -122,6 +181,18 @@ public abstract class ModifyPeerProcedure
 
   @Override
   protected PeerModificationState getInitialState() {
-    return PeerModificationState.UPDATE_PEER_STORAGE;
+    return PeerModificationState.PRE_PEER_MODIFICATION;
+  }
+
+  @Override
+  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
+    super.serializeStateData(serializer);
+    serializer.serialize(ModifyPeerStateData.newBuilder().setPeerId(peerId).build());
+  }
+
+  @Override
+  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
+    super.deserializeStateData(serializer);
+    peerId = serializer.deserialize(ModifyPeerStateData.class).getPeerId();
   }
 }
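
For context, a rough sketch of how a master-side caller is expected to consume the new
prepare latch; the procedureExecutor wiring shown here is an assumption for illustration,
not part of this patch. submitProcedure() returns immediately, and the latch blocks the
RPC thread until releaseLatch() runs inside executeFromState(), which keeps old
synchronous clients working.

    // Hypothetical master-side caller, sketched for illustration only.
    public long removeReplicationPeer(String peerId) throws IOException {
      RemovePeerProcedure procedure = new RemovePeerProcedure(peerId);
      long procId = procedureExecutor.submitProcedure(procedure);
      // Released in ModifyPeerProcedure.releaseLatch() once the procedure finishes.
      procedure.getLatch().await();
      return procId;
    }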

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerCallable.java
deleted file mode 100644
index 4e09107..0000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerCallable.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase.master.replication;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.executor.EventType;
-import org.apache.hadoop.hbase.procedure2.RSProcedureCallable;
-import org.apache.hadoop.hbase.regionserver.HRegionServer;
-import org.apache.yetus.audience.InterfaceAudience;
-
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerParameter;
-
-/**
- * The callable executed at RS side to refresh the peer config/state.
- * <p>
- * TODO: only a dummy implementation for verifying the framework, will add implementation later.
- */
-@InterfaceAudience.Private
-public class RefreshPeerCallable implements RSProcedureCallable {
-
-  private HRegionServer rs;
-
-  private String peerId;
-
-  private Exception initError;
-
-  @Override
-  public Void call() throws Exception {
-    if (initError != null) {
-      throw initError;
-    }
-    rs.getFileSystem().create(new Path("/" + peerId + "/" + rs.getServerName().toString())).close();
-    return null;
-  }
-
-  @Override
-  public void init(byte[] parameter, HRegionServer rs) {
-    this.rs = rs;
-    try {
-      this.peerId = RefreshPeerParameter.parseFrom(parameter).getPeerId();
-    } catch (InvalidProtocolBufferException e) {
-      initError = e;
-      return;
-    }
-  }
-
-  @Override
-  public EventType getEventType() {
-    return EventType.RS_REFRESH_PEER;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
index 18da487..ddc2401 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RefreshPeerProcedure.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
 import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteOperation;
 import org.apache.hadoop.hbase.procedure2.RemoteProcedureDispatcher.RemoteProcedure;
+import org.apache.hadoop.hbase.procedure2.RemoteProcedureException;
+import org.apache.hadoop.hbase.replication.regionserver.RefreshPeerCallable;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
@@ -118,15 +120,22 @@ public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
             .setTargetServer(ProtobufUtil.toServerName(remote)).build().toByteArray());
   }
 
-  private void complete(MasterProcedureEnv env, boolean succ) {
+  private void complete(MasterProcedureEnv env, Throwable error) {
     if (event == null) {
       LOG.warn("procedure event for " + getProcId() +
-          " is null, maybe the procedure is created when recovery", new Exception());
+          " is null, maybe the procedure is created when recovery",
+        new Exception());
       return;
     }
-    LOG.info("Refresh peer " + peerId + " for " + type + " on " + targetServer +
-        (succ ? " suceeded" : " failed"));
-    this.succ = succ;
+    if (error != null) {
+      LOG.warn("Refresh peer " + peerId + " for " + type + " on " + targetServer + " failed",
+        error);
+      this.succ = false;
+    } else {
+      LOG.info("Refresh peer " + peerId + " for " + type + " on " + targetServer + " suceeded");
+      this.succ = true;
+    }
+
     event.wake(env.getProcedureScheduler());
     event = null;
   }
@@ -134,17 +143,18 @@ public class RefreshPeerProcedure extends Procedure<MasterProcedureEnv>
   @Override
   public synchronized void remoteCallFailed(MasterProcedureEnv env, ServerName remote,
       IOException exception) {
-    complete(env, false);
+    complete(env, exception);
   }
 
   @Override
   public synchronized void remoteOperationCompleted(MasterProcedureEnv env) {
-    complete(env, true);
+    complete(env, null);
   }
 
   @Override
-  public synchronized void remoteOperationFailed(MasterProcedureEnv env, String error) {
-    complete(env, false);
+  public synchronized void remoteOperationFailed(MasterProcedureEnv env,
+      RemoteProcedureException error) {
+    complete(env, error);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
new file mode 100644
index 0000000..3daad6d
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/RemovePeerProcedure.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * The procedure for removing a replication peer.
+ */
+@InterfaceAudience.Private
+public class RemovePeerProcedure extends ModifyPeerProcedure {
+
+  private static final Log LOG = LogFactory.getLog(RemovePeerProcedure.class);
+
+  public RemovePeerProcedure() {
+  }
+
+  public RemovePeerProcedure(String peerId) {
+    super(peerId);
+  }
+
+  @Override
+  public PeerOperationType getPeerOperationType() {
+    return PeerOperationType.REMOVE;
+  }
+
+  @Override
+  protected void prePeerModification(MasterProcedureEnv env) throws IOException {
+    MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      cpHost.preRemoveReplicationPeer(peerId);
+    }
+  }
+
+  @Override
+  protected void updatePeerStorage(MasterProcedureEnv env) throws Exception {
+    env.getReplicationManager().removeReplicationPeer(peerId);
+  }
+
+  @Override
+  protected void postPeerModification(MasterProcedureEnv env) throws IOException {
+    LOG.info("Successfully removed peer " + peerId);
+    MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      cpHost.postRemoveReplicationPeer(peerId);
+    }
+  }
+}
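
A minimal client-side sketch, assuming the standard Admin API of this branch: with the
latch version pinned at 4.0 above, the call below blocks until the whole procedure
(pre hook, peer storage update, RS refresh, post hook) has finished.

    try (Connection conn = ConnectionFactory.createConnection(conf);
        Admin admin = conn.getAdmin()) {
      // Drives RemovePeerProcedure on the master.
      admin.removeReplicationPeer("1");
    }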

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
index 749448d..984d3fb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/ReplicationManager.java
@@ -27,10 +27,8 @@ import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Abortable;
-import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.ReplicationPeerNotFoundException;
 import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.hadoop.hbase.replication.BaseReplicationEndpoint;
 import org.apache.hadoop.hbase.replication.ReplicationException;
 import org.apache.hadoop.hbase.replication.ReplicationFactory;
@@ -39,24 +37,21 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.replication.ReplicationPeers;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
 import org.apache.hadoop.hbase.replication.ReplicationQueuesClientArguments;
+import org.apache.hadoop.hbase.zookeeper.ZKWatcher;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Manages and performs all replication admin operations.
+ * <p>
  * Used to add/remove a replication peer.
  */
 @InterfaceAudience.Private
 public class ReplicationManager {
-
-  private final Configuration conf;
-  private final ZKWatcher zkw;
   private final ReplicationQueuesClient replicationQueuesClient;
   private final ReplicationPeers replicationPeers;
 
   public ReplicationManager(Configuration conf, ZKWatcher zkw, Abortable abortable)
       throws IOException {
-    this.conf = conf;
-    this.zkw = zkw;
     try {
       this.replicationQueuesClient = ReplicationFactory
           .getReplicationQueuesClient(new ReplicationQueuesClientArguments(conf, abortable, zkw));
@@ -70,7 +65,7 @@ public class ReplicationManager {
   }
 
   public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
-      throws ReplicationException, IOException {
+      throws ReplicationException {
     checkPeerConfig(peerConfig);
     replicationPeers.registerPeer(peerId, peerConfig, enabled);
     replicationPeers.peerConnected(peerId);
@@ -89,8 +84,8 @@ public class ReplicationManager {
     this.replicationPeers.disablePeer(peerId);
   }
 
-  public ReplicationPeerConfig getPeerConfig(String peerId) throws ReplicationException,
-      ReplicationPeerNotFoundException {
+  public ReplicationPeerConfig getPeerConfig(String peerId)
+      throws ReplicationException, ReplicationPeerNotFoundException {
     ReplicationPeerConfig peerConfig = replicationPeers.getReplicationPeerConfig(peerId);
     if (peerConfig == null) {
       throw new ReplicationPeerNotFoundException(peerId);
@@ -110,9 +105,9 @@ public class ReplicationManager {
     List<String> peerIds = replicationPeers.getAllPeerIds();
     for (String peerId : peerIds) {
       if (pattern == null || (pattern != null && pattern.matcher(peerId).matches())) {
-        peers.add(new ReplicationPeerDescription(peerId, replicationPeers
-            .getStatusOfPeerFromBackingStore(peerId), replicationPeers
-            .getReplicationPeerConfig(peerId)));
+        peers.add(new ReplicationPeerDescription(peerId,
+            replicationPeers.getStatusOfPeerFromBackingStore(peerId),
+            replicationPeers.getReplicationPeerConfig(peerId)));
       }
     }
     return peers;
@@ -126,13 +121,12 @@ public class ReplicationManager {
    * If replicate_all flag is false, it means all user tables can't be replicated to peer cluster.
    * Then allow to config namespaces or table-cfs which will be replicated to peer cluster.
    */
-  private void checkPeerConfig(ReplicationPeerConfig peerConfig)
-      throws ReplicationException, IOException {
+  private void checkPeerConfig(ReplicationPeerConfig peerConfig) {
     if (peerConfig.replicateAllUserTables()) {
-      if ((peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty())
-          || (peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty())) {
-        throw new ReplicationException("Need clean namespaces or table-cfs config firstly"
-            + " when replicate_all flag is true");
+      if ((peerConfig.getNamespaces() != null && !peerConfig.getNamespaces().isEmpty()) ||
+        (peerConfig.getTableCFsMap() != null && !peerConfig.getTableCFsMap().isEmpty())) {
+        throw new IllegalArgumentException("Need clean namespaces or table-cfs config firstly " +
+          "when you want replicate all cluster");
       }
       checkNamespacesAndTableCfsConfigConflict(peerConfig.getExcludeNamespaces(),
         peerConfig.getExcludeTableCFsMap());
@@ -140,7 +134,7 @@ public class ReplicationManager {
       if ((peerConfig.getExcludeNamespaces() != null && !peerConfig.getNamespaces().isEmpty())
           || (peerConfig.getExcludeTableCFsMap() != null
               && !peerConfig.getTableCFsMap().isEmpty())) {
-        throw new ReplicationException(
+        throw new IllegalArgumentException(
             "Need clean exclude-namespaces or exclude-table-cfs config firstly"
                 + " when replicate_all flag is false");
       }
@@ -153,20 +147,24 @@ public class ReplicationManager {
   /**
    * Set a namespace in the peer config means that all tables in this namespace will be replicated
    * to the peer cluster.
-   * 1. If peer config already has a namespace, then not allow set any table of this namespace
-   *    to the peer config.
-   * 2. If peer config already has a table, then not allow set this table's namespace to the peer
-   *    config.
-   *
+   * <ol>
+   * <li>If peer config already has a namespace, then it is not allowed to set any table of this
+   * namespace in the peer config.</li>
+   * <li>If peer config already has a table, then it is not allowed to set this table's namespace
+   * in the peer config.</li>
+   * </ol>
+   * <p>
    * Set a exclude namespace in the peer config means that all tables in this namespace can't be
    * replicated to the peer cluster.
-   * 1. If peer config already has a exclude namespace, then not allow set any exclude table of
-   *    this namespace to the peer config.
-   * 2. If peer config already has a exclude table, then not allow set this table's namespace
-   *    as a exclude namespace.
+   * <ol>
+   * <li>If peer config already has an exclude namespace, then it is not allowed to set any
+   * exclude table of this namespace in the peer config.</li>
+   * <li>If peer config already has an exclude table, then it is not allowed to set this table's
+   * namespace as an exclude namespace.</li>
+   * </ol>
    */
   private void checkNamespacesAndTableCfsConfigConflict(Set<String> namespaces,
-      Map<TableName, ? extends Collection<String>> tableCfs) throws ReplicationException {
+      Map<TableName, ? extends Collection<String>> tableCfs) {
     if (namespaces == null || namespaces.isEmpty()) {
       return;
     }
@@ -176,24 +174,22 @@ public class ReplicationManager {
     for (Map.Entry<TableName, ? extends Collection<String>> entry : tableCfs.entrySet()) {
       TableName table = entry.getKey();
       if (namespaces.contains(table.getNamespaceAsString())) {
-        throw new ReplicationException("Table-cfs " + table + " is conflict with namespaces "
+        throw new IllegalArgumentException("Table-cfs " + table + " is conflict with namespaces "
             + table.getNamespaceAsString() + " in peer config");
       }
     }
   }
 
-  private void checkConfiguredWALEntryFilters(ReplicationPeerConfig peerConfig)
-      throws IOException {
-    String filterCSV = peerConfig.getConfiguration().
-        get(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY);
-    if (filterCSV != null && !filterCSV.isEmpty()){
-      String [] filters = filterCSV.split(",");
+  private void checkConfiguredWALEntryFilters(ReplicationPeerConfig peerConfig) {
+    String filterCSV = peerConfig.getConfiguration()
+        .get(BaseReplicationEndpoint.REPLICATION_WALENTRYFILTER_CONFIG_KEY);
+    if (filterCSV != null && !filterCSV.isEmpty()) {
+      String[] filters = filterCSV.split(",");
       for (String filter : filters) {
         try {
-          Class clazz = Class.forName(filter);
-          Object o = clazz.newInstance();
+          Class.forName(filter).newInstance();
         } catch (Exception e) {
-          throw new DoNotRetryIOException("Configured WALEntryFilter " + filter +
+          throw new IllegalArgumentException("Configured WALEntryFilter " + filter +
               " could not be created. Failing add/update " + "peer operation.", e);
         }
       }
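
To make the new IllegalArgumentException paths concrete, here is a peer config that
checkPeerConfig would now reject; a sketch, assuming the mutable ReplicationPeerConfig
setters of this branch:

    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
    peerConfig.setClusterKey("zk1,zk2,zk3:2181:/hbase");
    peerConfig.setReplicateAllUserTables(false);
    peerConfig.setNamespaces(Collections.singleton("ns1"));
    Map<TableName, List<String>> tableCfs = new HashMap<>();
    // t1 lives in ns1, which is already replicated wholesale.
    tableCfs.put(TableName.valueOf("ns1:t1"), null);
    peerConfig.setTableCFsMap(tableCfs);
    // checkNamespacesAndTableCfsConfigConflict now throws:
    //   IllegalArgumentException: Table-cfs ns1:t1 conflicts with namespace ns1 in peer config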

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/UpdatePeerConfigProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/UpdatePeerConfigProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/UpdatePeerConfigProcedure.java
new file mode 100644
index 0000000..435eefc
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/UpdatePeerConfigProcedure.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.replication;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil;
+import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.UpdatePeerConfigStateData;
+
+/**
+ * The procedure for updating the config for a replication peer.
+ */
+@InterfaceAudience.Private
+public class UpdatePeerConfigProcedure extends ModifyPeerProcedure {
+
+  private static final Log LOG = LogFactory.getLog(UpdatePeerConfigProcedure.class);
+
+  private ReplicationPeerConfig peerConfig;
+
+  public UpdatePeerConfigProcedure() {
+  }
+
+  public UpdatePeerConfigProcedure(String peerId, ReplicationPeerConfig peerConfig) {
+    super(peerId);
+    this.peerConfig = peerConfig;
+  }
+
+  @Override
+  public PeerOperationType getPeerOperationType() {
+    return PeerOperationType.UPDATE_CONFIG;
+  }
+
+  @Override
+  protected void prePeerModification(MasterProcedureEnv env) throws IOException {
+    MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      cpHost.preUpdateReplicationPeerConfig(peerId, peerConfig);
+    }
+  }
+
+  @Override
+  protected void updatePeerStorage(MasterProcedureEnv env)
+      throws IllegalArgumentException, Exception {
+    env.getReplicationManager().updatePeerConfig(peerId, peerConfig);
+  }
+
+  @Override
+  protected void postPeerModification(MasterProcedureEnv env) throws IOException {
+    LOG.info("Successfully updated peer config of " + peerId + " to " + peerConfig);
+    MasterCoprocessorHost cpHost = env.getMasterCoprocessorHost();
+    if (cpHost != null) {
+      cpHost.postUpdateReplicationPeerConfig(peerId, peerConfig);
+    }
+  }
+
+  @Override
+  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
+    super.serializeStateData(serializer);
+    serializer.serialize(UpdatePeerConfigStateData.newBuilder()
+        .setPeerConfig(ReplicationPeerConfigUtil.convert(peerConfig)).build());
+  }
+
+  @Override
+  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
+    super.deserializeStateData(serializer);
+    peerConfig = ReplicationPeerConfigUtil
+        .convert(serializer.deserialize(UpdatePeerConfigStateData.class).getPeerConfig());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 779451b..6015f24 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -149,6 +149,7 @@ import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.ForeignExceptionUtil;
 import org.apache.hadoop.hbase.util.HasThread;
 import org.apache.hadoop.hbase.util.JvmPauseMonitor;
 import org.apache.hadoop.hbase.util.NettyEventLoopGroupConfig;
@@ -178,7 +179,6 @@ import org.apache.zookeeper.KeeperException;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Throwables;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannel;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
@@ -3741,7 +3741,7 @@ public class HRegionServer extends HasThread implements
       ReportProcedureDoneRequest.newBuilder().setProcId(procId);
     if (error != null) {
       builder.setStatus(ReportProcedureDoneRequest.Status.ERROR)
-          .setError(Throwables.getStackTraceAsString(error));
+          .setError(ForeignExceptionUtil.toProtoForeignException(serverName.toString(), error));
     } else {
       builder.setStatus(ReportProcedureDoneRequest.Status.SUCCESS);
     }
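
The switch from a flat stack-trace string to a proto ForeignException lets the master
rebuild a typed exception instead of just logging text. A sketch of the round trip; the
master-side handler shown here is an assumption:

    // RS side: wrap the failure, preserving class name, message and stack trace.
    ForeignExceptionMessage msg =
        ForeignExceptionUtil.toProtoForeignException(serverName.toString(), error);

    // Master side: rebuild it and hand it to the waiting RefreshPeerProcedure.
    RemoteProcedureException remoteError = RemoteProcedureException.fromProto(msg);
    procedure.remoteOperationFailed(env, remoteError);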

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java
new file mode 100644
index 0000000..a47a483
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RefreshPeerCallable.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.replication.regionserver;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.executor.EventType;
+import org.apache.hadoop.hbase.procedure2.RSProcedureCallable;
+import org.apache.hadoop.hbase.regionserver.HRegionServer;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.RefreshPeerParameter;
+
+/**
+ * The callable executed at RS side to refresh the peer config/state.
+ * <p>
+ * TODO: only a dummy implementation for verifying the framework, will add implementation later.
+ */
+@InterfaceAudience.Private
+public class RefreshPeerCallable implements RSProcedureCallable {
+
+  private HRegionServer rs;
+
+  private String peerId;
+
+  private Exception initError;
+
+  @Override
+  public Void call() throws Exception {
+    if (initError != null) {
+      throw initError;
+    }
+    Path dir = new Path("/" + peerId);
+    if (rs.getFileSystem().exists(dir)) {
+      rs.getFileSystem().create(new Path(dir, rs.getServerName().toString())).close();
+    }
+    return null;
+  }
+
+  @Override
+  public void init(byte[] parameter, HRegionServer rs) {
+    this.rs = rs;
+    try {
+      this.peerId = RefreshPeerParameter.parseFrom(parameter).getPeerId();
+    } catch (InvalidProtocolBufferException e) {
+      initError = e;
+      return;
+    }
+  }
+
+  @Override
+  public EventType getEventType() {
+    return EventType.RS_REFRESH_PEER;
+  }
+}
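
Since the dummy callable now only writes its marker when /&lt;peerId&gt; already exists, a
test can pre-create the directory and count one marker per region server. A sketch with
assumed test-utility names (UTIL, PEER_ID):

    FileSystem fs = UTIL.getTestFileSystem();
    fs.mkdirs(new Path("/" + PEER_ID));
    // ... submit a peer modification procedure and wait for it to finish ...
    assertEquals(UTIL.getMiniHBaseCluster().getRegionServerThreads().size(),
      fs.listStatus(new Path("/" + PEER_ID)).length);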

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
index 413abe3..540a67c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/MockNoopMasterServices.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.master.assignment.AssignmentManager;
 import org.apache.hadoop.hbase.master.locking.LockManager;
 import org.apache.hadoop.hbase.master.normalizer.RegionNormalizer;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
+import org.apache.hadoop.hbase.master.replication.ReplicationManager;
 import org.apache.hadoop.hbase.master.snapshot.SnapshotManager;
 import org.apache.hadoop.hbase.procedure.MasterProcedureManagerHost;
 import org.apache.hadoop.hbase.procedure2.LockedResource;
@@ -369,7 +370,6 @@ public class MockNoopMasterServices implements MasterServices, Server {
 
   @Override
   public ClusterConnection getClusterConnection() {
-    // TODO Auto-generated method stub
     return null;
   }
 
@@ -399,20 +399,24 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
-  public void addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
+  public long addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, boolean enabled)
       throws ReplicationException {
+    return 0;
   }
 
   @Override
-  public void removeReplicationPeer(String peerId) throws ReplicationException {
+  public long removeReplicationPeer(String peerId) throws ReplicationException {
+    return 0;
   }
 
   @Override
-  public void enableReplicationPeer(String peerId) throws ReplicationException, IOException {
+  public long enableReplicationPeer(String peerId) throws ReplicationException, IOException {
+    return 0;
   }
 
   @Override
-  public void disableReplicationPeer(String peerId) throws ReplicationException, IOException {
+  public long disableReplicationPeer(String peerId) throws ReplicationException, IOException {
+    return 0;
   }
 
   @Override
@@ -422,8 +426,9 @@ public class MockNoopMasterServices implements MasterServices, Server {
   }
 
   @Override
-  public void updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
+  public long updateReplicationPeerConfig(String peerId, ReplicationPeerConfig peerConfig)
       throws ReplicationException, IOException {
+    return 0;
   }
 
   @Override
@@ -458,7 +463,6 @@ public class MockNoopMasterServices implements MasterServices, Server {
 
   @Override
   public ProcedureEvent getInitializedEvent() {
-    // TODO Auto-generated method stub
     return null;
   }
 
@@ -471,4 +475,9 @@ public class MockNoopMasterServices implements MasterServices, Server {
   public Connection createConnection(Configuration conf) throws IOException {
     return null;
   }
+
+  @Override
+  public ReplicationManager getReplicationManager() {
+    return null;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/87a5b422/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/DummyModifyPeerProcedure.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/DummyModifyPeerProcedure.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/DummyModifyPeerProcedure.java
index 44343d7..ed7c6fa 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/DummyModifyPeerProcedure.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/replication/DummyModifyPeerProcedure.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hbase.master.replication;
 
-import java.io.IOException;
+import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 
 public class DummyModifyPeerProcedure extends ModifyPeerProcedure {
 
@@ -34,8 +34,15 @@ public class DummyModifyPeerProcedure extends ModifyPeerProcedure {
   }
 
   @Override
-  protected boolean updatePeerStorage() throws IOException {
-    return true;
+  protected void prePeerModification(MasterProcedureEnv env) {
+  }
+
+  @Override
+  protected void updatePeerStorage(MasterProcedureEnv env) {
+  }
+
+  @Override
+  protected void postPeerModification(MasterProcedureEnv env) {
   }
 
 }


[08/24] hbase git commit: HBASE-18440 ITs and Actions modify immutable TableDescriptors

Posted by zh...@apache.org.
HBASE-18440 ITs and Actions modify immutable TableDescriptors

Signed-off-by: Guanghao Zhang <zg...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/74beb5a3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/74beb5a3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/74beb5a3

Branch: refs/heads/HBASE-19397
Commit: 74beb5a3b95c64abd8cdceb67db0233b89da0746
Parents: e343b0c
Author: Mike Drob <md...@apache.org>
Authored: Sun Jul 23 12:57:23 2017 -0500
Committer: Guanghao Zhang <zg...@apache.org>
Committed: Tue Dec 19 09:59:07 2017 +0800

----------------------------------------------------------------------
 .../IntegrationTestIngestWithEncryption.java    | 19 ++++++++------
 .../hbase/chaos/actions/AddColumnAction.java    | 21 ++++++++-------
 .../actions/DecreaseMaxHFileSizeAction.java     | 27 ++++++++++++--------
 3 files changed, 40 insertions(+), 27 deletions(-)
----------------------------------------------------------------------
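
The pattern adopted in all three files below is the same: read the immutable descriptor,
derive a modified copy through a builder, and hand the copy back to Admin. In its smallest
form (existingCf and newValue stand in for values obtained as in the diffs):

    // Column family level: derive a modified copy, then apply it.
    ColumnFamilyDescriptor updatedCf = ColumnFamilyDescriptorBuilder
        .newBuilder(existingCf).setEncryptionType("AES").build();
    admin.modifyColumnFamily(tableName, updatedCf);

    // Table level: same idea with TableDescriptorBuilder.
    TableDescriptor updatedTable = TableDescriptorBuilder
        .newBuilder(admin.getDescriptor(tableName)).setMaxFileSize(newValue).build();
    admin.modifyTable(updatedTable);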


http://git-wip-us.apache.org/repos/asf/hbase/blob/74beb5a3/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java
index 1f85a51..e730239 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestIngestWithEncryption.java
@@ -24,6 +24,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.io.crypto.KeyProviderForTesting;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.io.hfile.HFileReaderImpl;
@@ -73,7 +76,7 @@ public class IntegrationTestIngestWithEncryption extends IntegrationTestIngest {
     try {
       EncryptionTest.testEncryption(conf, "AES", null);
     } catch (Exception e) {
-      LOG.warn("Encryption configuration test did not pass, skipping test");
+      LOG.warn("Encryption configuration test did not pass, skipping test", e);
       return;
     }
     super.setUpCluster();
@@ -94,14 +97,14 @@ public class IntegrationTestIngestWithEncryption extends IntegrationTestIngest {
     // Update the test table schema so HFiles from this point will be written with
     // encryption features enabled.
     final Admin admin = util.getAdmin();
-    HTableDescriptor tableDescriptor =
-        new HTableDescriptor(admin.getTableDescriptor(getTablename()));
-    for (HColumnDescriptor columnDescriptor: tableDescriptor.getColumnFamilies()) {
-      columnDescriptor.setEncryptionType("AES");
-      LOG.info("Updating CF schema for " + getTablename() + "." +
-        columnDescriptor.getNameAsString());
+    TableDescriptor tableDescriptor = admin.getDescriptor(getTablename());
+    for (ColumnFamilyDescriptor columnDescriptor : tableDescriptor.getColumnFamilies()) {
+      ColumnFamilyDescriptor updatedColumn = ColumnFamilyDescriptorBuilder
+          .newBuilder(columnDescriptor).setEncryptionType("AES").build();
+      LOG.info(
+        "Updating CF schema for " + getTablename() + "." + columnDescriptor.getNameAsString());
       admin.disableTable(getTablename());
-      admin.modifyColumnFamily(getTablename(), columnDescriptor);
+      admin.modifyColumnFamily(getTablename(), updatedColumn);
       admin.enableTable(getTablename());
       util.waitFor(30000, 1000, true, new Predicate<IOException>() {
         @Override

http://git-wip-us.apache.org/repos/asf/hbase/blob/74beb5a3/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
index 0ef8cfd..6c8554a 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/AddColumnAction.java
@@ -21,10 +21,12 @@ package org.apache.hadoop.hbase.chaos.actions;
 import java.io.IOException;
 
 import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 
 /**
  * Action that adds a column family to a table.
@@ -45,12 +47,12 @@ public class AddColumnAction extends Action {
 
   @Override
   public void perform() throws Exception {
-    HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
-    HColumnDescriptor columnDescriptor = null;
+    TableDescriptor tableDescriptor = admin.getDescriptor(tableName);
+    ColumnFamilyDescriptor columnDescriptor = null;
 
-    while(columnDescriptor == null ||
-        tableDescriptor.getFamily(columnDescriptor.getName()) != null) {
-      columnDescriptor = new HColumnDescriptor(RandomStringUtils.randomAlphabetic(5));
+    while (columnDescriptor == null
+        || tableDescriptor.getColumnFamily(columnDescriptor.getName()) != null) {
+      columnDescriptor = ColumnFamilyDescriptorBuilder.of(RandomStringUtils.randomAlphabetic(5));
     }
 
     // Don't try the modify if we're stopping
@@ -60,7 +62,8 @@ public class AddColumnAction extends Action {
 
     LOG.debug("Performing action: Adding " + columnDescriptor + " to " + tableName);
 
-    tableDescriptor.addFamily(columnDescriptor);
-    admin.modifyTable(tableName, tableDescriptor);
+    TableDescriptor modifiedTable = TableDescriptorBuilder.newBuilder(tableDescriptor)
+        .addColumnFamily(columnDescriptor).build();
+    admin.modifyTable(modifiedTable);
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/74beb5a3/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DecreaseMaxHFileSizeAction.java
----------------------------------------------------------------------
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DecreaseMaxHFileSizeAction.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DecreaseMaxHFileSizeAction.java
index 98babeb..4610ef0 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DecreaseMaxHFileSizeAction.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/DecreaseMaxHFileSizeAction.java
@@ -18,13 +18,14 @@
 
 package org.apache.hadoop.hbase.chaos.actions;
 
-import org.apache.hadoop.hbase.HBaseTestingUtility;
+import java.io.IOException;
+import java.util.Random;
+
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
-
-import java.util.Random;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
 
 public class DecreaseMaxHFileSizeAction extends Action {
 
@@ -33,6 +34,7 @@ public class DecreaseMaxHFileSizeAction extends Action {
   private final long sleepTime;
   private final TableName tableName;
   private final Random random;
+  private Admin admin;
 
   public DecreaseMaxHFileSizeAction(long sleepTime, TableName tableName) {
     this.sleepTime = sleepTime;
@@ -41,13 +43,17 @@ public class DecreaseMaxHFileSizeAction extends Action {
   }
 
   @Override
+  public void init(ActionContext context) throws IOException {
+    super.init(context);
+    this.admin = context.getHBaseIntegrationTestingUtility().getAdmin();
+  }
+
+  @Override
   public void perform() throws Exception {
-    HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
-    Admin admin = util.getAdmin();
-    HTableDescriptor htd = admin.getTableDescriptor(tableName);
+    TableDescriptor td = admin.getDescriptor(tableName);
 
     // Try and get the current value.
-    long currentValue = htd.getMaxFileSize();
+    long currentValue = td.getMaxFileSize();
 
     // If the current value is not set use the default for the cluster.
     // If configs are really weird this might not work.
@@ -66,7 +72,8 @@ public class DecreaseMaxHFileSizeAction extends Action {
     newValue = Math.max(minFileSize, newValue) - (512 - random.nextInt(1024));
 
     // Change the table descriptor.
-    htd.setMaxFileSize(newValue);
+    TableDescriptor modifiedTable =
+        TableDescriptorBuilder.newBuilder(td).setMaxFileSize(newValue).build();
 
     // Don't try the modify if we're stopping
     if (context.isStopping()) {
@@ -74,7 +81,7 @@ public class DecreaseMaxHFileSizeAction extends Action {
     }
 
     // modify the table.
-    admin.modifyTable(tableName, htd);
+    admin.modifyTable(modifiedTable);
 
     // Sleep some time.
     if (sleepTime > 0) {


[05/24] hbase git commit: HBASE-19549 Change path comparison in CommonFSUtils

Posted by zh...@apache.org.
HBASE-19549 Change path comparison in CommonFSUtils

Also change makeQualified(FileSystem fs)
to makeQualified(URI defaultUri, Path workingDir)

Signed-off-by: Michael Stack <st...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/abae9078
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/abae9078
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/abae9078

Branch: refs/heads/HBASE-19397
Commit: abae90787fdf2a207ebacae3223eee21cf3f7659
Parents: b4056d2
Author: Peter Somogyi <ps...@cloudera.com>
Authored: Mon Dec 18 13:39:25 2017 +0100
Committer: Michael Stack <st...@apache.org>
Committed: Mon Dec 18 15:27:39 2017 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/hbase/util/CommonFSUtils.java  | 10 ++++++----
 .../apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java |  2 +-
 2 files changed, 7 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/abae9078/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
----------------------------------------------------------------------
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index eba3b12..0e0372c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -356,7 +356,7 @@ public abstract class CommonFSUtils {
   public static Path getRootDir(final Configuration c) throws IOException {
     Path p = new Path(c.get(HConstants.HBASE_DIR));
     FileSystem fs = p.getFileSystem(c);
-    return p.makeQualified(fs);
+    return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
   }
 
   public static void setRootDir(final Configuration c, final Path root) throws IOException {
@@ -384,7 +384,7 @@ public abstract class CommonFSUtils {
       return getRootDir(c);
     }
     FileSystem fs = p.getFileSystem(c);
-    return p.makeQualified(fs);
+    return p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
   }
 
   @VisibleForTesting
@@ -399,8 +399,10 @@ public abstract class CommonFSUtils {
 
   private static boolean isValidWALRootDir(Path walDir, final Configuration c) throws IOException {
     Path rootDir = getRootDir(c);
-    if (!walDir.equals(rootDir)) {
-      if (walDir.toString().startsWith(rootDir.toString() + "/")) {
+    FileSystem fs = walDir.getFileSystem(c);
+    Path qualifiedWalDir = walDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
+    if (!qualifiedWalDir.equals(rootDir)) {
+      if (qualifiedWalDir.toString().startsWith(rootDir.toString() + "/")) {
         throw new IllegalStateException("Illegal WAL directory specified. " +
             "WAL directories are not permitted to be under the root directory if set.");
       }
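
Hadoop deprecated Path.makeQualified(FileSystem); the two-argument form used here produces
the same fully qualified path from the filesystem's URI and working directory, and
qualifying walDir before comparing fixes the case where an unqualified WAL path never
matched the already-qualified root dir. A quick sketch of what the call does:

    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/hbase");
    Path qualified = p.makeQualified(fs.getUri(), fs.getWorkingDirectory());
    // e.g. hdfs://namenode:8020/hbase: the scheme and authority are filled in,
    // and relative paths are resolved against the working directory first.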

http://git-wip-us.apache.org/repos/asf/hbase/blob/abae9078/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
----------------------------------------------------------------------
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index c37f284..dc3bb61 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -965,7 +965,7 @@ public class TableMapReduceUtil {
     }
 
     LOG.debug(String.format("For class %s, using jar %s", my_class.getName(), jar));
-    return new Path(jar).makeQualified(fs);
+    return new Path(jar).makeQualified(fs.getUri(), fs.getWorkingDirectory());
   }
 
   /**